]> git.droids-corp.org - dpdk.git/commitdiff
ethdev: change queue release callback
authorXueming Li <xuemingl@nvidia.com>
Wed, 6 Oct 2021 11:18:22 +0000 (19:18 +0800)
committerFerruh Yigit <ferruh.yigit@intel.com>
Wed, 6 Oct 2021 17:16:03 +0000 (19:16 +0200)
Currently, most ethdev callback API use queue ID as parameter, but Rx
and Tx queue release callback use queue object which is used by Rx and
Tx burst data plane callback.

To align with other eth device queue configuration callbacks:
- queue release callbacks are changed to use queue ID
- all drivers are adapted

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
94 files changed:
drivers/net/atlantic/atl_ethdev.h
drivers/net/atlantic/atl_rxtx.c
drivers/net/avp/avp_ethdev.c
drivers/net/axgbe/axgbe_dev.c
drivers/net/axgbe/axgbe_rxtx.c
drivers/net/axgbe/axgbe_rxtx.h
drivers/net/bnx2x/bnx2x_rxtx.c
drivers/net/bnx2x/bnx2x_rxtx.h
drivers/net/bnxt/bnxt_reps.c
drivers/net/bnxt/bnxt_reps.h
drivers/net/bnxt/bnxt_ring.c
drivers/net/bnxt/bnxt_rxq.c
drivers/net/bnxt/bnxt_rxq.h
drivers/net/bnxt/bnxt_txq.c
drivers/net/bnxt/bnxt_txq.h
drivers/net/bonding/rte_eth_bond_pmd.c
drivers/net/cnxk/cnxk_ethdev.c
drivers/net/cxgbe/cxgbe_ethdev.c
drivers/net/cxgbe/cxgbe_pfvf.h
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/e1000/e1000_ethdev.h
drivers/net/e1000/em_rxtx.c
drivers/net/e1000/igb_rxtx.c
drivers/net/ena/ena_ethdev.c
drivers/net/enetc/enetc_ethdev.c
drivers/net/enic/enic_ethdev.c
drivers/net/enic/enic_vf_representor.c
drivers/net/failsafe/failsafe_ops.c
drivers/net/fm10k/fm10k_ethdev.c
drivers/net/hinic/hinic_pmd_ethdev.c
drivers/net/hns3/hns3_rxtx.c
drivers/net/hns3/hns3_rxtx.h
drivers/net/i40e/i40e_fdir.c
drivers/net/i40e/i40e_rxtx.c
drivers/net/i40e/i40e_rxtx.h
drivers/net/iavf/iavf_rxtx.c
drivers/net/iavf/iavf_rxtx.h
drivers/net/ice/ice_dcf_ethdev.c
drivers/net/ice/ice_ethdev.c
drivers/net/ice/ice_rxtx.c
drivers/net/ice/ice_rxtx.h
drivers/net/igc/igc_ethdev.c
drivers/net/igc/igc_txrx.c
drivers/net/igc/igc_txrx.h
drivers/net/ionic/ionic_lif.c
drivers/net/ionic/ionic_rxtx.c
drivers/net/ionic/ionic_rxtx.h
drivers/net/ixgbe/ixgbe_ethdev.h
drivers/net/ixgbe/ixgbe_rxtx.c
drivers/net/liquidio/lio_ethdev.c
drivers/net/liquidio/lio_ethdev.h
drivers/net/liquidio/lio_rxtx.c
drivers/net/memif/rte_eth_memif.c
drivers/net/mlx4/mlx4.c
drivers/net/mlx4/mlx4_rxq.c
drivers/net/mlx4/mlx4_rxtx.h
drivers/net/mlx4/mlx4_txq.c
drivers/net/mlx5/mlx5_rx.h
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_tx.h
drivers/net/mlx5/mlx5_txq.c
drivers/net/mvneta/mvneta_ethdev.c
drivers/net/mvneta/mvneta_rxtx.c
drivers/net/mvneta/mvneta_rxtx.h
drivers/net/mvpp2/mrvl_ethdev.c
drivers/net/netvsc/hn_rxtx.c
drivers/net/netvsc/hn_var.h
drivers/net/netvsc/hn_vf.c
drivers/net/nfb/nfb_ethdev.c
drivers/net/nfb/nfb_rx.c
drivers/net/nfb/nfb_rx.h
drivers/net/nfb/nfb_tx.c
drivers/net/nfb/nfb_tx.h
drivers/net/nfp/nfp_rxtx.c
drivers/net/nfp/nfp_rxtx.h
drivers/net/ngbe/ngbe_ethdev.h
drivers/net/ngbe/ngbe_rxtx.c
drivers/net/null/rte_eth_null.c
drivers/net/octeontx/octeontx_ethdev.c
drivers/net/octeontx2/otx2_ethdev.c
drivers/net/octeontx_ep/otx_ep_ethdev.c
drivers/net/qede/qede_ethdev.c
drivers/net/sfc/sfc_ethdev.c
drivers/net/szedata2/rte_eth_szedata2.c
drivers/net/tap/rte_eth_tap.c
drivers/net/thunderx/nicvf_ethdev.c
drivers/net/txgbe/txgbe_ethdev.h
drivers/net/txgbe/txgbe_rxtx.c
drivers/net/vhost/rte_eth_vhost.c
drivers/net/vmxnet3/vmxnet3_ethdev.c
drivers/net/vmxnet3/vmxnet3_ethdev.h
drivers/net/vmxnet3/vmxnet3_rxtx.c
lib/ethdev/ethdev_driver.h
lib/ethdev/rte_ethdev.c

index f547571b5c97bcb75befcb54ae05fe633f5d22a9..a2d1d4397c6acc85164b9b8bed9bfb4a5e19e3cc 100644 (file)
@@ -54,8 +54,8 @@ struct atl_adapter {
 /*
  * RX/TX function prototypes
  */
-void atl_rx_queue_release(void *rxq);
-void atl_tx_queue_release(void *txq);
+void atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 int atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
index 7d367c9306ecdb042d1c1dc20d4d737388ec7935..fca682d8b090f9d3b3faea33c793bd6ae9dd3752 100644 (file)
@@ -125,7 +125,7 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
         * different socket than was previously used.
         */
        if (dev->data->rx_queues[rx_queue_id] != NULL) {
-               atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+               atl_rx_queue_release(dev, rx_queue_id);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
 
@@ -247,7 +247,7 @@ atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
         * different socket than was previously used.
         */
        if (dev->data->tx_queues[tx_queue_id] != NULL) {
-               atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+               atl_tx_queue_release(dev, tx_queue_id);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
 
@@ -498,13 +498,13 @@ atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 }
 
 void
-atl_rx_queue_release(void *rx_queue)
+atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct atl_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
 
-       if (rx_queue != NULL) {
-               struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
+       PMD_INIT_FUNC_TRACE();
 
+       if (rxq != NULL) {
                atl_rx_queue_release_mbufs(rxq);
                rte_free(rxq->sw_ring);
                rte_free(rxq);
@@ -569,13 +569,13 @@ atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 void
-atl_tx_queue_release(void *tx_queue)
+atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
-       if (tx_queue != NULL) {
-               struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
+       PMD_INIT_FUNC_TRACE();
 
+       if (txq != NULL) {
                atl_tx_queue_release_mbufs(txq);
                rte_free(txq->sw_ring);
                rte_free(txq);
@@ -590,13 +590,13 @@ atl_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               atl_rx_queue_release(dev->data->rx_queues[i]);
+               atl_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = 0;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               atl_tx_queue_release(dev->data->tx_queues[i]);
+               atl_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = 0;
        }
        dev->data->nb_tx_queues = 0;
index b5fafd32b09ebd959fc22c2ae47ed800a5f00f00..6cb8bb4338deb11567a7103f34d3e23bccdfb12f 100644 (file)
@@ -75,8 +75,8 @@ static uint16_t avp_xmit_pkts(void *tx_queue,
                              struct rte_mbuf **tx_pkts,
                              uint16_t nb_pkts);
 
-static void avp_dev_rx_queue_release(void *rxq);
-static void avp_dev_tx_queue_release(void *txq);
+static void avp_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void avp_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 static int avp_dev_stats_get(struct rte_eth_dev *dev,
                              struct rte_eth_stats *stats);
@@ -1926,18 +1926,11 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 }
 
 static void
-avp_dev_rx_queue_release(void *rx_queue)
+avp_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
-       struct avp_queue *rxq = (struct avp_queue *)rx_queue;
-       struct avp_dev *avp = rxq->avp;
-       struct rte_eth_dev_data *data = avp->dev_data;
-       unsigned int i;
-
-       for (i = 0; i < avp->num_rx_queues; i++) {
-               if (data->rx_queues[i] == rxq) {
-                       rte_free(data->rx_queues[i]);
-                       data->rx_queues[i] = NULL;
-               }
+       if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
+               rte_free(eth_dev->data->rx_queues[rx_queue_id]);
+               eth_dev->data->rx_queues[rx_queue_id] = NULL;
        }
 }
 
@@ -1957,18 +1950,11 @@ avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev)
 }
 
 static void
-avp_dev_tx_queue_release(void *tx_queue)
+avp_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 {
-       struct avp_queue *txq = (struct avp_queue *)tx_queue;
-       struct avp_dev *avp = txq->avp;
-       struct rte_eth_dev_data *data = avp->dev_data;
-       unsigned int i;
-
-       for (i = 0; i < avp->num_tx_queues; i++) {
-               if (data->tx_queues[i] == txq) {
-                       rte_free(data->tx_queues[i]);
-                       data->tx_queues[i] = NULL;
-               }
+       if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
+               rte_free(eth_dev->data->tx_queues[tx_queue_id]);
+               eth_dev->data->tx_queues[tx_queue_id] = NULL;
        }
 }
 
index 786288a7b0798d11e8a2224a3caf682a8f2f5c4c..ca32ad6418739c6bf436c7f1729fd292a6da8323 100644 (file)
@@ -950,7 +950,7 @@ static int wrapper_rx_desc_init(struct axgbe_port *pdata)
                        if (mbuf == NULL) {
                                PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
                                            (unsigned int)rxq->queue_id, j);
-                               axgbe_dev_rx_queue_release(rxq);
+                               axgbe_dev_rx_queue_release(pdata->eth_dev, i);
                                return -ENOMEM;
                        }
                        rxq->sw_ring[j] = mbuf;
index 33f709a6bb02df456f7670ef171e407e1ad0f787..c8618d2d6daab925045b9435fca2c87065cfe915 100644 (file)
@@ -31,9 +31,9 @@ axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
        }
 }
 
-void axgbe_dev_rx_queue_release(void *rxq)
+void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       axgbe_rx_queue_release(rxq);
+       axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
 }
 
 int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
@@ -517,9 +517,9 @@ static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
        }
 }
 
-void axgbe_dev_tx_queue_release(void *txq)
+void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       axgbe_tx_queue_release(txq);
+       axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
 }
 
 int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
index c2b11bb0e6e7ec6abadf765f77a787617bdefeca..2a330339cd054c7ad2fc5da9c8a631c5c51d44ed 100644 (file)
@@ -153,7 +153,7 @@ struct axgbe_tx_queue {
  */
 
 
-void axgbe_dev_tx_queue_release(void *txq);
+void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 int  axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                              uint16_t nb_tx_desc, unsigned int socket_id,
                              const struct rte_eth_txconf *tx_conf);
@@ -171,7 +171,7 @@ uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts);
 
 
-void axgbe_dev_rx_queue_release(void *rxq);
+void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 int  axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                              uint16_t nb_rx_desc, unsigned int socket_id,
                              const struct rte_eth_rxconf *rx_conf,
index 2b1760229051445d5355322071c82204d89224f0..fea7a34e7d8ff0a6f85a113937f7a250660ccde7 100644 (file)
@@ -37,9 +37,9 @@ bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
 }
 
 void
-bnx2x_dev_rx_queue_release(void *rxq)
+bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       bnx2x_rx_queue_release(rxq);
+       bnx2x_rx_queue_release(dev->data->rx_queues[queue_idx]);
 }
 
 int
@@ -182,9 +182,9 @@ bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
 }
 
 void
-bnx2x_dev_tx_queue_release(void *txq)
+bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       bnx2x_tx_queue_release(txq);
+       bnx2x_tx_queue_release(dev->data->tx_queues[queue_idx]);
 }
 
 static uint16_t
index 3f4692b47dd12a0a10d2685a8deb132f24e66532..247a72230bb38ba5d7d91ea6285429b00edb3035 100644 (file)
@@ -72,8 +72,8 @@ int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                              uint16_t nb_tx_desc, unsigned int socket_id,
                              const struct rte_eth_txconf *tx_conf);
 
-void bnx2x_dev_rx_queue_release(void *rxq);
-void bnx2x_dev_tx_queue_release(void *txq);
+void bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+void bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev);
 void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev);
 void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);
index bdbad53b7d7fcb17eab7237e8193973b8a9d5599..df05619c3fddfcd07d6c9b91e90b40c0ce0e241c 100644 (file)
@@ -630,7 +630,7 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
-                       bnxt_rx_queue_release_op(rxq);
+                       bnxt_rx_queue_release_op(eth_dev, queue_idx);
        }
 
        rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
@@ -641,6 +641,8 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                return -ENOMEM;
        }
 
+       eth_dev->data->rx_queues[queue_idx] = rxq;
+
        rxq->nb_rx_desc = nb_desc;
 
        rc = bnxt_init_rep_rx_ring(rxq, socket_id);
@@ -660,20 +662,19 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        rxq->rx_ring->rx_buf_ring = buf_ring;
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
-       eth_dev->data->rx_queues[queue_idx] = rxq;
 
        return 0;
 
 out:
        if (rxq)
-               bnxt_rep_rx_queue_release_op(rxq);
+               bnxt_rep_rx_queue_release_op(eth_dev, queue_idx);
 
        return rc;
 }
 
-void bnxt_rep_rx_queue_release_op(void *rx_queue)
+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+       struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
 
        if (!rxq)
                return;
@@ -728,8 +729,8 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
        if (eth_dev->data->tx_queues) {
                vfr_txq = eth_dev->data->tx_queues[queue_idx];
-               bnxt_rep_tx_queue_release_op(vfr_txq);
-               vfr_txq = NULL;
+               if (vfr_txq != NULL)
+                       bnxt_rep_tx_queue_release_op(eth_dev, queue_idx);
        }
 
        vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
@@ -758,15 +759,16 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
-void bnxt_rep_tx_queue_release_op(void *tx_queue)
+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
+       struct bnxt_vf_rep_tx_queue *vfr_txq = dev->data->tx_queues[queue_idx];
 
        if (!vfr_txq)
                return;
 
        rte_free(vfr_txq->txq);
        rte_free(vfr_txq);
+       dev->data->tx_queues[queue_idx] = NULL;
 }
 
 int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,
index 8d6139f2b70a3bdb5f49a15b3cb5c0d2904f76f7..01e57ee5b50985dace0cfb13939dbdefa782ed5c 100644 (file)
@@ -42,8 +42,8 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                                  __rte_unused unsigned int socket_id,
                                  __rte_unused const struct rte_eth_txconf *
                                  tx_conf);
-void bnxt_rep_rx_queue_release_op(void *rx_queue);
-void bnxt_rep_tx_queue_release_op(void *tx_queue);
+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
 int  bnxt_rep_dev_stop_op(struct rte_eth_dev *eth_dev);
 int bnxt_rep_dev_close_op(struct rte_eth_dev *eth_dev);
 int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,
index 957b175f1b89d65089d24361212501c706c67280..aaad08e5e5b79130492b8f0bf82f53e55b83a650 100644 (file)
@@ -640,7 +640,7 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
        if (rxq->rx_started) {
                if (bnxt_init_one_rx_ring(rxq)) {
                        PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
-                       bnxt_rx_queue_release_op(rxq);
+                       bnxt_rx_queue_release_op(bp->eth_dev, queue_index);
                        rc = -ENOMEM;
                        goto err_out;
                }
index bbcb3b06e7df50f600217414d5436bc47c424d2f..2eb7a3cb29b3d4113026bd104ae8e254746e9423 100644 (file)
@@ -240,9 +240,9 @@ void bnxt_free_rx_mbufs(struct bnxt *bp)
        }
 }
 
-void bnxt_rx_queue_release_op(void *rx_queue)
+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+       struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
 
        if (rxq) {
                if (is_bnxt_in_error(rxq->bp))
@@ -273,6 +273,7 @@ void bnxt_rx_queue_release_op(void *rx_queue)
                rxq->mz = NULL;
 
                rte_free(rxq);
+               dev->data->rx_queues[queue_idx] = NULL;
        }
 }
 
@@ -307,7 +308,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
-                       bnxt_rx_queue_release_op(rxq);
+                       bnxt_rx_queue_release_op(eth_dev, queue_idx);
        }
        rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
@@ -328,6 +329,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
        PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
 
+       eth_dev->data->rx_queues[queue_idx] = rxq;
+
        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
        if (rc) {
                PMD_DRV_LOG(ERR,
@@ -343,7 +346,6 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        else
                rxq->crc_len = 0;
 
-       eth_dev->data->rx_queues[queue_idx] = rxq;
        /* Allocate RX ring hardware descriptors */
        rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
                              NULL, "rxr");
@@ -369,7 +371,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
        return 0;
 err:
-       bnxt_rx_queue_release_op(rxq);
+       bnxt_rx_queue_release_op(eth_dev, queue_idx);
        return rc;
 }
 
index 42bd8e7ab76ba02d54d4f91a11b11413350fd8b8..9bb9352febc57baf7c5059dcdd8ccd7102d5c871 100644 (file)
@@ -46,7 +46,7 @@ struct bnxt_rx_queue {
 
 void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
 int bnxt_mq_rx_configure(struct bnxt *bp);
-void bnxt_rx_queue_release_op(void *rx_queue);
+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
 int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                               uint16_t queue_idx,
                               uint16_t nb_desc,
index 3ffc334cff5e28cf2f5fbfe442bf86e0e3c66a71..72a55ea643281846f6cf4d3543445f417d87f55c 100644 (file)
@@ -53,9 +53,9 @@ void bnxt_free_tx_mbufs(struct bnxt *bp)
        }
 }
 
-void bnxt_tx_queue_release_op(void *tx_queue)
+void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+       struct bnxt_tx_queue *txq = dev->data->tx_queues[queue_idx];
 
        if (txq) {
                if (is_bnxt_in_error(txq->bp))
@@ -83,6 +83,7 @@ void bnxt_tx_queue_release_op(void *tx_queue)
 
                rte_free(txq->free);
                rte_free(txq);
+               dev->data->tx_queues[queue_idx] = NULL;
        }
 }
 
@@ -114,10 +115,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
        if (eth_dev->data->tx_queues) {
                txq = eth_dev->data->tx_queues[queue_idx];
-               if (txq) {
-                       bnxt_tx_queue_release_op(txq);
-                       txq = NULL;
-               }
+               if (txq)
+                       bnxt_tx_queue_release_op(eth_dev, queue_idx);
        }
        txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
@@ -126,6 +125,9 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                return -ENOMEM;
        }
 
+       txq->bp = bp;
+       eth_dev->data->tx_queues[queue_idx] = txq;
+
        txq->free = rte_zmalloc_socket(NULL,
                                       sizeof(struct rte_mbuf *) * nb_desc,
                                       RTE_CACHE_LINE_SIZE, socket_id);
@@ -134,7 +136,6 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                rc = -ENOMEM;
                goto err;
        }
-       txq->bp = bp;
        txq->nb_tx_desc = nb_desc;
        txq->tx_free_thresh =
                RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_TX_BURST);
@@ -164,10 +165,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                goto err;
        }
 
-       eth_dev->data->tx_queues[queue_idx] = txq;
-
        return 0;
 err:
-       bnxt_tx_queue_release_op(txq);
+       bnxt_tx_queue_release_op(eth_dev, queue_idx);
        return rc;
 }
index e0e142df3eeb10aefd605f18bc9ae3f031d8064f..67fd4cbebb70c5d82ed97ed8fa0f27cc68867b2b 100644 (file)
@@ -37,7 +37,7 @@ struct bnxt_tx_queue {
 
 void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
 void bnxt_free_tx_mbufs(struct bnxt *bp);
-void bnxt_tx_queue_release_op(void *tx_queue);
+void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
 int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
                               uint16_t queue_idx,
                               uint16_t nb_desc,
index 6831fcb1042ac1e824f10788260513792a7719e6..542c6633b53d9322fc5a1b8caa59e52562903d52 100644 (file)
@@ -2332,8 +2332,10 @@ bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 }
 
 static void
-bond_ethdev_rx_queue_release(void *queue)
+bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+       void *queue = dev->data->rx_queues[queue_id];
+
        if (queue == NULL)
                return;
 
@@ -2341,8 +2343,10 @@ bond_ethdev_rx_queue_release(void *queue)
 }
 
 static void
-bond_ethdev_tx_queue_release(void *queue)
+bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+       void *queue = dev->data->tx_queues[queue_id];
+
        if (queue == NULL)
                return;
 
index ec00e620ebed46c25b1dadedde87a3d31c105e61..966bd23c7f986abb2bb0f72e5c7b7ad1b5c2a370 100644 (file)
@@ -346,7 +346,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
        /* Free memory prior to re-allocation if needed. */
        if (eth_dev->data->tx_queues[qid] != NULL) {
                plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
-               dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
+               dev_ops->tx_queue_release(eth_dev, qid);
                eth_dev->data->tx_queues[qid] = NULL;
        }
 
@@ -396,20 +396,20 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 }
 
 static void
-cnxk_nix_tx_queue_release(void *txq)
+cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
+       void *txq = eth_dev->data->tx_queues[qid];
        struct cnxk_eth_txq_sp *txq_sp;
        struct cnxk_eth_dev *dev;
        struct roc_nix_sq *sq;
-       uint16_t qid;
        int rc;
 
        if (!txq)
                return;
 
        txq_sp = cnxk_eth_txq_to_sp(txq);
+
        dev = txq_sp->dev;
-       qid = txq_sp->qid;
 
        plt_nix_dbg("Releasing txq %u", qid);
 
@@ -464,7 +464,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
 
                plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
-               dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
+               dev_ops->rx_queue_release(eth_dev, qid);
                eth_dev->data->rx_queues[qid] = NULL;
        }
 
@@ -572,13 +572,13 @@ fail:
 }
 
 static void
-cnxk_nix_rx_queue_release(void *rxq)
+cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
+       void *rxq = eth_dev->data->rx_queues[qid];
        struct cnxk_eth_rxq_sp *rxq_sp;
        struct cnxk_eth_dev *dev;
        struct roc_nix_rq *rq;
        struct roc_nix_cq *cq;
-       uint16_t qid;
        int rc;
 
        if (!rxq)
@@ -586,7 +586,6 @@ cnxk_nix_rx_queue_release(void *rxq)
 
        rxq_sp = cnxk_eth_rxq_to_sp(rxq);
        dev = rxq_sp->dev;
-       qid = rxq_sp->qid;
        rq = &dev->rqs[qid];
 
        plt_nix_dbg("Releasing rxq %u", qid);
@@ -755,7 +754,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
                txq_sp = cnxk_eth_txq_to_sp(txq[i]);
                memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
                tx_qconf[i].valid = true;
-               dev_ops->tx_queue_release(txq[i]);
+               dev_ops->tx_queue_release(eth_dev, i);
                eth_dev->data->tx_queues[i] = NULL;
        }
 
@@ -769,7 +768,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
                rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
                memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
                rx_qconf[i].valid = true;
-               dev_ops->rx_queue_release(rxq[i]);
+               dev_ops->rx_queue_release(eth_dev, i);
                eth_dev->data->rx_queues[i] = NULL;
        }
 
@@ -791,7 +790,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
        struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
        int rc, i, nb_rxq, nb_txq;
-       void **txq, **rxq;
 
        nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
        nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
@@ -826,9 +824,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
                                             &tx_qconf[i].conf.tx);
                if (rc) {
                        plt_err("Failed to setup tx queue rc=%d", rc);
-                       txq = eth_dev->data->tx_queues;
                        for (i -= 1; i >= 0; i--)
-                               dev_ops->tx_queue_release(txq[i]);
+                               dev_ops->tx_queue_release(eth_dev, i);
                        goto fail;
                }
        }
@@ -844,9 +841,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
                                             rx_qconf[i].mp);
                if (rc) {
                        plt_err("Failed to setup rx queue rc=%d", rc);
-                       rxq = eth_dev->data->rx_queues;
                        for (i -= 1; i >= 0; i--)
-                               dev_ops->rx_queue_release(rxq[i]);
+                               dev_ops->rx_queue_release(eth_dev, i);
                        goto tx_queue_release;
                }
        }
@@ -857,9 +853,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        return 0;
 
 tx_queue_release:
-       txq = eth_dev->data->tx_queues;
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-               dev_ops->tx_queue_release(txq[i]);
+               dev_ops->tx_queue_release(eth_dev, i);
 fail:
        if (tx_qconf)
                free(tx_qconf);
@@ -1664,14 +1659,14 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
        /* Free up SQs */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-               dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
+               dev_ops->tx_queue_release(eth_dev, i);
                eth_dev->data->tx_queues[i] = NULL;
        }
        eth_dev->data->nb_tx_queues = 0;
 
        /* Free up RQ's and CQ's */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-               dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
+               dev_ops->rx_queue_release(eth_dev, i);
                eth_dev->data->rx_queues[i] = NULL;
        }
        eth_dev->data->nb_rx_queues = 0;
index 15d9548f79f3a6046be91475ccfa4c5fbe660e95..cd9aa9f84b63c998dc6f45a5a254ec4b5a463816 100644 (file)
@@ -532,7 +532,7 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 
        /*  Free up the existing queue  */
        if (eth_dev->data->tx_queues[queue_idx]) {
-               cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
+               cxgbe_dev_tx_queue_release(eth_dev, queue_idx);
                eth_dev->data->tx_queues[queue_idx] = NULL;
        }
 
@@ -565,9 +565,9 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        return err;
 }
 
-void cxgbe_dev_tx_queue_release(void *q)
+void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
-       struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
+       struct sge_eth_txq *txq = eth_dev->data->tx_queues[qid];
 
        if (txq) {
                struct port_info *pi = (struct port_info *)
@@ -655,7 +655,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
        /*  Free up the existing queue  */
        if (eth_dev->data->rx_queues[queue_idx]) {
-               cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
+               cxgbe_dev_rx_queue_release(eth_dev, queue_idx);
                eth_dev->data->rx_queues[queue_idx] = NULL;
        }
 
@@ -701,9 +701,9 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        return err;
 }
 
-void cxgbe_dev_rx_queue_release(void *q)
+void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
-       struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
+       struct sge_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
 
        if (rxq) {
                struct port_info *pi = (struct port_info *)
index 176bd4626738e600a16f12e5ca6dfb28d3085730..21035dd3199eca45f07f2bf591b34f5bb0b3ff7e 100644 (file)
@@ -16,8 +16,8 @@
         V_FW_PARAMS_PARAM_Y(0) | \
         V_FW_PARAMS_PARAM_Z(0))
 
-void cxgbe_dev_rx_queue_release(void *q);
-void cxgbe_dev_tx_queue_release(void *q);
+void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid);
+void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid);
 int cxgbe_dev_stop(struct rte_eth_dev *eth_dev);
 int cxgbe_dev_close(struct rte_eth_dev *eth_dev);
 int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
index c7bb7cffe1e20d088a0e2003212a32ab17be7914..ff8ae89922c75221394df3b50c45ba1140f1524e 100644 (file)
@@ -978,9 +978,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-dpaa2_dev_rx_queue_release(void *q __rte_unused)
+dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
+       struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
        struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
        struct fsl_mc_io *dpni =
                (struct fsl_mc_io *)priv->eth_dev->process_private;
index 3b4d9c3ee6f456ad015d62fce162a038b4719fd2..8e10e2777e6460d1ba9e43346bb525820a6515a6 100644 (file)
@@ -386,8 +386,8 @@ extern const struct rte_flow_ops igb_flow_ops;
 /*
  * RX/TX IGB function prototypes
  */
-void eth_igb_tx_queue_release(void *txq);
-void eth_igb_rx_queue_release(void *rxq);
+void eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void igb_dev_clear_queues(struct rte_eth_dev *dev);
 void igb_dev_free_queues(struct rte_eth_dev *dev);
 
@@ -462,8 +462,8 @@ uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);
 /*
  * RX/TX EM function prototypes
  */
-void eth_em_tx_queue_release(void *txq);
-void eth_em_rx_queue_release(void *rxq);
+void eth_em_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 void em_dev_clear_queues(struct rte_eth_dev *dev);
 void em_dev_free_queues(struct rte_eth_dev *dev);
index dfd8f2fd00745ce748881acc224881cc1b668440..00a8af6d39f95582ca5a24fc359345bcb89c2906 100644 (file)
@@ -1121,9 +1121,9 @@ em_tx_queue_release(struct em_tx_queue *txq)
 }
 
 void
-eth_em_tx_queue_release(void *txq)
+eth_em_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       em_tx_queue_release(txq);
+       em_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 /* (Re)set dynamic em_tx_queue fields to defaults */
@@ -1343,9 +1343,9 @@ em_rx_queue_release(struct em_rx_queue *rxq)
 }
 
 void
-eth_em_rx_queue_release(void *rxq)
+eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       em_rx_queue_release(rxq);
+       em_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 /* Reset dynamic em_rx_queue fields back to defaults */
@@ -1609,14 +1609,14 @@ em_dev_free_queues(struct rte_eth_dev *dev)
        uint16_t i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               eth_em_rx_queue_release(dev->data->rx_queues[i]);
+               eth_em_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               eth_em_tx_queue_release(dev->data->tx_queues[i]);
+               eth_em_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
index 278d5d2712af18707771c515a9259852e7134e36..d97ca1a01134e3a1f843f2788e849f256d0f48d0 100644 (file)
@@ -1281,9 +1281,9 @@ igb_tx_queue_release(struct igb_tx_queue *txq)
 }
 
 void
-eth_igb_tx_queue_release(void *txq)
+eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       igb_tx_queue_release(txq);
+       igb_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 static int
@@ -1606,9 +1606,9 @@ igb_rx_queue_release(struct igb_rx_queue *rxq)
 }
 
 void
-eth_igb_rx_queue_release(void *rxq)
+eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       igb_rx_queue_release(rxq);
+       igb_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 static void
@@ -1883,14 +1883,14 @@ igb_dev_free_queues(struct rte_eth_dev *dev)
        uint16_t i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               eth_igb_rx_queue_release(dev->data->rx_queues[i]);
+               eth_igb_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               eth_igb_tx_queue_release(dev->data->tx_queues[i]);
+               eth_igb_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
index 4cebf60a68a7565550626328ee98f14348783cbd..a82d4b62873672262db075fe0a4d0f285c03088e 100644 (file)
@@ -192,8 +192,8 @@ static int ena_dev_reset(struct rte_eth_dev *dev);
 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
 static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
 static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
-static void ena_rx_queue_release(void *queue);
-static void ena_tx_queue_release(void *queue);
+static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 static void ena_rx_queue_release_bufs(struct ena_ring *ring);
 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
 static int ena_link_update(struct rte_eth_dev *dev,
@@ -525,27 +525,25 @@ ena_dev_reset(struct rte_eth_dev *dev)
 
 static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
 {
-       struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
        int nb_queues = dev->data->nb_rx_queues;
        int i;
 
        for (i = 0; i < nb_queues; i++)
-               ena_rx_queue_release(queues[i]);
+               ena_rx_queue_release(dev, i);
 }
 
 static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
 {
-       struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
        int nb_queues = dev->data->nb_tx_queues;
        int i;
 
        for (i = 0; i < nb_queues; i++)
-               ena_tx_queue_release(queues[i]);
+               ena_tx_queue_release(dev, i);
 }
 
-static void ena_rx_queue_release(void *queue)
+static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct ena_ring *ring = (struct ena_ring *)queue;
+       struct ena_ring *ring = dev->data->rx_queues[qid];
 
        /* Free ring resources */
        if (ring->rx_buffer_info)
@@ -566,9 +564,9 @@ static void ena_rx_queue_release(void *queue)
                ring->port_id, ring->id);
 }
 
-static void ena_tx_queue_release(void *queue)
+static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct ena_ring *ring = (struct ena_ring *)queue;
+       struct ena_ring *ring = dev->data->tx_queues[qid];
 
        /* Free ring resources */
        if (ring->push_buf_intermediate_buf)
index b496cd470045dadf3c5343e1343b91c39e58aca9..246aff467248a3d4a80b7a2f4b44a0c8d78c734e 100644 (file)
@@ -325,8 +325,10 @@ fail:
 }
 
 static void
-enetc_tx_queue_release(void *txq)
+enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *txq = dev->data->tx_queues[qid];
+
        if (txq == NULL)
                return;
 
@@ -473,8 +475,10 @@ fail:
 }
 
 static void
-enetc_rx_queue_release(void *rxq)
+enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *rxq = dev->data->rx_queues[qid];
+
        if (rxq == NULL)
                return;
 
@@ -561,13 +565,13 @@ enetc_dev_close(struct rte_eth_dev *dev)
        ret = enetc_dev_stop(dev);
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               enetc_rx_queue_release(dev->data->rx_queues[i]);
+               enetc_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               enetc_tx_queue_release(dev->data->tx_queues[i]);
+               enetc_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
index 8d5797523b8f6cc2a43fe1471ad0f4c78df73d05..b03e56bc2500cf8c0e03723da307067e815decd0 100644 (file)
@@ -88,8 +88,10 @@ enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
        return 0;
 }
 
-static void enicpmd_dev_tx_queue_release(void *txq)
+static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *txq = dev->data->tx_queues[qid];
+
        ENICPMD_FUNC_TRACE();
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -223,8 +225,10 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
        return ret;
 }
 
-static void enicpmd_dev_rx_queue_release(void *rxq)
+static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *rxq = dev->data->rx_queues[qid];
+
        ENICPMD_FUNC_TRACE();
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
index 79dd6e5640dd50f978bedacb8bb752c1e159c6df..cfd02c03cce248d70b2d962d39803f5ad9e141cc 100644 (file)
@@ -70,8 +70,10 @@ static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
-static void enic_vf_dev_tx_queue_release(void *txq)
+static void enic_vf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *txq = dev->data->tx_queues[qid];
+
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
@@ -108,8 +110,10 @@ static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
-static void enic_vf_dev_rx_queue_release(void *rxq)
+static void enic_vf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *rxq = dev->data->rx_queues[qid];
+
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
index 5ff33e03e0340c516cfdbec7973de4329da43e09..d0030af0610bec1fae8e4b6b28fcd1b1201474a2 100644 (file)
@@ -358,26 +358,21 @@ fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 static void
-fs_rx_queue_release(void *queue)
+fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
-       struct rxq *rxq;
+       struct rxq *rxq = dev->data->rx_queues[qid];
 
-       if (queue == NULL)
+       if (rxq == NULL)
                return;
-       rxq = queue;
-       dev = &rte_eth_devices[rxq->priv->data->port_id];
        fs_lock(dev, 0);
        if (rxq->event_fd >= 0)
                close(rxq->event_fd);
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                if (ETH(sdev)->data->rx_queues != NULL &&
-                   ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
-                       SUBOPS(sdev, rx_queue_release)
-                               (ETH(sdev)->data->rx_queues[rxq->qid]);
-               }
+                   ETH(sdev)->data->rx_queues[rxq->qid] != NULL)
+                       SUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid);
        }
        dev->data->rx_queues[rxq->qid] = NULL;
        rte_free(rxq);
@@ -420,7 +415,7 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
        }
        rxq = dev->data->rx_queues[rx_queue_id];
        if (rxq != NULL) {
-               fs_rx_queue_release(rxq);
+               fs_rx_queue_release(dev, rx_queue_id);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
        rxq = rte_zmalloc(NULL,
@@ -460,7 +455,7 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
        fs_unlock(dev, 0);
        return 0;
 free_rxq:
-       fs_rx_queue_release(rxq);
+       fs_rx_queue_release(dev, rx_queue_id);
        fs_unlock(dev, 0);
        return ret;
 }
@@ -542,24 +537,19 @@ unlock:
 }
 
 static void
-fs_tx_queue_release(void *queue)
+fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
-       struct txq *txq;
+       struct txq *txq = dev->data->tx_queues[qid];
 
-       if (queue == NULL)
+       if (txq == NULL)
                return;
-       txq = queue;
-       dev = &rte_eth_devices[txq->priv->data->port_id];
        fs_lock(dev, 0);
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                if (ETH(sdev)->data->tx_queues != NULL &&
-                   ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
-                       SUBOPS(sdev, tx_queue_release)
-                               (ETH(sdev)->data->tx_queues[txq->qid]);
-               }
+                   ETH(sdev)->data->tx_queues[txq->qid] != NULL)
+                       SUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);
        }
        dev->data->tx_queues[txq->qid] = NULL;
        rte_free(txq);
@@ -591,7 +581,7 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
        }
        txq = dev->data->tx_queues[tx_queue_id];
        if (txq != NULL) {
-               fs_tx_queue_release(txq);
+               fs_tx_queue_release(dev, tx_queue_id);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
        txq = rte_zmalloc("ethdev TX queue",
@@ -623,7 +613,7 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
        fs_unlock(dev, 0);
        return 0;
 free_txq:
-       fs_tx_queue_release(txq);
+       fs_tx_queue_release(dev, tx_queue_id);
        fs_unlock(dev, 0);
        return ret;
 }
@@ -634,12 +624,12 @@ fs_dev_free_queues(struct rte_eth_dev *dev)
        uint16_t i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               fs_rx_queue_release(dev->data->rx_queues[i]);
+               fs_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               fs_tx_queue_release(dev->data->tx_queues[i]);
+               fs_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
index 3236290e4021c2b28fa38bf85d01a5599cf540d3..7075d69022c4a5fcffdabe1cb744329d2c103645 100644 (file)
@@ -51,8 +51,8 @@ static int
 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
        const u8 *mac, bool add, uint32_t pool);
-static void fm10k_tx_queue_release(void *queue);
-static void fm10k_rx_queue_release(void *queue);
+static void fm10k_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void fm10k_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
 static int fm10k_check_ftag(struct rte_devargs *devargs);
@@ -1210,7 +1210,7 @@ fm10k_dev_queue_release(struct rte_eth_dev *dev)
 
        if (dev->data->rx_queues) {
                for (i = 0; i < dev->data->nb_rx_queues; i++)
-                       fm10k_rx_queue_release(dev->data->rx_queues[i]);
+                       fm10k_rx_queue_release(dev, i);
        }
 }
 
@@ -1891,11 +1891,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 }
 
 static void
-fm10k_rx_queue_release(void *queue)
+fm10k_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
        PMD_INIT_FUNC_TRACE();
 
-       rx_queue_free(queue);
+       rx_queue_free(dev->data->rx_queues[qid]);
 }
 
 static inline int
@@ -2080,9 +2080,9 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 }
 
 static void
-fm10k_tx_queue_release(void *queue)
+fm10k_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct fm10k_tx_queue *q = queue;
+       struct fm10k_tx_queue *q = dev->data->tx_queues[qid];
        PMD_INIT_FUNC_TRACE();
 
        tx_queue_free(q);
index c01e2ec1d45090ffd84879a3ed82ffbe610ab78f..cd4dad8588f3b348a61ef792371846429f25ce79 100644 (file)
@@ -1075,12 +1075,14 @@ init_qp_fail:
 /**
  * DPDK callback to release the receive queue.
  *
- * @param queue
- *   Generic receive queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Receive queue index.
  */
-static void hinic_rx_queue_release(void *queue)
+static void hinic_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct hinic_rxq *rxq = queue;
+       struct hinic_rxq *rxq = dev->data->rx_queues[qid];
        struct hinic_nic_dev *nic_dev;
 
        if (!rxq) {
@@ -1107,12 +1109,14 @@ static void hinic_rx_queue_release(void *queue)
 /**
  * DPDK callback to release the transmit queue.
  *
- * @param queue
- *   Generic transmit queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Transmit queue index.
  */
-static void hinic_tx_queue_release(void *queue)
+static void hinic_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct hinic_txq *txq = queue;
+       struct hinic_txq *txq = dev->data->tx_queues[qid];
        struct hinic_nic_dev *nic_dev;
 
        if (!txq) {
index 481872e3957f8b6105b4441fa1e3e9ec78040737..6b77672aa1b454b69bb2948286865e6a801a38e8 100644 (file)
@@ -108,8 +108,8 @@ hns3_tx_queue_release(void *queue)
        }
 }
 
-void
-hns3_dev_rx_queue_release(void *queue)
+static void
+hns3_rx_queue_release_lock(void *queue)
 {
        struct hns3_rx_queue *rxq = queue;
        struct hns3_adapter *hns;
@@ -124,7 +124,13 @@ hns3_dev_rx_queue_release(void *queue)
 }
 
 void
-hns3_dev_tx_queue_release(void *queue)
+hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
+}
+
+static void
+hns3_tx_queue_release_lock(void *queue)
 {
        struct hns3_tx_queue *txq = queue;
        struct hns3_adapter *hns;
@@ -138,6 +144,12 @@ hns3_dev_tx_queue_release(void *queue)
        rte_spinlock_unlock(&hns->hw.lock);
 }
 
+void
+hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
+}
+
 static void
 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
 {
@@ -1536,7 +1548,7 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
                /* re-configure */
                rxq = hw->fkq_data.rx_queues;
                for (i = nb_queues; i < old_nb_queues; i++)
-                       hns3_dev_rx_queue_release(rxq[i]);
+                       hns3_rx_queue_release_lock(rxq[i]);
 
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
@@ -1551,7 +1563,7 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
        } else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
                rxq = hw->fkq_data.rx_queues;
                for (i = nb_queues; i < old_nb_queues; i++)
-                       hns3_dev_rx_queue_release(rxq[i]);
+                       hns3_rx_queue_release_lock(rxq[i]);
 
                rte_free(hw->fkq_data.rx_queues);
                hw->fkq_data.rx_queues = NULL;
@@ -1583,7 +1595,7 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
                /* re-configure */
                txq = hw->fkq_data.tx_queues;
                for (i = nb_queues; i < old_nb_queues; i++)
-                       hns3_dev_tx_queue_release(txq[i]);
+                       hns3_tx_queue_release_lock(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
@@ -1597,7 +1609,7 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
        } else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
                txq = hw->fkq_data.tx_queues;
                for (i = nb_queues; i < old_nb_queues; i++)
-                       hns3_dev_tx_queue_release(txq[i]);
+                       hns3_tx_queue_release_lock(txq[i]);
 
                rte_free(hw->fkq_data.tx_queues);
                hw->fkq_data.tx_queues = NULL;
index cd7c21c1d0c8d0f0e7f1f1b7133946f57800c7b2..bb309d38eddb2203795d5faf0a8ea96e338918bf 100644 (file)
@@ -677,8 +677,8 @@ hns3_write_txq_tail_reg(struct hns3_tx_queue *txq, uint32_t value)
                rte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
 }
 
-void hns3_dev_rx_queue_release(void *queue);
-void hns3_dev_tx_queue_release(void *queue);
+void hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
+void hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
 void hns3_free_all_queues(struct rte_eth_dev *dev);
 int hns3_reset_all_tqps(struct hns3_adapter *hns);
 void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
index af075fda2a2486f9a14b779687ea968b5be40568..105a6a657ffab5bf181a7275781f72f44b0f133e 100644 (file)
@@ -264,10 +264,10 @@ i40e_fdir_setup(struct i40e_pf *pf)
        return I40E_SUCCESS;
 
 fail_mem:
-       i40e_dev_rx_queue_release(pf->fdir.rxq);
+       i40e_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
 fail_setup_rx:
-       i40e_dev_tx_queue_release(pf->fdir.txq);
+       i40e_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
 fail_setup_tx:
        i40e_vsi_release(vsi);
@@ -302,10 +302,10 @@ i40e_fdir_teardown(struct i40e_pf *pf)
                PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
 
        rte_eth_dma_zone_free(dev, "fdir_rx_ring", pf->fdir.rxq->queue_id);
-       i40e_dev_rx_queue_release(pf->fdir.rxq);
+       i40e_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        rte_eth_dma_zone_free(dev, "fdir_tx_ring", pf->fdir.txq->queue_id);
-       i40e_dev_tx_queue_release(pf->fdir.txq);
+       i40e_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        i40e_vsi_release(vsi);
        pf->fdir.fdir_vsi = NULL;
index d5847ac6b5461967c493a3f5ac8dd36cd433c2fc..caab66e433c2cdfe85276b163745757891ba1aa5 100644 (file)
@@ -1975,7 +1975,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        /* Free memory if needed */
        if (dev->data->rx_queues[queue_idx]) {
-               i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               i40e_rx_queue_release(dev->data->rx_queues[queue_idx]);
                dev->data->rx_queues[queue_idx] = NULL;
        }
 
@@ -2019,7 +2019,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
                              ring_size, I40E_RING_BASE_ALIGN, socket_id);
        if (!rz) {
-               i40e_dev_rx_queue_release(rxq);
+               i40e_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
                return -ENOMEM;
        }
@@ -2039,7 +2039,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                   RTE_CACHE_LINE_SIZE,
                                   socket_id);
        if (!rxq->sw_ring) {
-               i40e_dev_rx_queue_release(rxq);
+               i40e_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
                return -ENOMEM;
        }
@@ -2062,7 +2062,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        if (dev->data->dev_started) {
                if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
-                       i40e_dev_rx_queue_release(rxq);
+                       i40e_rx_queue_release(rxq);
                        return -EINVAL;
                }
        } else {
@@ -2092,7 +2092,19 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 void
-i40e_dev_rx_queue_release(void *rxq)
+i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       i40e_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+i40e_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       i40e_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
+void
+i40e_rx_queue_release(void *rxq)
 {
        struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
 
@@ -2389,7 +2401,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Free memory if needed. */
        if (dev->data->tx_queues[queue_idx]) {
-               i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               i40e_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;
        }
 
@@ -2410,7 +2422,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
                              ring_size, I40E_RING_BASE_ALIGN, socket_id);
        if (!tz) {
-               i40e_dev_tx_queue_release(txq);
+               i40e_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
                return -ENOMEM;
        }
@@ -2438,7 +2450,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                                   RTE_CACHE_LINE_SIZE,
                                   socket_id);
        if (!txq->sw_ring) {
-               i40e_dev_tx_queue_release(txq);
+               i40e_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
                return -ENOMEM;
        }
@@ -2461,7 +2473,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        if (dev->data->dev_started) {
                if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
-                       i40e_dev_tx_queue_release(txq);
+                       i40e_tx_queue_release(txq);
                        return -EINVAL;
                }
        } else {
@@ -2477,7 +2489,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 void
-i40e_dev_tx_queue_release(void *txq)
+i40e_tx_queue_release(void *txq)
 {
        struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
 
@@ -3042,7 +3054,7 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (!dev->data->rx_queues[i])
                        continue;
-               i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+               i40e_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
@@ -3050,7 +3062,7 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (!dev->data->tx_queues[i])
                        continue;
-               i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+               i40e_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
@@ -3090,7 +3102,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
                                      I40E_FDIR_QUEUE_ID, ring_size,
                                      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!tz) {
-               i40e_dev_tx_queue_release(txq);
+               i40e_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
                return I40E_ERR_NO_MEMORY;
        }
@@ -3148,7 +3160,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
                                      I40E_FDIR_QUEUE_ID, ring_size,
                                      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!rz) {
-               i40e_dev_rx_queue_release(rxq);
+               i40e_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
                return I40E_ERR_NO_MEMORY;
        }
index 5ccf5773e857b443db5dfa2d6188120b735fd852..8d6ab16b4feec5f5751c98c7c21e9cdf6e5fc4c0 100644 (file)
@@ -197,8 +197,10 @@ int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                            uint16_t nb_desc,
                            unsigned int socket_id,
                            const struct rte_eth_txconf *tx_conf);
-void i40e_dev_rx_queue_release(void *rxq);
-void i40e_dev_tx_queue_release(void *txq);
+void i40e_rx_queue_release(void *rxq);
+void i40e_tx_queue_release(void *txq);
+void i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void i40e_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 uint16_t i40e_recv_pkts(void *rx_queue,
                        struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts);
index 87afc0b4cb9d125aaff570e495922966d2511df3..88661e5d74aaa4f3e5d40ed70a8cd51e8c4c6ddd 100644 (file)
@@ -562,7 +562,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
        /* Free memory if needed */
        if (dev->data->rx_queues[queue_idx]) {
-               iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               iavf_dev_rx_queue_release(dev, queue_idx);
                dev->data->rx_queues[queue_idx] = NULL;
        }
 
@@ -721,7 +721,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Free memory if needed. */
        if (dev->data->tx_queues[queue_idx]) {
-               iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               iavf_dev_tx_queue_release(dev, queue_idx);
                dev->data->tx_queues[queue_idx] = NULL;
        }
 
@@ -962,9 +962,9 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 void
-iavf_dev_rx_queue_release(void *rxq)
+iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
+       struct iavf_rx_queue *q = dev->data->rx_queues[qid];
 
        if (!q)
                return;
@@ -976,9 +976,9 @@ iavf_dev_rx_queue_release(void *rxq)
 }
 
 void
-iavf_dev_tx_queue_release(void *txq)
+iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
+       struct iavf_tx_queue *q = dev->data->tx_queues[qid];
 
        if (!q)
                return;
index e210b913d633a6b8b0dc2392af20735296b28123..c7a868cf1ddd38712efd6f295f0984f8cae753e5 100644 (file)
@@ -420,7 +420,7 @@ int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-void iavf_dev_rx_queue_release(void *rxq);
+void iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
@@ -430,7 +430,7 @@ int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
-void iavf_dev_tx_queue_release(void *txq);
+void iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void iavf_stop_queues(struct rte_eth_dev *dev);
 uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts);
index 4e4cdbcd7d71d79c953c769460551f6c58b47ca1..91f655874287563baf7347e9194d635f823d0fb4 100644 (file)
@@ -1060,8 +1060,8 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
        .dev_infos_get           = ice_dcf_dev_info_get,
        .rx_queue_setup          = ice_rx_queue_setup,
        .tx_queue_setup          = ice_tx_queue_setup,
-       .rx_queue_release        = ice_rx_queue_release,
-       .tx_queue_release        = ice_tx_queue_release,
+       .rx_queue_release        = ice_dev_rx_queue_release,
+       .tx_queue_release        = ice_dev_tx_queue_release,
        .rx_queue_start          = ice_dcf_rx_queue_start,
        .tx_queue_start          = ice_dcf_tx_queue_start,
        .rx_queue_stop           = ice_dcf_rx_queue_stop,
index 9ab7704ff0035701a9f34b7ad81c8275ba46337d..65e43a18f9f2449611f879fd22c0ca17216c3a23 100644 (file)
@@ -208,9 +208,9 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .tx_queue_start               = ice_tx_queue_start,
        .tx_queue_stop                = ice_tx_queue_stop,
        .rx_queue_setup               = ice_rx_queue_setup,
-       .rx_queue_release             = ice_rx_queue_release,
+       .rx_queue_release             = ice_dev_rx_queue_release,
        .tx_queue_setup               = ice_tx_queue_setup,
-       .tx_queue_release             = ice_tx_queue_release,
+       .tx_queue_release             = ice_dev_tx_queue_release,
        .dev_infos_get                = ice_dev_info_get,
        .dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
        .link_update                  = ice_link_update,
index 83fb788e6930d89204e3db789a9264b7e88b0117..3f9de55a422e816fc7a03581a76eb725836f2d98 100644 (file)
@@ -1391,6 +1391,18 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
        return 0;
 }
 
+void
+ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       ice_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       ice_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 void
 ice_tx_queue_release(void *txq)
 {
index eef76ffdc5b12b145ab78a95f78aeae08b65c19a..0a38740f4599deeea42f56185c08f92dfd7eeb05 100644 (file)
@@ -213,6 +213,8 @@ int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 void ice_rx_queue_release(void *rxq);
 void ice_tx_queue_release(void *txq);
+void ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void ice_free_queues(struct rte_eth_dev *dev);
 int ice_fdir_setup_tx_resources(struct ice_pf *pf);
 int ice_fdir_setup_rx_resources(struct ice_pf *pf);
index be2c0661116ad8752b1b24be348fb0c6febe1e6a..0e41c85d2963aff10c46a079139e91eacca87151 100644 (file)
@@ -1153,13 +1153,13 @@ igc_dev_free_queues(struct rte_eth_dev *dev)
        uint16_t i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               eth_igc_rx_queue_release(dev->data->rx_queues[i]);
+               eth_igc_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               eth_igc_tx_queue_release(dev->data->tx_queues[i]);
+               eth_igc_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
index b5489eedd220735da01e5b536ea58f2c66068f3b..7dee1bb0fa5f805d89f2f31a1b375bee10194c8f 100644 (file)
@@ -716,10 +716,10 @@ igc_rx_queue_release(struct igc_rx_queue *rxq)
        rte_free(rxq);
 }
 
-void eth_igc_rx_queue_release(void *rxq)
+void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       if (rxq)
-               igc_rx_queue_release(rxq);
+       if (dev->data->rx_queues[qid])
+               igc_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
@@ -1899,10 +1899,10 @@ igc_tx_queue_release(struct igc_tx_queue *txq)
        rte_free(txq);
 }
 
-void eth_igc_tx_queue_release(void *txq)
+void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       if (txq)
-               igc_tx_queue_release(txq);
+       if (dev->data->tx_queues[qid])
+               igc_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 static void
index f2b2d75bbc82dd5dd84ee8de45482b38e76ddd73..57bb87b3e4f99b4ab388140c4fb8c0759b3bb024 100644 (file)
@@ -14,8 +14,8 @@ extern "C" {
 /*
  * RX/TX function prototypes
  */
-void eth_igc_tx_queue_release(void *txq);
-void eth_igc_rx_queue_release(void *rxq);
+void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void igc_dev_clear_queues(struct rte_eth_dev *dev);
 int eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
index 431eda777b78b83cb42e15fad8ff1abb22d9a75e..a1f9ce2d81cb46122525981a50ad22463c533117 100644 (file)
@@ -1056,11 +1056,11 @@ ionic_lif_free_queues(struct ionic_lif *lif)
        uint32_t i;
 
        for (i = 0; i < lif->ntxqcqs; i++) {
-               ionic_dev_tx_queue_release(lif->eth_dev->data->tx_queues[i]);
+               ionic_dev_tx_queue_release(lif->eth_dev, i);
                lif->eth_dev->data->tx_queues[i] = NULL;
        }
        for (i = 0; i < lif->nrxqcqs; i++) {
-               ionic_dev_rx_queue_release(lif->eth_dev->data->rx_queues[i]);
+               ionic_dev_rx_queue_release(lif->eth_dev, i);
                lif->eth_dev->data->rx_queues[i] = NULL;
        }
 }
index b83ea1bcaa6abf12db5866f9258f342f87343b30..67631a5813b79a55e8229cc111a925cb58799eef 100644 (file)
@@ -118,9 +118,9 @@ ionic_tx_flush(struct ionic_tx_qcq *txq)
 }
 
 void __rte_cold
-ionic_dev_tx_queue_release(void *tx_queue)
+ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct ionic_tx_qcq *txq = tx_queue;
+       struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];
        struct ionic_tx_stats *stats = &txq->stats;
 
        IONIC_PRINT_CALL();
@@ -185,8 +185,7 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 
        /* Free memory prior to re-allocation if needed... */
        if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
-               void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
-               ionic_dev_tx_queue_release(tx_queue);
+               ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
                eth_dev->data->tx_queues[tx_queue_id] = NULL;
        }
 
@@ -664,9 +663,9 @@ ionic_rx_empty(struct ionic_rx_qcq *rxq)
 }
 
 void __rte_cold
-ionic_dev_rx_queue_release(void *rx_queue)
+ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct ionic_rx_qcq *rxq = rx_queue;
+       struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];
        struct ionic_rx_stats *stats;
 
        if (!rxq)
@@ -726,8 +725,7 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
        /* Free memory prior to re-allocation if needed... */
        if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
-               void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
-               ionic_dev_rx_queue_release(rx_queue);
+               ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
                eth_dev->data->rx_queues[rx_queue_id] = NULL;
        }
 
index 5c85b9c493aa8a74ccb7d879cd6e1ca707257b45..befbe61cef093c04918124551082040d3bf25c28 100644 (file)
@@ -25,14 +25,14 @@ uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        uint16_t nb_desc, uint32_t socket_id,
        const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);
-void ionic_dev_rx_queue_release(void *rxq);
+void ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 int ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);
 
 int ionic_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
        uint16_t nb_desc,  uint32_t socket_id,
        const struct rte_eth_txconf *tx_conf);
-void ionic_dev_tx_queue_release(void *tx_queue);
+void ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 int ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
 int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
index 645207e130d810d7ea18c37f46aa11019236e24a..49cf4f162e57c1f524eb7e736a6ab52bda13ad1f 100644 (file)
@@ -590,9 +590,9 @@ void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);
 
 void ixgbe_dev_free_queues(struct rte_eth_dev *dev);
 
-void ixgbe_dev_rx_queue_release(void *rxq);
+void ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
-void ixgbe_dev_tx_queue_release(void *txq);
+void ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 int  ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
index bfdfd5e755de0ccd4ee65630ea136abf76c50c94..176daaff9d76bf319e24552b6f5abe352fb989cd 100644 (file)
@@ -2487,9 +2487,9 @@ ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
 }
 
 void __rte_cold
-ixgbe_dev_tx_queue_release(void *txq)
+ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       ixgbe_tx_queue_release(txq);
+       ixgbe_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
@@ -2892,9 +2892,9 @@ ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
 }
 
 void __rte_cold
-ixgbe_dev_rx_queue_release(void *rxq)
+ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       ixgbe_rx_queue_release(rxq);
+       ixgbe_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 /*
@@ -3431,14 +3431,14 @@ ixgbe_dev_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+               ixgbe_dev_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "rx_ring", i);
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+               ixgbe_dev_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
                rte_eth_dma_zone_free(dev, "tx_ring", i);
        }
index b72060a4499bbd6b5b01664a50f5d916425ca204..dbdab188e9629091e9c9290a2285ddeb78685648 100644 (file)
@@ -1182,7 +1182,7 @@ lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
 
        /* Free previous allocation if any */
        if (eth_dev->data->rx_queues[q_no] != NULL) {
-               lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
+               lio_dev_rx_queue_release(eth_dev, q_no);
                eth_dev->data->rx_queues[q_no] = NULL;
        }
 
@@ -1204,16 +1204,18 @@ lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
  * Release the receive queue/ringbuffer. Called by
  * the upper layers.
  *
- * @param rxq
- *    Opaque pointer to the receive queue to release
+ * @param dev
+ *    Pointer to Ethernet device structure.
+ * @param q_no
+ *    Receive queue index.
  *
  * @return
  *    - nothing
  */
 void
-lio_dev_rx_queue_release(void *rxq)
+lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
 {
-       struct lio_droq *droq = rxq;
+       struct lio_droq *droq = dev->data->rx_queues[q_no];
        int oq_no;
 
        if (droq) {
@@ -1262,7 +1264,7 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
 
        /* Free previous allocation if any */
        if (eth_dev->data->tx_queues[q_no] != NULL) {
-               lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
+               lio_dev_tx_queue_release(eth_dev, q_no);
                eth_dev->data->tx_queues[q_no] = NULL;
        }
 
@@ -1292,16 +1294,18 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
  * Release the transmit queue/ringbuffer. Called by
  * the upper layers.
  *
- * @param txq
- *    Opaque pointer to the transmit queue to release
+ * @param dev
+ *    Pointer to Ethernet device structure.
+ * @param q_no
+ *    Transmit queue index.
  *
  * @return
  *    - nothing
  */
 void
-lio_dev_tx_queue_release(void *txq)
+lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
 {
-       struct lio_instr_queue *tq = txq;
+       struct lio_instr_queue *tq = dev->data->tx_queues[q_no];
        uint32_t fw_mapped_iq_no;
 
 
index d33be1c44dc5ba51f41acbb5d1a4a44eff1d9b6f..ece2b038580d172301d204a65a74dd192182c89e 100644 (file)
@@ -172,8 +172,8 @@ struct lio_rss_set {
        uint8_t key[LIO_RSS_MAX_KEY_SZ];
 };
 
-void lio_dev_rx_queue_release(void *rxq);
+void lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no);
 
-void lio_dev_tx_queue_release(void *txq);
+void lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no);
 
 #endif /* _LIO_ETHDEV_H_ */
index a067b60e47ee98f13450e02aa28e357c2af60696..616abec07007e6a3d4d03b5bc66a013636c65042 100644 (file)
@@ -1791,7 +1791,7 @@ lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                txq = eth_dev->data->tx_queues[i];
                if (txq != NULL) {
-                       lio_dev_tx_queue_release(txq);
+                       lio_dev_tx_queue_release(eth_dev, i);
                        eth_dev->data->tx_queues[i] = NULL;
                }
        }
@@ -1799,7 +1799,7 @@ lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                rxq = eth_dev->data->rx_queues[i];
                if (rxq != NULL) {
-                       lio_dev_rx_queue_release(rxq);
+                       lio_dev_rx_queue_release(eth_dev, i);
                        eth_dev->data->rx_queues[i] = NULL;
                }
        }
index fd9e877c3df792335bb4851e9a5f81b34cd26e8e..980150293e86fd34d189b39f950dc5c4429d28e5 100644 (file)
@@ -1258,9 +1258,9 @@ memif_dev_close(struct rte_eth_dev *dev)
                memif_disconnect(dev);
 
                for (i = 0; i < dev->data->nb_rx_queues; i++)
-                       (*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]);
+                       (*dev->dev_ops->rx_queue_release)(dev, i);
                for (i = 0; i < dev->data->nb_tx_queues; i++)
-                       (*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]);
+                       (*dev->dev_ops->tx_queue_release)(dev, i);
 
                memif_socket_remove_device(dev);
        } else {
@@ -1352,9 +1352,20 @@ memif_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-memif_queue_release(void *queue)
+memif_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct memif_queue *mq = (struct memif_queue *)queue;
+       struct memif_queue *mq = dev->data->rx_queues[qid];
+
+       if (!mq)
+               return;
+
+       rte_free(mq);
+}
+
+static void
+memif_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       struct memif_queue *mq = dev->data->tx_queues[qid];
 
        if (!mq)
                return;
@@ -1471,8 +1482,8 @@ static const struct eth_dev_ops ops = {
        .dev_configure = memif_dev_configure,
        .tx_queue_setup = memif_tx_queue_setup,
        .rx_queue_setup = memif_rx_queue_setup,
-       .rx_queue_release = memif_queue_release,
-       .tx_queue_release = memif_queue_release,
+       .rx_queue_release = memif_rx_queue_release,
+       .tx_queue_release = memif_tx_queue_release,
        .rx_queue_intr_enable = memif_rx_queue_intr_enable,
        .rx_queue_intr_disable = memif_rx_queue_intr_disable,
        .link_update = memif_link_update,
index 7f9f300c6cb74bba9b0f640473cc4b333e6e3cc3..f7fe831d61ae77c4159b66bf72f0d2dc1a30da83 100644 (file)
@@ -391,9 +391,9 @@ mlx4_dev_close(struct rte_eth_dev *dev)
        mlx4_flow_clean(priv);
        mlx4_rss_deinit(priv);
        for (i = 0; i != dev->data->nb_rx_queues; ++i)
-               mlx4_rx_queue_release(dev->data->rx_queues[i]);
+               mlx4_rx_queue_release(dev, i);
        for (i = 0; i != dev->data->nb_tx_queues; ++i)
-               mlx4_tx_queue_release(dev->data->tx_queues[i]);
+               mlx4_tx_queue_release(dev, i);
        mlx4_proc_priv_uninit(dev);
        mlx4_mr_release(dev);
        if (priv->pd != NULL) {
index 978cbb8201ea0d9ce9f36a9fcea4230e26a69f2c..2b75c07fad755413dbc9637d000dfc6ca68a0107 100644 (file)
@@ -826,6 +826,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                },
                .socket = socket,
        };
+       dev->data->rx_queues[idx] = rxq;
        /* Enable scattered packets support for this queue if necessary. */
        MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
        if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
@@ -896,12 +897,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                }
        }
        DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
-       dev->data->rx_queues[idx] = rxq;
        return 0;
 error:
-       dev->data->rx_queues[idx] = NULL;
        ret = rte_errno;
-       mlx4_rx_queue_release(rxq);
+       mlx4_rx_queue_release(dev, idx);
        rte_errno = ret;
        MLX4_ASSERT(rte_errno > 0);
        return -rte_errno;
@@ -910,26 +909,20 @@ error:
 /**
  * DPDK callback to release a Rx queue.
  *
- * @param dpdk_rxq
- *   Generic Rx queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Receive queue index.
  */
 void
-mlx4_rx_queue_release(void *dpdk_rxq)
+mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx)
 {
-       struct rxq *rxq = (struct rxq *)dpdk_rxq;
-       struct mlx4_priv *priv;
-       unsigned int i;
+       struct rxq *rxq = dev->data->rx_queues[idx];
 
        if (rxq == NULL)
                return;
-       priv = rxq->priv;
-       for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i)
-               if (ETH_DEV(priv)->data->rx_queues[i] == rxq) {
-                       DEBUG("%p: removing Rx queue %p from list",
-                             (void *)ETH_DEV(priv), (void *)rxq);
-                       ETH_DEV(priv)->data->rx_queues[i] = NULL;
-                       break;
-               }
+       dev->data->rx_queues[idx] = NULL;
+       DEBUG("%p: removing Rx queue %hu from list", (void *)dev, idx);
        MLX4_ASSERT(!rxq->cq);
        MLX4_ASSERT(!rxq->wq);
        MLX4_ASSERT(!rxq->wqes);
index c838afc242c4f6fddb0f3d66ae6bb66972395de2..83e9534cd0a7cab02be5ed655e4318f5714ba151 100644 (file)
@@ -141,7 +141,7 @@ int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
                        uint16_t desc, unsigned int socket,
                        const struct rte_eth_rxconf *conf,
                        struct rte_mempool *mp);
-void mlx4_rx_queue_release(void *dpdk_rxq);
+void mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx);
 
 /* mlx4_rxtx.c */
 
@@ -162,7 +162,7 @@ uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv);
 int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
                        uint16_t desc, unsigned int socket,
                        const struct rte_eth_txconf *conf);
-void mlx4_tx_queue_release(void *dpdk_txq);
+void mlx4_tx_queue_release(struct rte_eth_dev *dev, uint16_t idx);
 
 /* mlx4_mr.c */
 
index 2df26842fbe4b9876ff205112a408e64b7abff4b..7d8c4f2a2223afbe08783883c3243a97ba89eb8b 100644 (file)
@@ -404,6 +404,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                .lb = !!priv->vf,
                .bounce_buf = bounce_buf,
        };
+       dev->data->tx_queues[idx] = txq;
        priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_TX_QUEUE;
        priv->verbs_alloc_ctx.obj = txq;
        txq->cq = mlx4_glue->create_cq(priv->ctx, desc, NULL, NULL, 0);
@@ -507,13 +508,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        /* Save pointer of global generation number to check memory event. */
        txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
        DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
-       dev->data->tx_queues[idx] = txq;
        priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
        return 0;
 error:
-       dev->data->tx_queues[idx] = NULL;
        ret = rte_errno;
-       mlx4_tx_queue_release(txq);
+       mlx4_tx_queue_release(dev, idx);
        rte_errno = ret;
        MLX4_ASSERT(rte_errno > 0);
        priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
@@ -523,26 +522,20 @@ error:
 /**
  * DPDK callback to release a Tx queue.
  *
- * @param dpdk_txq
- *   Generic Tx queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Transmit queue index.
  */
 void
-mlx4_tx_queue_release(void *dpdk_txq)
+mlx4_tx_queue_release(struct rte_eth_dev *dev, uint16_t idx)
 {
-       struct txq *txq = (struct txq *)dpdk_txq;
-       struct mlx4_priv *priv;
-       unsigned int i;
+       struct txq *txq = dev->data->tx_queues[idx];
 
        if (txq == NULL)
                return;
-       priv = txq->priv;
-       for (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i)
-               if (ETH_DEV(priv)->data->tx_queues[i] == txq) {
-                       DEBUG("%p: removing Tx queue %p from list",
-                             (void *)ETH_DEV(priv), (void *)txq);
-                       ETH_DEV(priv)->data->tx_queues[i] = NULL;
-                       break;
-               }
+       DEBUG("%p: removing Tx queue %hu from list", (void *)dev, idx);
+       dev->data->tx_queues[idx] = NULL;
        mlx4_txq_free_elts(txq);
        if (txq->qp)
                claim_zero(mlx4_glue->destroy_qp(txq->qp));
index 3f2b99fb65f4b8acfbefa045113708ecfb044441..2b7ad3e48b32978574198e7ac567b24900e9c6f9 100644 (file)
@@ -191,7 +191,7 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 int mlx5_rx_hairpin_queue_setup
        (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
-void mlx5_rx_queue_release(void *dpdk_rxq);
+void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
 void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
 int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
index abd8ce798986c386801a0f69cdaf8b6bbb2f0316..b68443bed509fc689014805abfa3a97b437ccd30 100644 (file)
@@ -794,25 +794,22 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 /**
  * DPDK callback to release a RX queue.
  *
- * @param dpdk_rxq
- *   Generic RX queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Receive queue index.
  */
 void
-mlx5_rx_queue_release(void *dpdk_rxq)
+mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
-       struct mlx5_rxq_ctrl *rxq_ctrl;
-       struct mlx5_priv *priv;
+       struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];
 
        if (rxq == NULL)
                return;
-       rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
-       priv = rxq_ctrl->priv;
-       if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
+       if (!mlx5_rxq_releasable(dev, qid))
                rte_panic("port %u Rx queue %u is still used by a flow and"
-                         " cannot be removed\n",
-                         PORT_ID(priv), rxq->idx);
-       mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
+                         " cannot be removed\n", dev->data->port_id, qid);
+       mlx5_rxq_release(dev, qid);
 }
 
 /**
index 77d606975551123f2b745bbbd3f8741ecb2c7a3a..e722738682e284f0cd60228812ea749766883f44 100644 (file)
@@ -204,7 +204,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 int mlx5_tx_hairpin_queue_setup
        (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
-void mlx5_tx_queue_release(void *dpdk_txq);
+void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
 int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
 void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
index eb4d34ca559e766eab143ba5f414daebbfa33e32..92fbdab56895b9934bcf173a4722d0153057cacc 100644 (file)
@@ -470,28 +470,21 @@ mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 /**
  * DPDK callback to release a TX queue.
  *
- * @param dpdk_txq
- *   Generic TX queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Transmit queue index.
  */
 void
-mlx5_tx_queue_release(void *dpdk_txq)
+mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
-       struct mlx5_txq_ctrl *txq_ctrl;
-       struct mlx5_priv *priv;
-       unsigned int i;
+       struct mlx5_txq_data *txq = dev->data->tx_queues[qid];
 
        if (txq == NULL)
                return;
-       txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
-       priv = txq_ctrl->priv;
-       for (i = 0; (i != priv->txqs_n); ++i)
-               if ((*priv->txqs)[i] == txq) {
-                       DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
-                               PORT_ID(priv), txq->idx);
-                       mlx5_txq_release(ETH_DEV(priv), i);
-                       break;
-               }
+       DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
+               dev->data->port_id, qid);
+       mlx5_txq_release(dev, qid);
 }
 
 /**
index a3ee150204665b6638fe1c1168a64a5bf0cd84d2..f51bc2258fe195201cf364415c5757f3f4b75bf8 100644 (file)
@@ -446,12 +446,12 @@ mvneta_dev_close(struct rte_eth_dev *dev)
                ret = mvneta_dev_stop(dev);
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               mvneta_rx_queue_release(dev->data->rx_queues[i]);
+               mvneta_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               mvneta_tx_queue_release(dev->data->tx_queues[i]);
+               mvneta_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
 
index dfa7ecc09039a1d1820a402b6cfd9db7cb5332a5..2d61930382cb57772a1e57bece6695d3fe9b3140 100644 (file)
@@ -796,13 +796,15 @@ mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 /**
  * DPDK callback to release the transmit queue.
  *
- * @param txq
- *   Generic transmit queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Transmit queue index.
  */
 void
-mvneta_tx_queue_release(void *txq)
+mvneta_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct mvneta_txq *q = txq;
+       struct mvneta_txq *q = dev->data->tx_queues[qid];
 
        if (!q)
                return;
@@ -959,13 +961,15 @@ mvneta_flush_queues(struct rte_eth_dev *dev)
 /**
  * DPDK callback to release the receive queue.
  *
- * @param rxq
- *   Generic receive queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Receive queue index.
  */
 void
-mvneta_rx_queue_release(void *rxq)
+mvneta_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct mvneta_rxq *q = rxq;
+       struct mvneta_rxq *q = dev->data->rx_queues[qid];
 
        if (!q)
                return;
@@ -978,7 +982,7 @@ mvneta_rx_queue_release(void *rxq)
        if (q->priv->ppio)
                mvneta_rx_queue_flush(q);
 
-       rte_free(rxq);
+       rte_free(q);
 }
 
 /**
index cc29190177249efcf4583c442e2d76627b6192d7..41b7539a577ddbd7814c81e383285a022bbbb030 100644 (file)
@@ -32,7 +32,7 @@ int
 mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                      unsigned int socket, const struct rte_eth_txconf *conf);
 
-void mvneta_rx_queue_release(void *rxq);
-void mvneta_tx_queue_release(void *txq);
+void mvneta_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void mvneta_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 #endif /* _MVNETA_RXTX_H_ */
index 078aefbb8da47b1c9257a59cb76dc4a70d30dcf0..65d011300a978a49e9fb83f5cb039b3f54858a92 100644 (file)
@@ -2059,13 +2059,15 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 /**
  * DPDK callback to release the receive queue.
  *
- * @param rxq
- *   Generic receive queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Receive queue index.
  */
 static void
-mrvl_rx_queue_release(void *rxq)
+mrvl_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct mrvl_rxq *q = rxq;
+       struct mrvl_rxq *q = dev->data->rx_queues[qid];
        struct pp2_ppio_tc_params *tc_params;
        int i, num, tc, inq;
        struct pp2_hif *hif;
@@ -2146,13 +2148,15 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 /**
  * DPDK callback to release the transmit queue.
  *
- * @param txq
- *   Generic transmit queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Transmit queue index.
  */
 static void
-mrvl_tx_queue_release(void *txq)
+mrvl_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct mrvl_txq *q = txq;
+       struct mrvl_txq *q = dev->data->tx_queues[qid];
 
        if (!q)
                return;
index c6bf7cc132335438ea44c074a533185149fc912c..e880dc2bb2a455bb70298b7a48d03e333b30b3f3 100644 (file)
@@ -356,9 +356,9 @@ static void hn_txd_put(struct hn_tx_queue *txq, struct hn_txdesc *txd)
 }
 
 void
-hn_dev_tx_queue_release(void *arg)
+hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct hn_tx_queue *txq = arg;
+       struct hn_tx_queue *txq = dev->data->tx_queues[qid];
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1004,9 +1004,9 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
 }
 
 void
-hn_dev_rx_queue_release(void *arg)
+hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct hn_rx_queue *rxq = arg;
+       struct hn_rx_queue *rxq = dev->data->rx_queues[qid];
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1648,7 +1648,7 @@ hn_dev_free_queues(struct rte_eth_dev *dev)
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               hn_dev_tx_queue_release(dev->data->tx_queues[i]);
+               hn_dev_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
index 43642408bc94b815cd56f9889fa7a854966a1ca9..2cd1f8a881873e2d2c873ebc7bed13481638c09d 100644 (file)
@@ -198,7 +198,7 @@ int hn_dev_link_update(struct rte_eth_dev *dev, int wait);
 int    hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                              uint16_t nb_desc, unsigned int socket_id,
                              const struct rte_eth_txconf *tx_conf);
-void   hn_dev_tx_queue_release(void *arg);
+void   hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void   hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
                             struct rte_eth_txq_info *qinfo);
 int    hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
@@ -214,7 +214,7 @@ int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
                              struct rte_mempool *mp);
 void   hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
                             struct rte_eth_rxq_info *qinfo);
-void   hn_dev_rx_queue_release(void *arg);
+void   hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
 int    hn_dev_rx_queue_status(void *rxq, uint16_t offset);
 void   hn_dev_free_queues(struct rte_eth_dev *dev);
index 75192e6319780a98ed9b3936370e7048a758ec11..fead8eba5de71678b975e07616d46e820a0958d0 100644 (file)
@@ -624,11 +624,8 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
 
        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
-       if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
-               void *subq = vf_dev->data->tx_queues[queue_id];
-
-               (*vf_dev->dev_ops->tx_queue_release)(subq);
-       }
+       if (vf_dev && vf_dev->dev_ops->tx_queue_release)
+               (*vf_dev->dev_ops->tx_queue_release)(vf_dev, queue_id);
 
        rte_rwlock_read_unlock(&hv->vf_lock);
 }
@@ -659,11 +656,8 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
 
        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
-       if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
-               void *subq = vf_dev->data->rx_queues[queue_id];
-
-               (*vf_dev->dev_ops->rx_queue_release)(subq);
-       }
+       if (vf_dev && vf_dev->dev_ops->rx_queue_release)
+               (*vf_dev->dev_ops->rx_queue_release)(vf_dev, queue_id);
        rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
index 7e91d5984740f61067e2ae4f677a118ebab950d7..99d93ebf46673917a09e4f7d8f5078161896fc5b 100644 (file)
@@ -231,12 +231,12 @@ nfb_eth_dev_close(struct rte_eth_dev *dev)
        nfb_nc_txmac_deinit(internals->txmac, internals->max_txmac);
 
        for (i = 0; i < nb_rx; i++) {
-               nfb_eth_rx_queue_release(dev->data->rx_queues[i]);
+               nfb_eth_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < nb_tx; i++) {
-               nfb_eth_tx_queue_release(dev->data->tx_queues[i]);
+               nfb_eth_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
index d6d4ba9663c6eab1cec94677f73a4d02b2ce7166..3ebb332ae46ceff71f19889d7b02954dff7dc9d8 100644 (file)
@@ -176,9 +176,10 @@ nfb_eth_rx_queue_init(struct nfb_device *nfb,
 }
 
 void
-nfb_eth_rx_queue_release(void *q)
+nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct ndp_rx_queue *rxq = (struct ndp_rx_queue *)q;
+       struct ndp_rx_queue *rxq = dev->data->rx_queues[qid];
+
        if (rxq->queue != NULL) {
                ndp_close_rx_queue(rxq->queue);
                rte_free(rxq);
index c9708259af17485a27adfb02b8d316af895d152a..638205d53c765428766ae3c68af4e4f65deeaf4f 100644 (file)
@@ -94,11 +94,13 @@ nfb_eth_rx_queue_setup(struct rte_eth_dev *dev,
 /**
  * DPDK callback to release a RX queue.
  *
- * @param dpdk_rxq
- *   Generic RX queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Receive queue index.
  */
 void
-nfb_eth_rx_queue_release(void *q);
+nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 /**
  * Start traffic on Rx queue.
index 9b912feb1deedd57fc40d9914f0bab54a140faed..d49fc324e76b0a81bda62c9155f14c431140450d 100644 (file)
@@ -102,9 +102,10 @@ nfb_eth_tx_queue_init(struct nfb_device *nfb,
 }
 
 void
-nfb_eth_tx_queue_release(void *q)
+nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct ndp_tx_queue *txq = (struct ndp_tx_queue *)q;
+       struct ndp_tx_queue *txq = dev->data->tx_queues[qid];
+
        if (txq->queue != NULL) {
                ndp_close_tx_queue(txq->queue);
                rte_free(txq);
index 28daeae0b8e80f9463b902b9205d7c85acbba096..d3cbe3e6b3f7cca38e35c958bc3477db69d0ba0f 100644 (file)
@@ -70,11 +70,13 @@ nfb_eth_tx_queue_init(struct nfb_device *nfb,
 /**
  * DPDK callback to release a RX queue.
  *
- * @param dpdk_rxq
- *   Generic RX queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Transmit queue index.
  */
 void
-nfb_eth_tx_queue_release(void *q);
+nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 /**
  * Start traffic on Tx queue.
index 1402c5f84a0773b6559bdcdfd074d518e735ad62..feeacb56147f570cccac5fec97ddabc680722bea 100644 (file)
@@ -464,9 +464,9 @@ nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
 }
 
 void
-nfp_net_rx_queue_release(void *rx_queue)
+nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       struct nfp_net_rxq *rxq = rx_queue;
+       struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx];
 
        if (rxq) {
                nfp_net_rx_queue_release_mbufs(rxq);
@@ -513,7 +513,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
         * calling nfp_net_stop
         */
        if (dev->data->rx_queues[queue_idx]) {
-               nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               nfp_net_rx_queue_release(dev, queue_idx);
                dev->data->rx_queues[queue_idx] = NULL;
        }
 
@@ -523,6 +523,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
        if (rxq == NULL)
                return -ENOMEM;
 
+       dev->data->rx_queues[queue_idx] = rxq;
+
        /* Hw queues mapping based on firmware configuration */
        rxq->qidx = queue_idx;
        rxq->fl_qcidx = queue_idx * hw->stride_rx;
@@ -556,7 +558,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 
        if (tz == NULL) {
                PMD_DRV_LOG(ERR, "Error allocating rx dma");
-               nfp_net_rx_queue_release(rxq);
+               nfp_net_rx_queue_release(dev, queue_idx);
+               dev->data->rx_queues[queue_idx] = NULL;
                return -ENOMEM;
        }
 
@@ -569,7 +572,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
                                         sizeof(*rxq->rxbufs) * nb_desc,
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->rxbufs == NULL) {
-               nfp_net_rx_queue_release(rxq);
+               nfp_net_rx_queue_release(dev, queue_idx);
+               dev->data->rx_queues[queue_idx] = NULL;
                return -ENOMEM;
        }
 
@@ -578,7 +582,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 
        nfp_net_reset_rx_queue(rxq);
 
-       dev->data->rx_queues[queue_idx] = rxq;
        rxq->hw = hw;
 
        /*
@@ -651,9 +654,9 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
 }
 
 void
-nfp_net_tx_queue_release(void *tx_queue)
+nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       struct nfp_net_txq *txq = tx_queue;
+       struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
 
        if (txq) {
                nfp_net_tx_queue_release_mbufs(txq);
@@ -714,7 +717,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (dev->data->tx_queues[queue_idx]) {
                PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                           queue_idx);
-               nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               nfp_net_tx_queue_release(dev, queue_idx);
                dev->data->tx_queues[queue_idx] = NULL;
        }
 
@@ -726,6 +729,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                return -ENOMEM;
        }
 
+       dev->data->tx_queues[queue_idx] = txq;
+
        /*
         * Allocate TX ring hardware descriptors. A memzone large enough to
         * handle the maximum ring size is allocated in order to allow for
@@ -737,7 +742,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                   socket_id);
        if (tz == NULL) {
                PMD_DRV_LOG(ERR, "Error allocating tx dma");
-               nfp_net_tx_queue_release(txq);
+               nfp_net_tx_queue_release(dev, queue_idx);
+               dev->data->tx_queues[queue_idx] = NULL;
                return -ENOMEM;
        }
 
@@ -763,7 +769,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                         sizeof(*txq->txbufs) * nb_desc,
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->txbufs == NULL) {
-               nfp_net_tx_queue_release(txq);
+               nfp_net_tx_queue_release(dev, queue_idx);
+               dev->data->tx_queues[queue_idx] = NULL;
                return -ENOMEM;
        }
        PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
@@ -771,7 +778,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
        nfp_net_reset_tx_queue(txq);
 
-       dev->data->tx_queues[queue_idx] = txq;
        txq->hw = hw;
 
        /*
index b0a8bf81b0f77966cda6478f0ba8d10079dd1e47..ab49898605df5d4c7f4cf07fcb4bb777c32a49fa 100644 (file)
@@ -279,13 +279,13 @@ uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
                                       uint16_t queue_idx);
 uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts);
-void nfp_net_rx_queue_release(void *rxq);
+void nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 void nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq);
 int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                  uint16_t nb_desc, unsigned int socket_id,
                                  const struct rte_eth_rxconf *rx_conf,
                                  struct rte_mempool *mp);
-void nfp_net_tx_queue_release(void *txq);
+void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 void nfp_net_reset_tx_queue(struct nfp_net_txq *txq);
 int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                  uint16_t nb_desc, unsigned int socket_id,
index 7fb72f3f1f8c5068e9f9b84057eb6ec9a5cd29f5..c039e7dcc3068b8cc6759f76c82227953b93e7a3 100644 (file)
@@ -69,9 +69,9 @@ void ngbe_dev_clear_queues(struct rte_eth_dev *dev);
 
 void ngbe_dev_free_queues(struct rte_eth_dev *dev);
 
-void ngbe_dev_rx_queue_release(void *rxq);
+void ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
-void ngbe_dev_tx_queue_release(void *txq);
+void ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 int  ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
index 5c06e0d55086d1a798052ef1eca1c0dc7e4e53f3..d508015bd2689dbcba6988338b90d484a9b55b4f 100644 (file)
@@ -453,9 +453,9 @@ ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
 }
 
 void
-ngbe_dev_tx_queue_release(void *txq)
+ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       ngbe_tx_queue_release(txq);
+       ngbe_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 /* (Re)set dynamic ngbe_tx_queue fields to defaults */
@@ -673,9 +673,9 @@ ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
 }
 
 void
-ngbe_dev_rx_queue_release(void *rxq)
+ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       ngbe_rx_queue_release(rxq);
+       ngbe_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 /*
@@ -916,13 +916,13 @@ ngbe_dev_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               ngbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+               ngbe_dev_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               ngbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+               ngbe_dev_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
index 508bafc12a1462b40e9d419b9432a95440dc2243..25b9e5b1ce1bc11acf92dac7fa680187605b6d0b 100644 (file)
@@ -353,14 +353,24 @@ eth_stats_reset(struct rte_eth_dev *dev)
 }
 
 static void
-eth_queue_release(void *q)
+eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct null_queue *nq;
+       struct null_queue *nq = dev->data->rx_queues[qid];
 
-       if (q == NULL)
+       if (nq == NULL)
+               return;
+
+       rte_free(nq->dummy_packet);
+}
+
+static void
+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       struct null_queue *nq = dev->data->tx_queues[qid];
+
+       if (nq == NULL)
                return;
 
-       nq = q;
        rte_free(nq->dummy_packet);
 }
 
@@ -483,8 +493,8 @@ static const struct eth_dev_ops ops = {
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
-       .rx_queue_release = eth_queue_release,
-       .tx_queue_release = eth_queue_release,
+       .rx_queue_release = eth_rx_queue_release,
+       .tx_queue_release = eth_tx_queue_release,
        .mtu_set = eth_mtu_set,
        .link_update = eth_link_update,
        .mac_addr_set = eth_mac_address_set,
index 9f4c0503b4d4ece860bfd92484f46b18c1fadf9e..7c91494f0e28ce63ea80bc235976c292b5a1504e 100644 (file)
@@ -971,20 +971,18 @@ octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
 }
 
 static void
-octeontx_dev_tx_queue_release(void *tx_queue)
+octeontx_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct octeontx_txq *txq = tx_queue;
        int res;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (txq) {
-               res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
+       if (dev->data->tx_queues[qid]) {
+               res = octeontx_dev_tx_queue_stop(dev, qid);
                if (res < 0)
-                       octeontx_log_err("failed stop tx_queue(%d)\n",
-                                  txq->queue_id);
+                       octeontx_log_err("failed stop tx_queue(%d)\n", qid);
 
-               rte_free(txq);
+               rte_free(dev->data->tx_queues[qid]);
        }
 }
 
@@ -1013,7 +1011,7 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        if (dev->data->tx_queues[qidx] != NULL) {
                PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
                                qidx);
-               octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
+               octeontx_dev_tx_queue_release(dev, qidx);
                dev->data->tx_queues[qidx] = NULL;
        }
 
@@ -1221,9 +1219,9 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 }
 
 static void
-octeontx_dev_rx_queue_release(void *rxq)
+octeontx_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       rte_free(rxq);
+       rte_free(dev->data->rx_queues[qid]);
 }
 
 static const uint32_t *
index 75d4cabf2e7c184f5c4e498d0c7d328ae9faafe0..d576bc6989266d16c9f74ebbc19527a4c1cc22e8 100644 (file)
@@ -555,16 +555,17 @@ otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
 }
 
 static void
-otx2_nix_rx_queue_release(void *rx_queue)
+otx2_nix_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct otx2_eth_rxq *rxq = rx_queue;
+       struct otx2_eth_rxq *rxq = dev->data->rx_queues[qid];
 
        if (!rxq)
                return;
 
        otx2_nix_dbg("Releasing rxq %u", rxq->rq);
        nix_cq_rq_uninit(rxq->eth_dev, rxq);
-       rte_free(rx_queue);
+       rte_free(rxq);
+       dev->data->rx_queues[qid] = NULL;
 }
 
 static int
@@ -608,9 +609,8 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
        /* Free memory prior to re-allocation if needed */
        if (eth_dev->data->rx_queues[rq] != NULL) {
                otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
-               otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
+               otx2_nix_rx_queue_release(eth_dev, rq);
                rte_eth_dma_zone_free(eth_dev, "cq", rq);
-               eth_dev->data->rx_queues[rq] = NULL;
        }
 
        offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
@@ -641,6 +641,8 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
        rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
        rxq->tstamp = &dev->tstamp;
 
+       eth_dev->data->rx_queues[rq] = rxq;
+
        /* Alloc completion queue */
        rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
        if (rc) {
@@ -657,7 +659,6 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
        otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
                     rq, mp->name, qsize, nb_desc, rxq->qlen);
 
-       eth_dev->data->rx_queues[rq] = rxq;
        eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
 
        /* Calculating delta and freq mult between PTP HI clock and tsc.
@@ -679,7 +680,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
        return 0;
 
 free_rxq:
-       otx2_nix_rx_queue_release(rxq);
+       otx2_nix_rx_queue_release(eth_dev, rq);
 fail:
        return rc;
 }
@@ -1217,16 +1218,13 @@ otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
 }
 
 static void
-otx2_nix_tx_queue_release(void *_txq)
+otx2_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
-       struct otx2_eth_txq *txq = _txq;
-       struct rte_eth_dev *eth_dev;
+       struct otx2_eth_txq *txq = eth_dev->data->tx_queues[qid];
 
        if (!txq)
                return;
 
-       eth_dev = txq->dev->eth_dev;
-
        otx2_nix_dbg("Releasing txq %u", txq->sq);
 
        /* Flush and disable tm */
@@ -1241,6 +1239,7 @@ otx2_nix_tx_queue_release(void *_txq)
        }
        otx2_nix_sq_flush_post(txq);
        rte_free(txq);
+       eth_dev->data->tx_queues[qid] = NULL;
 }
 
 
@@ -1268,8 +1267,7 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
        /* Free memory prior to re-allocation if needed. */
        if (eth_dev->data->tx_queues[sq] != NULL) {
                otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
-               otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
-               eth_dev->data->tx_queues[sq] = NULL;
+               otx2_nix_tx_queue_release(eth_dev, sq);
        }
 
        /* Find the expected offloads for this queue */
@@ -1288,6 +1286,7 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
        txq->sqb_pool = NULL;
        txq->offloads = offloads;
        dev->tx_offloads |= offloads;
+       eth_dev->data->tx_queues[sq] = txq;
 
        /*
         * Allocate memory for flow control updates from HW.
@@ -1334,12 +1333,11 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
                     " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
                     fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
                     txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
-       eth_dev->data->tx_queues[sq] = txq;
        eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
 
 free_txq:
-       otx2_nix_tx_queue_release(txq);
+       otx2_nix_tx_queue_release(eth_dev, sq);
 fail:
        return rc;
 }
@@ -1378,8 +1376,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
                }
                memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
                tx_qconf[i].valid = true;
-               otx2_nix_tx_queue_release(txq[i]);
-               eth_dev->data->tx_queues[i] = NULL;
+               otx2_nix_tx_queue_release(eth_dev, i);
        }
 
        rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
@@ -1391,8 +1388,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
                }
                memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
                rx_qconf[i].valid = true;
-               otx2_nix_rx_queue_release(rxq[i]);
-               eth_dev->data->rx_queues[i] = NULL;
+               otx2_nix_rx_queue_release(eth_dev, i);
        }
 
        dev->tx_qconf = tx_qconf;
@@ -1412,8 +1408,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
        struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
        struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
-       struct otx2_eth_txq **txq;
-       struct otx2_eth_rxq **rxq;
        int rc, i, nb_rxq, nb_txq;
 
        nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
@@ -1450,9 +1444,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
                                             &tx_qconf[i].conf.tx);
                if (rc) {
                        otx2_err("Failed to setup tx queue rc=%d", rc);
-                       txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
                        for (i -= 1; i >= 0; i--)
-                               otx2_nix_tx_queue_release(txq[i]);
+                               otx2_nix_tx_queue_release(eth_dev, i);
                        goto fail;
                }
        }
@@ -1468,9 +1461,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
                                             rx_qconf[i].mempool);
                if (rc) {
                        otx2_err("Failed to setup rx queue rc=%d", rc);
-                       rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
                        for (i -= 1; i >= 0; i--)
-                               otx2_nix_rx_queue_release(rxq[i]);
+                               otx2_nix_rx_queue_release(eth_dev, i);
                        goto release_tx_queues;
                }
        }
@@ -1480,9 +1472,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        return 0;
 
 release_tx_queues:
-       txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-               otx2_nix_tx_queue_release(txq[i]);
+               otx2_nix_tx_queue_release(eth_dev, i);
 fail:
        if (tx_qconf)
                free(tx_qconf);
@@ -2647,17 +2638,13 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
        dev->ops = NULL;
 
        /* Free up SQs */
-       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-               otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
-               eth_dev->data->tx_queues[i] = NULL;
-       }
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+               otx2_nix_tx_queue_release(eth_dev, i);
        eth_dev->data->nb_tx_queues = 0;
 
        /* Free up RQ's and CQ's */
-       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-               otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
-               eth_dev->data->rx_queues[i] = NULL;
-       }
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+               otx2_nix_rx_queue_release(eth_dev, i);
        eth_dev->data->nb_rx_queues = 0;
 
        /* Free tm resources */
index a243683d61d379ca8f0333b1994ac54d35bd9f2b..eed0e05a8fc1aa5db2fa7fa3619157c89e797e08 100644 (file)
@@ -248,16 +248,18 @@ otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
  * Release the receive queue/ringbuffer. Called by
  * the upper layers.
  *
- * @param rxq
- *    Opaque pointer to the receive queue to release
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param q_no
+ *   Receive queue index.
  *
  * @return
  *    - nothing
  */
 static void
-otx_ep_rx_queue_release(void *rxq)
+otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
 {
-       struct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;
+       struct otx_ep_droq *rq = dev->data->rx_queues[q_no];
        struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
        int q_id = rq->q_no;
 
@@ -321,16 +323,18 @@ otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
  * Release the transmit queue/ringbuffer. Called by
  * the upper layers.
  *
- * @param txq
- *    Opaque pointer to the transmit queue to release
+ * @param dev
+ *    Pointer to Ethernet device structure.
+ * @param q_no
+ *    Transmit queue index.
  *
  * @return
  *    - nothing
  */
 static void
-otx_ep_tx_queue_release(void *txq)
+otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
 {
-       struct otx_ep_instr_queue *tq = (struct otx_ep_instr_queue *)txq;
+       struct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];
 
        otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
 }
index a4304e0eff448c866535071032870fd1c04a17cf..fd8c62a1826b7a0772f02bbcc614a31654c2fa49 100644 (file)
@@ -2396,13 +2396,25 @@ qede_dev_reset(struct rte_eth_dev *dev)
        return qede_eth_dev_init(dev);
 }
 
+static void
+qede_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       qede_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+static void
+qede_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       qede_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 static const struct eth_dev_ops qede_eth_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
-       .rx_queue_release = qede_rx_queue_release,
+       .rx_queue_release = qede_dev_rx_queue_release,
        .tx_queue_setup = qede_tx_queue_setup,
-       .tx_queue_release = qede_tx_queue_release,
+       .tx_queue_release = qede_dev_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_reset = qede_dev_reset,
        .dev_set_link_up = qede_dev_set_link_up,
@@ -2444,9 +2456,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
-       .rx_queue_release = qede_rx_queue_release,
+       .rx_queue_release = qede_dev_rx_queue_release,
        .tx_queue_setup = qede_tx_queue_setup,
-       .tx_queue_release = qede_tx_queue_release,
+       .tx_queue_release = qede_dev_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_reset = qede_dev_reset,
        .dev_set_link_up = qede_dev_set_link_up,
index f212ca8ad6b2319ed36b8b716351b59840fddbd0..9dc5e5b3a3d4eda7747c05c93a48e07ad7132893 100644 (file)
@@ -504,9 +504,9 @@ fail_rx_qinit:
 }
 
 static void
-sfc_rx_queue_release(void *queue)
+sfc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct sfc_dp_rxq *dp_rxq = queue;
+       struct sfc_dp_rxq *dp_rxq = dev->data->rx_queues[qid];
        struct sfc_rxq *rxq;
        struct sfc_adapter *sa;
        sfc_sw_index_t sw_index;
@@ -561,9 +561,9 @@ fail_tx_qinit:
 }
 
 static void
-sfc_tx_queue_release(void *queue)
+sfc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct sfc_dp_txq *dp_txq = queue;
+       struct sfc_dp_txq *dp_txq = dev->data->tx_queues[qid];
        struct sfc_txq *txq;
        sfc_sw_index_t sw_index;
        struct sfc_adapter *sa;
index 7416a6b1b8161c549d05d7afbe916f5f2bd60b26..3c6a285e3c5edee605040d3e703a2f401c34cde7 100644 (file)
@@ -1143,26 +1143,28 @@ eth_stats_reset(struct rte_eth_dev *dev)
 }
 
 static void
-eth_rx_queue_release(void *q)
+eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
+       struct szedata2_rx_queue *rxq = dev->data->rx_queues[qid];
 
        if (rxq != NULL) {
                if (rxq->sze != NULL)
                        szedata_close(rxq->sze);
                rte_free(rxq);
+               dev->data->rx_queues[qid] = NULL;
        }
 }
 
 static void
-eth_tx_queue_release(void *q)
+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
+       struct szedata2_tx_queue *txq = dev->data->tx_queues[qid];
 
        if (txq != NULL) {
                if (txq->sze != NULL)
                        szedata_close(txq->sze);
                rte_free(txq);
+               dev->data->tx_queues[qid] = NULL;
        }
 }
 
@@ -1182,15 +1184,11 @@ eth_dev_close(struct rte_eth_dev *dev)
 
        free(internals->sze_dev_path);
 
-       for (i = 0; i < nb_rx; i++) {
-               eth_rx_queue_release(dev->data->rx_queues[i]);
-               dev->data->rx_queues[i] = NULL;
-       }
+       for (i = 0; i < nb_rx; i++)
+               eth_rx_queue_release(dev, i);
        dev->data->nb_rx_queues = 0;
-       for (i = 0; i < nb_tx; i++) {
-               eth_tx_queue_release(dev->data->tx_queues[i]);
-               dev->data->tx_queues[i] = NULL;
-       }
+       for (i = 0; i < nb_tx; i++)
+               eth_tx_queue_release(dev, i);
        dev->data->nb_tx_queues = 0;
 
        return ret;
@@ -1244,10 +1242,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       if (dev->data->rx_queues[rx_queue_id] != NULL) {
-               eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
-               dev->data->rx_queues[rx_queue_id] = NULL;
-       }
+       if (dev->data->rx_queues[rx_queue_id] != NULL)
+               eth_rx_queue_release(dev, rx_queue_id);
 
        rxq = rte_zmalloc_socket("szedata2 rx queue",
                        sizeof(struct szedata2_rx_queue),
@@ -1259,18 +1255,20 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
        }
 
        rxq->priv = internals;
+       dev->data->rx_queues[rx_queue_id] = rxq;
+
        rxq->sze = szedata_open(internals->sze_dev_path);
        if (rxq->sze == NULL) {
                PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id "
                                "%" PRIu16 "!", rx_queue_id);
-               eth_rx_queue_release(rxq);
+               eth_rx_queue_release(dev, rx_queue_id);
                return -EINVAL;
        }
        ret = szedata_subscribe3(rxq->sze, &rx, &tx);
        if (ret != 0 || rx == 0) {
                PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id "
                                "%" PRIu16 "!", rx_queue_id);
-               eth_rx_queue_release(rxq);
+               eth_rx_queue_release(dev, rx_queue_id);
                return -EINVAL;
        }
        rxq->rx_channel = rx_channel;
@@ -1281,8 +1279,6 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->rx_bytes = 0;
        rxq->err_pkts = 0;
 
-       dev->data->rx_queues[rx_queue_id] = rxq;
-
        PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket "
                        "%u (channel id %u).", rxq->qid, socket_id,
                        rxq->rx_channel);
@@ -1306,10 +1302,8 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       if (dev->data->tx_queues[tx_queue_id] != NULL) {
-               eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
-               dev->data->tx_queues[tx_queue_id] = NULL;
-       }
+       if (dev->data->tx_queues[tx_queue_id] != NULL)
+               eth_tx_queue_release(dev, tx_queue_id);
 
        txq = rte_zmalloc_socket("szedata2 tx queue",
                        sizeof(struct szedata2_tx_queue),
@@ -1321,18 +1315,20 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
        }
 
        txq->priv = internals;
+       dev->data->tx_queues[tx_queue_id] = txq;
+
        txq->sze = szedata_open(internals->sze_dev_path);
        if (txq->sze == NULL) {
                PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id "
                                "%" PRIu16 "!", tx_queue_id);
-               eth_tx_queue_release(txq);
+               eth_tx_queue_release(dev, tx_queue_id);
                return -EINVAL;
        }
        ret = szedata_subscribe3(txq->sze, &rx, &tx);
        if (ret != 0 || tx == 0) {
                PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id "
                                "%" PRIu16 "!", tx_queue_id);
-               eth_tx_queue_release(txq);
+               eth_tx_queue_release(dev, tx_queue_id);
                return -EINVAL;
        }
        txq->tx_channel = tx_channel;
@@ -1341,8 +1337,6 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
        txq->tx_bytes = 0;
        txq->err_pkts = 0;
 
-       dev->data->tx_queues[tx_queue_id] = txq;
-
        PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket "
                        "%u (channel id %u).", txq->qid, socket_id,
                        txq->tx_channel);
index c515de3bf71d92b51189a283a429edb428032b21..046f17669d03d16b93e1d8f3d0ec9fd9c426fa68 100644 (file)
@@ -1151,9 +1151,9 @@ tap_dev_close(struct rte_eth_dev *dev)
 }
 
 static void
-tap_rx_queue_release(void *queue)
+tap_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct rx_queue *rxq = queue;
+       struct rx_queue *rxq = dev->data->rx_queues[qid];
        struct pmd_process_private *process_private;
 
        if (!rxq)
@@ -1170,9 +1170,9 @@ tap_rx_queue_release(void *queue)
 }
 
 static void
-tap_tx_queue_release(void *queue)
+tap_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct tx_queue *txq = queue;
+       struct tx_queue *txq = dev->data->tx_queues[qid];
        struct pmd_process_private *process_private;
 
        if (!txq)
index 561a98fc81a3f1927435410203d9b25d532feb28..5502f1ee69397aa5c9563c59d7975478506f9dae 100644 (file)
@@ -858,13 +858,12 @@ nicvf_configure_rss_reta(struct rte_eth_dev *dev)
 }
 
 static void
-nicvf_dev_tx_queue_release(void *sq)
+nicvf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct nicvf_txq *txq;
+       struct nicvf_txq *txq = dev->data->tx_queues[qid];
 
        PMD_INIT_FUNC_TRACE();
 
-       txq = (struct nicvf_txq *)sq;
        if (txq) {
                if (txq->txbuffs != NULL) {
                        nicvf_tx_queue_release_mbufs(txq);
@@ -872,6 +871,7 @@ nicvf_dev_tx_queue_release(void *sq)
                        txq->txbuffs = NULL;
                }
                rte_free(txq);
+               dev->data->tx_queues[qid] = NULL;
        }
 }
 
@@ -985,8 +985,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
                PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                                nicvf_netdev_qidx(nic, qidx));
-               nicvf_dev_tx_queue_release(
-                       dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
+               nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
        }
 
@@ -1020,19 +1019,21 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                txq->pool_free = nicvf_single_pool_free_xmited_buffers;
        }
 
+       dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
+
        /* Allocate software ring */
        txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
                                nb_desc * sizeof(struct rte_mbuf *),
                                RTE_CACHE_LINE_SIZE, nic->node);
 
        if (txq->txbuffs == NULL) {
-               nicvf_dev_tx_queue_release(txq);
+               nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                return -ENOMEM;
        }
 
        if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
                PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
-               nicvf_dev_tx_queue_release(txq);
+               nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                return -ENOMEM;
        }
 
@@ -1043,7 +1044,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                        nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
                        txq->phys, txq->offloads);
 
-       dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
        dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
                RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
@@ -1161,11 +1161,11 @@ nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
 }
 
 static void
-nicvf_dev_rx_queue_release(void *rx_queue)
+nicvf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
        PMD_INIT_FUNC_TRACE();
 
-       rte_free(rx_queue);
+       rte_free(dev->data->rx_queues[qid]);
 }
 
 static int
@@ -1336,8 +1336,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
                PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                                nicvf_netdev_qidx(nic, qidx));
-               nicvf_dev_rx_queue_release(
-                       dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
+               nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
        }
 
@@ -1365,12 +1364,14 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        else
                rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
 
+       dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
+
        nicvf_rxq_mbuf_setup(rxq);
 
        /* Alloc completion queue */
        if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
                PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
-               nicvf_dev_rx_queue_release(rxq);
+               nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                return -ENOMEM;
        }
 
@@ -1382,7 +1383,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                        nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
                        rte_mempool_avail_count(mp), rxq->phys, offloads);
 
-       dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
        dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
                RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
index 3021933965c8f6630b761be96528a546fcdc184f..d979b120271266edd6849f087dbb93b534543816 100644 (file)
@@ -433,9 +433,9 @@ void txgbe_dev_clear_queues(struct rte_eth_dev *dev);
 
 void txgbe_dev_free_queues(struct rte_eth_dev *dev);
 
-void txgbe_dev_rx_queue_release(void *rxq);
+void txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
-void txgbe_dev_tx_queue_release(void *txq);
+void txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 int  txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
index 1a261287d1bdab2e6c5ec82af9ec9dd5400e5ff9..b6339fe50b440540c6a31bb2b1cf59b93b66be71 100644 (file)
@@ -2109,9 +2109,9 @@ txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
 }
 
 void __rte_cold
-txgbe_dev_tx_queue_release(void *txq)
+txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       txgbe_tx_queue_release(txq);
+       txgbe_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 /* (Re)set dynamic txgbe_tx_queue fields to defaults */
@@ -2437,9 +2437,9 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
 }
 
 void __rte_cold
-txgbe_dev_rx_queue_release(void *rxq)
+txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       txgbe_rx_queue_release(rxq);
+       txgbe_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 /*
@@ -2795,13 +2795,13 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+               txgbe_dev_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+               txgbe_dev_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
index a202931e9aed2010a6ad4265b3214fa8a0b36d2e..2e24e5f7ffbb0fcc157ebf9c9a6f87827c1d5ea7 100644 (file)
@@ -1346,9 +1346,15 @@ eth_stats_reset(struct rte_eth_dev *dev)
 }
 
 static void
-eth_queue_release(void *q)
+eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       rte_free(q);
+       rte_free(dev->data->rx_queues[qid]);
+}
+
+static void
+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       rte_free(dev->data->tx_queues[qid]);
 }
 
 static int
@@ -1388,8 +1394,8 @@ static const struct eth_dev_ops ops = {
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
-       .rx_queue_release = eth_queue_release,
-       .tx_queue_release = eth_queue_release,
+       .rx_queue_release = eth_rx_queue_release,
+       .tx_queue_release = eth_tx_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
index 2f40ae907dcd416972a26e98bb395eeabf6f15c4..cfffc94c48959e9acde2e6845ddb26aec7fbf046 100644 (file)
@@ -1058,18 +1058,12 @@ vmxnet3_free_queues(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               void *rxq = dev->data->rx_queues[i];
-
-               vmxnet3_dev_rx_queue_release(rxq);
-       }
+       for (i = 0; i < dev->data->nb_rx_queues; i++)
+               vmxnet3_dev_rx_queue_release(dev, i);
        dev->data->nb_rx_queues = 0;
 
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               void *txq = dev->data->tx_queues[i];
-
-               vmxnet3_dev_tx_queue_release(txq);
-       }
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               vmxnet3_dev_tx_queue_release(dev, i);
        dev->data->nb_tx_queues = 0;
 }
 
index 59bee9723cfc6a1e607047140e61b0f9bea8f1dc..8950175460f0584d22fdf14640cd98ae1118c1a0 100644 (file)
@@ -182,8 +182,8 @@ vmxnet3_rx_data_ring(struct vmxnet3_hw *hw, uint32 rqID)
 
 void vmxnet3_dev_clear_queues(struct rte_eth_dev *dev);
 
-void vmxnet3_dev_rx_queue_release(void *rxq);
-void vmxnet3_dev_tx_queue_release(void *txq);
+void vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 int vmxnet3_v4_rss_configure(struct rte_eth_dev *dev);
 
index 5cf53d4de8257e199097e97d3d50ec28989c8545..b01c4c01f9c9d830424c44239969a50f1be3167f 100644 (file)
@@ -165,9 +165,9 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
 }
 
 void
-vmxnet3_dev_tx_queue_release(void *txq)
+vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       vmxnet3_tx_queue_t *tq = txq;
+       vmxnet3_tx_queue_t *tq = dev->data->tx_queues[qid];
 
        if (tq != NULL) {
                /* Release mbufs */
@@ -182,10 +182,10 @@ vmxnet3_dev_tx_queue_release(void *txq)
 }
 
 void
-vmxnet3_dev_rx_queue_release(void *rxq)
+vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
        int i;
-       vmxnet3_rx_queue_t *rq = rxq;
+       vmxnet3_rx_queue_t *rq = dev->data->rx_queues[qid];
 
        if (rq != NULL) {
                /* Release mbufs */
index 96dd0ecaf3a7ae0699ae03edebabd4b0041c49fd..40f486c8a6da514fd653cf518fd2ba1d13ec57e1 100644 (file)
@@ -312,7 +312,8 @@ typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
                                    uint16_t rx_queue_id);
 /**< @internal Disable interrupt of a receive queue of an Ethernet device. */
 
-typedef void (*eth_queue_release_t)(void *queue);
+typedef void (*eth_queue_release_t)(struct rte_eth_dev *dev,
+                                   uint16_t queue_id);
 /**< @internal Release memory resources allocated by given RX/TX queue. */
 
 typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,
index d75a3b88061c4dc05e11226af4b2ef0722c4fbbd..f6562d43dced2dbb27dbdcd13c619581051e6f4e 100644 (file)
@@ -898,7 +898,7 @@ eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
                return;
 
        if (dev->dev_ops->rx_queue_release != NULL)
-               (*dev->dev_ops->rx_queue_release)(rxq[qid]);
+               (*dev->dev_ops->rx_queue_release)(dev, qid);
        rxq[qid] = NULL;
 }
 
@@ -911,7 +911,7 @@ eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
                return;
 
        if (dev->dev_ops->tx_queue_release != NULL)
-               (*dev->dev_ops->tx_queue_release)(txq[qid]);
+               (*dev->dev_ops->tx_queue_release)(dev, qid);
        txq[qid] = NULL;
 }