Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
Acked-by: Huawei Xie <huawei.xie@intel.com>
+/*
+ * Forward declaration: eth_xenvirt_free_queues() is defined after
+ * eth_dev_close() (which calls it) later in this file.
+ */
+static void
+eth_xenvirt_free_queues(struct rte_eth_dev *dev);
+
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
static void
eth_dev_close(struct rte_eth_dev *dev)
{
static void
eth_dev_close(struct rte_eth_dev *dev)
{
+ eth_xenvirt_free_queues(dev);
-eth_queue_release(void *q __rte_unused)
+eth_queue_release(void *q)
+/*
+ * Release every RX and TX queue of the device and reset the queue
+ * counts. Called from eth_dev_close() so that queue resources are
+ * freed when the device is closed.
+ */
+static void
+eth_xenvirt_free_queues(struct rte_eth_dev *dev)
+{
+ int i;
+ /* Free each RX queue and NULL the slot to avoid dangling pointers. */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ /* Same for the TX queues. */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,