static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);
-static void virtio_dev_rx_queue_release(__rte_unused void *rxq);
-static void virtio_dev_tx_queue_release(__rte_unused void *txq);
-
static void virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
return 0;
}
+/*
+ * Deactivate and free a virtqueue. Safe to call with vq == NULL
+ * (e.g. when the control queue was never set up).
+ */
+void
+virtio_dev_queue_release(struct virtqueue *vq) {
+	if (vq) {
+		/* Must not read vq->hw before the NULL check above. */
+		struct virtio_hw *hw = vq->hw;
+
+		/* Select and deactivate the queue: writing PFN=0 tells the
+		 * host the queue's ring memory is no longer valid. */
+		VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vq->queue_id);
+		VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN, 0);
+
+		rte_free(vq);
+	}
+}
+
int virtio_dev_queue_setup(struct rte_eth_dev *dev,
int queue_type,
uint16_t queue_idx,
.stats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
.rx_queue_setup = virtio_dev_rx_queue_setup,
- /* meaningfull only to multiple queue */
.rx_queue_release = virtio_dev_rx_queue_release,
.tx_queue_setup = virtio_dev_tx_queue_setup,
- /* meaningfull only to multiple queue */
.tx_queue_release = virtio_dev_tx_queue_release,
/* collect stats per queue */
.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
eth_dev->tx_pkt_burst = NULL;
eth_dev->rx_pkt_burst = NULL;
- rte_free(hw->cvq);
- hw->cvq = NULL;
+ virtio_dev_queue_release(hw->cvq);
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
return 0;
}
-/*
- * Only 1 queue is supported, no queue release related operation
- */
-static void
-virtio_dev_rx_queue_release(__rte_unused void *rxq)
-{
-}
-
-static void
-virtio_dev_tx_queue_release(__rte_unused void *txq)
-{
-}
-
/*
* Configure virtio device
* It returns 0 on success.
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
unsigned int socket_id,
struct virtqueue **pvq);
+void virtio_dev_queue_release(struct virtqueue *vq);
+
int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
+void virtio_dev_rx_queue_release(void *rxq);
+
int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
+void virtio_dev_tx_queue_release(void *txq);
+
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
return 0;
}
+/* RX queue release hook for eth_dev_ops: thin wrapper that forwards the
+ * opaque rxq pointer (a struct virtqueue *) to the generic queue release. */
+void
+virtio_dev_rx_queue_release(void *rxq)
+{
+	virtio_dev_queue_release(rxq);
+}
+
/*
* struct rte_eth_dev *dev: Used to update dev
* uint16_t nb_desc: Defaults to values read from config space
return 0;
}
+/* TX queue release hook for eth_dev_ops: thin wrapper that forwards the
+ * opaque txq pointer (a struct virtqueue *) to the generic queue release. */
+void
+virtio_dev_tx_queue_release(void *txq)
+{
+	virtio_dev_queue_release(txq);
+}
+
static void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{