#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
+#include <rte_cycles.h>
#include "virtio_ethdev.h"
#include "virtio_logs.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_user/virtio_user_dev.h"
+#include "virtio_user/vhost.h"
#define virtio_user_get_dev(hw) \
((struct virtio_user_dev *)(hw)->virtio_user_dev)
+static void
+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_rx *rxvq;
+ struct virtnet_tx *txvq;
+ uint16_t i;
+
+ /* Take the lock to avoid queue contention with the datapath. */
+ rte_spinlock_lock(&hw->state_lock);
+ hw->started = 0;
+
+ /*
+ * Waiting for the datapath to complete before resetting queues.
+ * 1 ms should be enough for the ongoing Tx/Rx function to finish.
+ */
+ rte_delay_ms(1);
+
+ /* Vring reset for each Tx queue and Rx queue. */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxvq = dev->data->rx_queues[i];
+ virtqueue_rxvq_reset_packed(rxvq->vq);
+ virtio_dev_rx_queue_setup_finish(dev, i);
+ }
+
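+ /* Tx vrings only need the reset; no descriptor refill is done here. */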
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txvq = dev->data->tx_queues[i];
+ virtqueue_txvq_reset_packed(txvq->vq);
+ }
+
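+ /* Allow the datapath to run again now that the queues are reset. */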
+ hw->started = 1;
+ rte_spinlock_unlock(&hw->state_lock);
+}
+
static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
int ret;
int connectfd;
struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+ struct virtio_hw *hw = eth_dev->data->dev_private;
connectfd = accept(dev->listenfd, NULL, NULL);
if (connectfd < 0)
dev->features &= dev->device_features;
+ /* For packed ring, the queues must be reset during reconnection. */
+ if (vtpci_packed_queue(hw)) {
+ PMD_INIT_LOG(NOTICE, "Packets in flight will be dropped"
+ " when the packed ring reconnects.");
+ virtio_user_reset_queues_packed(eth_dev);
+ }
+
ret = virtio_user_start_device(dev);
if (ret < 0)
return -1;
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
if (offset == offsetof(struct virtio_net_config, mac) &&
- length == ETHER_ADDR_LEN) {
- for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ length == RTE_ETHER_ADDR_LEN) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
((uint8_t *)dst)[i] = dev->mac_addr[i];
return;
}
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
if ((offset == offsetof(struct virtio_net_config, mac)) &&
- (length == ETHER_ADDR_LEN))
- for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ (length == RTE_ETHER_ADDR_LEN))
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
dev->mac_addr[i] = ((const uint8_t *)src)[i];
else
PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
return 0;
}
-static struct rte_vdev_driver virtio_user_driver;
-
static struct rte_eth_dev *
virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
const char *name = rte_vdev_device_name(dev);
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
- RTE_LOG(ERR, PMD, "Failed to probe %s\n", name);
+ PMD_INIT_LOG(ERR, "Failed to probe %s", name);
return -1;
}
{
const char *name;
struct rte_eth_dev *eth_dev;
- struct virtio_hw *hw;
- struct virtio_user_dev *dev;
if (!vdev)
return -EINVAL;
name = rte_vdev_device_name(vdev);
PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
eth_dev = rte_eth_dev_allocated(name);
+ /* Port has already been released by close. */
if (!eth_dev)
- return -ENODEV;
+ return 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return rte_eth_dev_release_port(eth_dev);
/* make sure the device is stopped, queues freed */
rte_eth_dev_close(eth_dev->data->port_id);
- hw = eth_dev->data->dev_private;
- dev = hw->virtio_user_dev;
- virtio_user_dev_uninit(dev);
-
- rte_eth_dev_release_port(eth_dev);
-
return 0;
}