goto err_vtpci_init;
}
+ rte_spinlock_init(&hw->state_lock);
+
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
return -EBUSY;
}
- rte_spinlock_init(&hw->state_lock);
-
hw->use_simple_rx = 1;
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
+#include <rte_cycles.h>
#include "virtio_ethdev.h"
#include "virtio_logs.h"
#define virtio_user_get_dev(hw) \
((struct virtio_user_dev *)(hw)->virtio_user_dev)
+static void
+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_rx *rxvq;
+ struct virtnet_tx *txvq;
+ uint16_t i;
+
+ /* Add lock to avoid queue contention. */
+ rte_spinlock_lock(&hw->state_lock);
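+ /* Mark the device stopped so the Rx/Tx burst functions stop touching the rings. */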
+ hw->started = 0;
+
+ /*
+ * Wait for the datapath to complete before resetting queues.
+ * 1 ms should be enough for the ongoing Tx/Rx function to finish.
+ */
+ rte_delay_ms(1);
+
+ /* Reset the vring of each Rx and Tx queue. */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxvq = dev->data->rx_queues[i];
+ virtqueue_rxvq_reset_packed(rxvq->vq);
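+ /* Refill the Rx ring with fresh mbufs after the reset. */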
+ virtio_dev_rx_queue_setup_finish(dev, i);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txvq = dev->data->tx_queues[i];
+ virtqueue_txvq_reset_packed(txvq->vq);
+ }
+
+ hw->started = 1;
+ rte_spinlock_unlock(&hw->state_lock);
+}
+
static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
int ret;
int connectfd;
struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+ struct virtio_hw *hw = eth_dev->data->dev_private;
connectfd = accept(dev->listenfd, NULL, NULL);
if (connectfd < 0)
dev->features &= dev->device_features;
+ /* For packed ring, resetting queues is required in reconnection. */
+ if (vtpci_packed_queue(hw)) {
+ PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
+ " when the packed ring reconnects.");
+ virtio_user_reset_queues_packed(eth_dev);
+ }
+
ret = virtio_user_start_device(dev);
if (ret < 0)
return -1;
else
virtqueue_rxvq_flush_split(vq);
}
+
+int
+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
+{
+ int size = vq->vq_nentries;
+ struct vq_desc_extra *dxp;
+ struct virtnet_rx *rxvq;
+ uint16_t desc_idx;
+
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_avail_idx = 0;
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
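+ /* Restore the packed ring to its initial state: wrap counter set, descriptors flagged available and device-writable. */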
+ vq->vq_packed.used_wrap_counter = 1;
+ vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+ vq->vq_packed.event_flags_shadow = 0;
+ vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+ rxvq = &vq->rxq;
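+ /* Clear the Rx ring memory. */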
+ memset(rxvq->mz->addr, 0, rxvq->mz->len);
+
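+ /* Free mbufs still attached to descriptor entries. */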
+ for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
+
+ vring_desc_init_packed(vq, size);
+
+ return 0;
+}
+
+int
+virtqueue_txvq_reset_packed(struct virtqueue *vq)
+{
+ int size = vq->vq_nentries;
+ struct vq_desc_extra *dxp;
+ struct virtnet_tx *txvq;
+ uint16_t desc_idx;
+
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_avail_idx = 0;
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
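+ /* Restore the packed ring to its initial state: wrap counter set and available flag cached for new descriptors. */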
+ vq->vq_packed.used_wrap_counter = 1;
+ vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+ vq->vq_packed.event_flags_shadow = 0;
+
+ txvq = &vq->txq;
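+ /* Clear the Tx ring and the virtio-net header memory. */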
+ memset(txvq->mz->addr, 0, txvq->mz->len);
+ memset(txvq->virtio_net_hdr_mz->addr, 0,
+ txvq->virtio_net_hdr_mz->len);
+
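+ /* Free mbufs left over from in-flight Tx packets. */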
+ for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
+
+ vring_desc_init_packed(vq, size);
+
+ return 0;
+}
/* Flush the elements in the used ring. */
void virtqueue_rxvq_flush(struct virtqueue *vq);
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
static inline int
virtqueue_full(const struct virtqueue *vq)
{