net/virtio-user: fix packed ring server mode
author    Xuan Ding <xuan.ding@intel.com>
          Wed, 15 Jan 2020 06:13:58 +0000 (06:13 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Fri, 17 Jan 2020 18:46:26 +0000 (19:46 +0100)
This patch fixes the situation where the data path does not work
properly when vhost reconnects to virtio in server mode with packed ring.

Currently, virtio and vhost share the memory of the vring. For split
ring, vhost can read the status of the descriptors directly from the
available ring and the used ring during reconnection, so the data path
can continue.
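
For illustration, a minimal sketch of the split ring layout (after the
virtio 1.0 spec; not part of this patch). All progress state lives in
the shared rings themselves, which is why a reconnecting vhost can
recover it simply by re-reading the two index fields:

	/* Split ring: both progress indexes sit in shared memory. */
	struct vring_avail {
		uint16_t flags;
		uint16_t idx;      /* next slot the driver will publish */
		uint16_t ring[];   /* head indexes of available chains */
	};

	struct vring_used_elem {
		uint32_t id;       /* head index of a completed chain */
		uint32_t len;      /* bytes written by the device */
	};

	struct vring_used {
		uint16_t flags;
		uint16_t idx;      /* next slot the device will publish */
		struct vring_used_elem ring[];
	};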

But for packed ring, when reconnecting to virtio, vhost cannot recover
the status of the descriptors from the descriptor ring alone, because
availability is tracked against wrap counters that are not stored in
shared memory. By resetting the Tx and Rx queues, the data path can
restart from the beginning. Also move the hw->state_lock initialization
from device configure to device init, so the lock is valid when the
reconnection path takes it.
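
By contrast, a minimal sketch of the packed ring availability test (per
the virtio 1.1 spec; not part of this patch). A descriptor's state is
meaningful only relative to a wrap counter that each side keeps in
private memory, so a freshly reconnected vhost cannot reconstruct it:

	#include <stdbool.h>
	#include <stdint.h>

	#define VRING_PACKED_DESC_F_AVAIL	(1 << 7)
	#define VRING_PACKED_DESC_F_USED	(1 << 15)

	/*
	 * A descriptor is available iff its AVAIL bit differs from its
	 * USED bit and equals the observer's current wrap counter; the
	 * counter itself is never written to shared memory.
	 */
	static inline bool
	desc_is_avail(uint16_t flags, bool wrap_counter)
	{
		bool avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
		bool used = !!(flags & VRING_PACKED_DESC_F_USED);

		return avail != used && avail == wrap_counter;
	}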

Fixes: 4c3f5822eb21 ("net/virtio: add packed virtqueue defines")
Cc: stable@dpdk.org
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
drivers/net/virtio/virtio_ethdev.c
drivers/net/virtio/virtio_user_ethdev.c
drivers/net/virtio/virtqueue.c
drivers/net/virtio/virtqueue.h

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 044eb10a7033b4901dc7abd72e1b3dfe9c5bbd35..f9d0ea70db4f67f33882c7a497e11f44267527f7 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
                        goto err_vtpci_init;
        }
 
+       rte_spinlock_init(&hw->state_lock);
+
        /* reset device and negotiate default features */
        ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
        if (ret < 0)
@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
                        return -EBUSY;
                }
 
-       rte_spinlock_init(&hw->state_lock);
-
        hw->use_simple_rx = 1;
 
        if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 3fc1725736b5f4c82d7517040c1b59b986de369e..f3b35d1bdd6f4b90dfedf7a29601aeef72f7cb11 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -13,6 +13,7 @@
 #include <rte_ethdev_vdev.h>
 #include <rte_bus_vdev.h>
 #include <rte_alarm.h>
+#include <rte_cycles.h>
 
 #include "virtio_ethdev.h"
 #include "virtio_logs.h"
 #define virtio_user_get_dev(hw) \
        ((struct virtio_user_dev *)(hw)->virtio_user_dev)
 
+static void
+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
+{
+       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtnet_rx *rxvq;
+       struct virtnet_tx *txvq;
+       uint16_t i;
+
+       /* Add lock to avoid queue contention. */
+       rte_spinlock_lock(&hw->state_lock);
+       hw->started = 0;
+
+       /*
+        * Wait for the data path to complete before resetting queues.
+        * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
+        */
+       rte_delay_ms(1);
+
+       /* Vring reset for each Tx queue and Rx queue. */
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxvq = dev->data->rx_queues[i];
+               virtqueue_rxvq_reset_packed(rxvq->vq);
+               virtio_dev_rx_queue_setup_finish(dev, i);
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txvq = dev->data->tx_queues[i];
+               virtqueue_txvq_reset_packed(txvq->vq);
+       }
+
+       hw->started = 1;
+       rte_spinlock_unlock(&hw->state_lock);
+}
+
+
 static int
 virtio_user_server_reconnect(struct virtio_user_dev *dev)
 {
        int ret;
        int connectfd;
        struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+       struct virtio_hw *hw = eth_dev->data->dev_private;
 
        connectfd = accept(dev->listenfd, NULL, NULL);
        if (connectfd < 0)
@@ -51,6 +88,13 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
 
        dev->features &= dev->device_features;
 
+       /* For packed ring, resetting queues is required in reconnection. */
+       if (vtpci_packed_queue(hw)) {
+               PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
+                               " when packed ring reconnecting.");
+               virtio_user_reset_queues_packed(eth_dev);
+       }
+
        ret = virtio_user_start_device(dev);
        if (ret < 0)
                return -1;
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 5ff1e3587ebc1f8617e0a2a604737d5a971a599d..0b4e3bf3e229e0ec26df8844386a6f1af0246760 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
        else
                virtqueue_rxvq_flush_split(vq);
 }
+
+int
+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
+{
+       int size = vq->vq_nentries;
+       struct vq_desc_extra *dxp;
+       struct virtnet_rx *rxvq;
+       uint16_t desc_idx;
+
+       vq->vq_used_cons_idx = 0;
+       vq->vq_desc_head_idx = 0;
+       vq->vq_avail_idx = 0;
+       vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+       vq->vq_free_cnt = vq->vq_nentries;
+
+       vq->vq_packed.used_wrap_counter = 1;
+       vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+       vq->vq_packed.event_flags_shadow = 0;
+       vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+       rxvq = &vq->rxq;
+       memset(rxvq->mz->addr, 0, rxvq->mz->len);
+
+       for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+               dxp = &vq->vq_descx[desc_idx];
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+       }
+
+       vring_desc_init_packed(vq, size);
+
+       return 0;
+}
+
+int
+virtqueue_txvq_reset_packed(struct virtqueue *vq)
+{
+       int size = vq->vq_nentries;
+       struct vq_desc_extra *dxp;
+       struct virtnet_tx *txvq;
+       uint16_t desc_idx;
+
+       vq->vq_used_cons_idx = 0;
+       vq->vq_desc_head_idx = 0;
+       vq->vq_avail_idx = 0;
+       vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+       vq->vq_free_cnt = vq->vq_nentries;
+
+       vq->vq_packed.used_wrap_counter = 1;
+       vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+       vq->vq_packed.event_flags_shadow = 0;
+
+       txvq = &vq->txq;
+       memset(txvq->mz->addr, 0, txvq->mz->len);
+       memset(txvq->virtio_net_hdr_mz->addr, 0,
+               txvq->virtio_net_hdr_mz->len);
+
+       for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+               dxp = &vq->vq_descx[desc_idx];
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+       }
+
+       vring_desc_init_packed(vq, size);
+
+       return 0;
+}
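
Both reset helpers above follow the same sequence: rewind the ring
indexes and free count, restore the initial packed ring state
(used_wrap_counter = 1 and VRING_PACKED_DESC_F_AVAIL in cached_flags,
matching a freshly created queue), zero the backing memzone, free any
mbufs still referenced via vq_descx[].cookie, and re-initialize the
descriptors with vring_desc_init_packed(). The Rx variant additionally
sets VRING_DESC_F_WRITE, since receive descriptors must be
device-writable, and relies on virtio_dev_rx_queue_setup_finish() in
the caller to repopulate the ring; the Tx variant instead also clears
the virtio_net_hdr memzone.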
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 8d7f197b139cd6459293cfe6557a3eafb3e870ad..58ad7309ae7c6a4cfa07bfcd878ed035d936efbf 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 /* Flush the elements in the used ring. */
 void virtqueue_rxvq_flush(struct virtqueue *vq);
 
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {