vhost: support clear in-flight packets for async dequeue
author    Yuan Wang <yuanx.wang@intel.com>
          Thu, 9 Jun 2022 17:34:03 +0000 (01:34 +0800)
committer Maxime Coquelin <maxime.coquelin@redhat.com>
          Fri, 17 Jun 2022 13:19:57 +0000 (15:19 +0200)
rte_vhost_clear_queue_thread_unsafe() only supports clearing
in-flight packets for async enqueue. Now that async dequeue is
supported, this API should clear in-flight dequeue packets as well.

This patch also adds a thread-safe version of this API; the
difference is that the thread-safe version takes the virtqueue
access lock.

These APIs may be used to clean up packets in the async channel
to prevent packet loss when the device state changes or when the
device is destroyed.
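
As an editorial illustration (not part of the patch), a minimal sketch of
draining a virtqueue's async channel with the new thread-safe API before
tearing a device down. The burst size and the polling loop on
rte_vhost_async_get_inflight() are assumptions, not mandated by the API:

#include <rte_mbuf.h>
#include <rte_vhost_async.h>

#define CLEAR_BURST 32 /* illustrative burst size */

/* Drain completed in-flight packets from one virtqueue. */
static void
drain_queue(int vid, uint16_t queue_id, int16_t dma_id, uint16_t vchan_id)
{
	struct rte_mbuf *pkts[CLEAR_BURST];
	uint16_t n;

	/* Poll until the async channel reports nothing in flight.
	 * rte_vhost_clear_queue() returns 0 without clearing when the
	 * virtqueue is busy (it only trylocks), so this simply retries. */
	while (rte_vhost_async_get_inflight(vid, queue_id) > 0) {
		n = rte_vhost_clear_queue(vid, queue_id, pkts,
				CLEAR_BURST, dma_id, vchan_id);
		rte_pktmbuf_free_bulk(pkts, n);
	}
}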

Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Jiayu Hu <jiayu.hu@intel.com>
doc/guides/prog_guide/vhost_lib.rst
doc/guides/rel_notes/release_22_07.rst
lib/vhost/rte_vhost_async.h
lib/vhost/version.map
lib/vhost/virtio_net.c

index cd3f6caa9ad8da04fdf7145ebe7d69fb9dee5a7b..606edee94050b294066466276658f35c335572a4 100644 (file)
@@ -288,7 +288,13 @@ The following is an overview of some key Vhost API functions:
 
 * ``rte_vhost_clear_queue_thread_unsafe(vid, queue_id, **pkts, count, dma_id, vchan_id)``
 
-  Clear inflight packets which are submitted to DMA engine in vhost async data
+  Clear in-flight packets which are submitted to the async channel in the
+  vhost async data path, without performing any locking on the virtqueue.
+  Completed packets are returned to applications through ``pkts``.
+
+* ``rte_vhost_clear_queue(vid, queue_id, **pkts, count, dma_id, vchan_id)``
+
+  Clear in-flight packets which are submitted to the async channel in vhost async data
   path. Completed packets are returned to applications through ``pkts``.
 
 * ``rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id, struct rte_vhost_stat_name *names, unsigned int size)``
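
For context, an editorial sketch (not in the patch) of where the
thread-unsafe variant fits: it is intended for contexts where vhost
already holds the virtqueue lock, such as a vring_state_changed
callback, mirroring the pattern used in the vhost example application.
app_dma_id and app_vchan_id are assumed application state:

#include <rte_mbuf.h>
#include <rte_vhost_async.h>

static int16_t app_dma_id;    /* assumed: saved at async registration */
static uint16_t app_vchan_id; /* assumed: saved at async registration */

/* rte_vhost_device_ops::vring_state_changed: when a queue is disabled,
 * clear its in-flight packets. The thread-unsafe variant is usable here
 * because vhost holds the virtqueue lock while delivering the callback. */
static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
	struct rte_mbuf *pkts[32];
	uint16_t n;

	if (!enable) {
		n = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
				pkts, 32, app_dma_id, app_vchan_id);
		rte_pktmbuf_free_bulk(pkts, n);
	}
	return 0;
}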
index d2504bb9cdf187154b2866e7e9a897cff2aea34c..29ad556ea27bcc429acf409838284a3759a47d3f 100644 (file)
@@ -109,6 +109,11 @@ New Features
   Added vhost async dequeue API which can leverage DMA devices to
   accelerate receiving packets from guest.
 
+* **Added thread-safe version of in-flight packet clear API in vhost library.**
+
+  Added an API which can clear the in-flight packets submitted to
+  the async channel in a thread-safe manner in the vhost async data path.
+
 * **Added vhost API to get the device type of a vDPA device.**
 
   Added an API which can get the device type of vDPA device.
index a1e7f674ed9be0d082dcbcdf99bd1ab9dd7b2e7a..1db2a10124dc971cb2958a63640a2c2c3c942913 100644 (file)
@@ -183,6 +183,31 @@ uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
                struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
                uint16_t vchan_id);
 
+/**
+ * This function checks async completion status and clears packets for
+ * a specific vhost device queue. Packets which are in flight will be
+ * returned in an array.
+ *
+ * @param vid
+ *  ID of vhost device to clear data
+ * @param queue_id
+ *  Queue id to clear data
+ * @param pkts
+ *  Blank array to store returned packet pointers
+ * @param count
+ *  Size of the packet array
+ * @param dma_id
+ *  The identifier of the DMA device
+ * @param vchan_id
+ *  The identifier of virtual DMA channel
+ * @return
+ *  Number of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_clear_queue(int vid, uint16_t queue_id,
+               struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
+               uint16_t vchan_id);
+
 /**
  * The DMA vChannels used in asynchronous data path must be configured
  * first. So this function needs to be called before enabling DMA
index 4880b9a42219a647584a3ddeec9d5a1dc57739ec..9329f88e7994d07be733e47684cf355aacd3462e 100644 (file)
@@ -95,6 +95,7 @@ EXPERIMENTAL {
        rte_vhost_vring_stats_reset;
        rte_vhost_async_try_dequeue_burst;
        rte_vhost_driver_get_vdpa_dev_type;
+       rte_vhost_clear_queue;
 };
 
 INTERNAL {
index ce22e3ac79176d342c624af4b0308b97ccaad80b..c939624ad981fa6d5f045024770dd76bc1b954fe 100644 (file)
 
 #define MAX_BATCH_LEN 256
 
+static __rte_always_inline uint16_t
+async_poll_dequeue_completed_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+               struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
+               uint16_t vchan_id, bool legacy_ol_flags);
+
 /* DMA device copy operation tracking array. */
 struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];
 
@@ -2165,12 +2170,18 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
                return 0;
 
        VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
-       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+       if (unlikely(queue_id >= dev->nr_vring)) {
                VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
                        dev->ifname, __func__, queue_id);
                return 0;
        }
 
+       if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
+               VHOST_LOG_DATA(ERR, "(%s) %s: invalid dma id %d.\n",
+                               dev->ifname, __func__, dma_id);
+               return 0;
+       }
+
        vq = dev->virtqueue[queue_id];
 
        if (unlikely(!rte_spinlock_is_locked(&vq->access_lock))) {
@@ -2192,11 +2203,89 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
                return 0;
        }
 
-       n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+       if ((queue_id & 1) == 0)
+               n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
+                                       pkts, count, dma_id, vchan_id);
+       else {
+               if (unlikely(vq_is_packed(dev)))
+                       VHOST_LOG_DATA(ERR,
+                                       "(%s) %s: async dequeue does not support packed ring.\n",
+                                       dev->ifname, __func__);
+               else
+                       n_pkts_cpl = async_poll_dequeue_completed_split(dev, vq, pkts, count,
+                                       dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
+       }
+
+       vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
+       vq->stats.inflight_completed += n_pkts_cpl;
+
+       return n_pkts_cpl;
+}
+
+uint16_t
+rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
+               uint16_t count, int16_t dma_id, uint16_t vchan_id)
+{
+       struct virtio_net *dev = get_device(vid);
+       struct vhost_virtqueue *vq;
+       uint16_t n_pkts_cpl = 0;
+
+       if (!dev)
+               return 0;
+
+       VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
+       if (unlikely(queue_id >= dev->nr_vring)) {
+               VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %u.\n",
+                       dev->ifname, __func__, queue_id);
+               return 0;
+       }
+
+       if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
+               VHOST_LOG_DATA(ERR, "(%s) %s: invalid dma id %d.\n",
+                               dev->ifname, __func__, dma_id);
+               return 0;
+       }
+
+       vq = dev->virtqueue[queue_id];
+
+       if (!rte_spinlock_trylock(&vq->access_lock)) {
+               VHOST_LOG_DATA(DEBUG, "(%s) %s: virtqueue %u is busy.\n",
+                               dev->ifname, __func__, queue_id);
+               return 0;
+       }
+
+       if (unlikely(!vq->async)) {
+               VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %u.\n",
+                               dev->ifname, __func__, queue_id);
+               goto out_access_unlock;
+       }
+
+       if (unlikely(!dma_copy_track[dma_id].vchans ||
+                               !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
+               VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
+                               dma_id, vchan_id);
+               goto out_access_unlock;
+       }
+
+       if ((queue_id & 1) == 0)
+               n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
+                               pkts, count, dma_id, vchan_id);
+       else {
+               if (unlikely(vq_is_packed(dev)))
+                       VHOST_LOG_DATA(ERR,
+                                       "(%s) %s: async dequeue does not support packed ring.\n",
+                                       dev->ifname, __func__);
+               else
+                       n_pkts_cpl = async_poll_dequeue_completed_split(dev, vq, pkts, count,
+                                       dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
+       }
 
        vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
        vq->stats.inflight_completed += n_pkts_cpl;
 
+out_access_unlock:
+       rte_spinlock_unlock(&vq->access_lock);
+
        return n_pkts_cpl;
 }
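
A closing note on the dispatch above (editorial, not from the patch):
vhost virtqueue indexes alternate directions within each queue pair, so
an even queue_id polls enqueue completions and an odd one polls dequeue
completions. A sketch of deriving both indexes for device queue pair q,
using the VIRTIO_RXQ/VIRTIO_TXQ constants from the vhost example
application:

#define VIRTIO_RXQ 0 /* guest RX ring: host enqueue direction */
#define VIRTIO_TXQ 1 /* guest TX ring: host dequeue direction */

uint16_t enq_qid = q * 2 + VIRTIO_RXQ; /* (enq_qid & 1) == 0: enqueue path */
uint16_t deq_qid = q * 2 + VIRTIO_TXQ; /* (deq_qid & 1) == 1: dequeue path */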