vhost: fix async callbacks return type
author: Cheng Jiang <cheng1.jiang@intel.com>
Fri, 23 Jul 2021 08:09:34 +0000 (08:09 +0000)
committer: Chenbo Xia <chenbo.xia@intel.com>
Fri, 23 Jul 2021 08:58:53 +0000 (10:58 +0200)
The async vhost callback ops should return a negative value when
something goes wrong in the callback, so the return type should be
changed to int32_t. The issue in the vhost example is also fixed.

Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 6e9a9d2a02ae ("examples/vhost: fix ioat dependency")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
examples/vhost/ioat.c
examples/vhost/ioat.h
lib/vhost/rte_vhost_async.h
lib/vhost/virtio_net.c

index 2a2c2d7..457f817 100644 (file)
@@ -122,7 +122,7 @@ out:
        return ret;
 }
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
                struct rte_vhost_async_desc *descs,
                struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
        return i_desc;
 }
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
                struct rte_vhost_async_status *opaque_data,
                uint16_t max_packets)
index 1aa28ed..62e163c 100644 (file)
@@ -27,12 +27,12 @@ struct dma_for_vhost {
 #ifdef RTE_RAW_IOAT
 int open_ioat(const char *value);
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
                struct rte_vhost_async_desc *descs,
                struct rte_vhost_async_status *opaque_data, uint16_t count);
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
                struct rte_vhost_async_status *opaque_data,
                uint16_t max_packets);
@@ -42,7 +42,7 @@ static int open_ioat(const char *value __rte_unused)
        return -1;
 }
 
-static uint32_t
+static int32_t
 ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
                struct rte_vhost_async_desc *descs __rte_unused,
                struct rte_vhost_async_status *opaque_data __rte_unused,
@@ -51,7 +51,7 @@ ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
        return -1;
 }
 
-static uint32_t
+static int32_t
 ioat_check_completed_copies_cb(int vid __rte_unused,
                uint16_t queue_id __rte_unused,
                struct rte_vhost_async_status *opaque_data __rte_unused,
index 69ec66b..02d012a 100644 (file)
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
         * @param count
         *  number of elements in the "descs" array
         * @return
-        *  number of descs processed
+        *  number of descs processed, negative value means error
         */
-       uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+       int32_t (*transfer_data)(int vid, uint16_t queue_id,
                struct rte_vhost_async_desc *descs,
                struct rte_vhost_async_status *opaque_data,
                uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
         * @param max_packets
         *  max number of packets could be completed
         * @return
-        *  number of async descs completed
+        *  number of async descs completed, negative value means error
         */
-       uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+       int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
                struct rte_vhost_async_status *opaque_data,
                uint16_t max_packets);
 };
index 6e5d82c..3ab5229 100644 (file)
@@ -1644,6 +1644,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
        struct async_inflight_info *pkts_info = vq->async_pkts_info;
        uint32_t n_pkts = 0, pkt_err = 0;
        uint32_t num_async_pkts = 0, num_done_pkts = 0;
+       int32_t n_xfer;
        struct {
                uint16_t pkt_idx;
                uint16_t last_avail_idx;
@@ -1724,8 +1725,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
                if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
                        ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
                        BUF_VECTOR_MAX))) {
-                       n_pkts = vq->async_ops.transfer_data(dev->vid,
+                       n_xfer = vq->async_ops.transfer_data(dev->vid,
                                        queue_id, tdes, 0, pkt_burst_idx);
+                       if (n_xfer >= 0) {
+                               n_pkts = n_xfer;
+                       } else {
+                               VHOST_LOG_DATA(ERR,
+                                       "(%d) %s: failed to transfer data for queue id %d.\n",
+                                       dev->vid, __func__, queue_id);
+                               n_pkts = 0;
+                       }
+
                        iovec_idx = 0;
                        it_idx = 0;
 
@@ -1748,8 +1758,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
        }
 
        if (pkt_burst_idx) {
-               n_pkts = vq->async_ops.transfer_data(dev->vid,
-                               queue_id, tdes, 0, pkt_burst_idx);
+               n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+               if (n_xfer >= 0) {
+                       n_pkts = n_xfer;
+               } else {
+                       VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+                               dev->vid, __func__, queue_id);
+                       n_pkts = 0;
+               }
+
                vq->async_pkts_inflight_n += n_pkts;
 
                if (unlikely(n_pkts < pkt_burst_idx))
@@ -1996,6 +2013,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
        uint16_t async_descs_idx = 0;
        uint16_t num_buffers;
        uint16_t num_descs;
+       int32_t n_xfer;
 
        struct rte_vhost_iov_iter *it_pool = vq->it_pool;
        struct iovec *vec_pool = vq->vec_pool;
@@ -2078,8 +2096,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
                 */
                if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
                        ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
-                       n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
-                               tdes, 0, pkt_burst_idx);
+                       n_xfer = vq->async_ops.transfer_data(dev->vid,
+                                       queue_id, tdes, 0, pkt_burst_idx);
+                       if (n_xfer >= 0) {
+                               n_pkts = n_xfer;
+                       } else {
+                               VHOST_LOG_DATA(ERR,
+                                       "(%d) %s: failed to transfer data for queue id %d.\n",
+                                       dev->vid, __func__, queue_id);
+                               n_pkts = 0;
+                       }
+
                        iovec_idx = 0;
                        it_idx = 0;
                        segs_await = 0;
@@ -2101,7 +2128,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
        } while (pkt_idx < count);
 
        if (pkt_burst_idx) {
-               n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+               n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+               if (n_xfer >= 0) {
+                       n_pkts = n_xfer;
+               } else {
+                       VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+                               dev->vid, __func__, queue_id);
+                       n_pkts = 0;
+               }
+
                vq->async_pkts_inflight_n += n_pkts;
 
                if (unlikely(n_pkts < pkt_burst_idx))
@@ -2188,6 +2223,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
        uint16_t start_idx, pkts_idx, vq_size;
        struct async_inflight_info *pkts_info;
        uint16_t from, i;
+       int32_t n_cpl;
 
        if (!dev)
                return 0;
@@ -2215,9 +2251,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
        start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
                vq_size, vq->async_pkts_inflight_n);
 
-       if (count > vq->async_last_pkts_n)
-               n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+       if (count > vq->async_last_pkts_n) {
+               n_cpl = vq->async_ops.check_completed_copies(vid,
                        queue_id, 0, count - vq->async_last_pkts_n);
+               if (n_cpl >= 0) {
+                       n_pkts_cpl = n_cpl;
+               } else {
+                       VHOST_LOG_DATA(ERR,
+                               "(%d) %s: failed to check completed copies for queue id %d.\n",
+                               dev->vid, __func__, queue_id);
+                       n_pkts_cpl = 0;
+               }
+       }
        n_pkts_cpl += vq->async_last_pkts_n;
 
        n_pkts_put = RTE_MIN(count, n_pkts_cpl);