vhost: checkout resubmit inflight information
[dpdk.git] / lib / librte_vhost / vhost.c
index 981837b..1f3e1b1 100644 (file)
@@ -115,6 +115,26 @@ __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
        }
 }
 
+/*
+ * Log a dirty write given a guest IO virtual address (IOVA).
+ *
+ * When an IOMMU is in use the datapath only knows IOVAs, so the range is
+ * first translated IOVA -> HVA through the IOTLB, then HVA -> GPA, and the
+ * resulting guest-physical range is logged for live migration.
+ *
+ * NOTE(review): assumes [iova, iova + len) is covered by one contiguous
+ * IOTLB mapping; a range spanning entries leaves map_len != len and the
+ * write is not logged at all — confirm callers never log across such a
+ * boundary.
+ */
+void
+__vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                            uint64_t iova, uint64_t len)
+{
+       uint64_t hva, gpa, map_len;
+       map_len = len;
+
+       /* map_len is clamped to the contiguous mapping size; a shortfall
+        * means there was no IOTLB entry covering the whole range. */
+       hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+       if (map_len != len) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+                       iova);
+               return;
+       }
+
+       /* NOTE(review): gpa == 0 (HVA not backed by guest memory) is
+        * silently ignored here — no error is logged, unlike the IOTLB
+        * miss above; verify this asymmetry is intentional. */
+       gpa = hva_to_gpa(dev, hva, len);
+       if (gpa)
+               __vhost_log_write(dev, gpa, len);
+}
+
 void
 __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
@@ -200,6 +220,26 @@ __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
        }
 }
 
+/*
+ * Cached variant of __vhost_log_write_iova(): translate IOVA -> HVA via
+ * the IOTLB, then HVA -> GPA, and record the dirty range in the per-vq
+ * log cache (flushed later by __vhost_log_cache_sync) instead of writing
+ * the shared log directly.
+ *
+ * NOTE(review): same single-contiguous-mapping assumption as
+ * __vhost_log_write_iova — a range spanning IOTLB entries is dropped
+ * with only an error log.
+ */
+void
+__vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                            uint64_t iova, uint64_t len)
+{
+       uint64_t hva, gpa, map_len;
+       map_len = len;
+
+       /* map_len is clamped to the contiguous mapping size; a shortfall
+        * means there was no IOTLB entry covering the whole range. */
+       hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+       if (map_len != len) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+                       iova);
+               return;
+       }
+
+       /* NOTE(review): as in __vhost_log_write_iova, a failed HVA -> GPA
+        * translation (gpa == 0) skips logging without any diagnostic. */
+       gpa = hva_to_gpa(dev, hva, len);
+       if (gpa)
+               __vhost_log_cache_write(dev, vq, gpa, len);
+}
+
 void *
 vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t desc_addr, uint64_t desc_len)
@@ -242,6 +282,31 @@ cleanup_vq(struct vhost_virtqueue *vq, int destroy)
                close(vq->kickfd);
 }
 
+/*
+ * Release per-virtqueue inflight-tracking state.
+ *
+ * No-op unless the VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD protocol feature
+ * was negotiated. The inflight_packed/inflight_split pointers reference
+ * shared inflight memory owned elsewhere, so they are only cleared (not
+ * freed); the resubmit list, by contrast, is heap-allocated and freed
+ * here.
+ */
+void
+cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+       if (!(dev->protocol_features &
+           (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+               return;
+
+       /* NOTE(review): the inner NULL checks before assigning NULL are
+        * redundant — unconditionally clearing the pointer is equivalent. */
+       if (vq_is_packed(dev)) {
+               if (vq->inflight_packed)
+                       vq->inflight_packed = NULL;
+       } else {
+               if (vq->inflight_split)
+                       vq->inflight_split = NULL;
+       }
+
+       /* Free the resubmit descriptor list and its container, clearing
+        * the pointers to guard against double free on repeated cleanup. */
+       if (vq->resubmit_inflight) {
+               if (vq->resubmit_inflight->resubmit_list) {
+                       free(vq->resubmit_inflight->resubmit_list);
+                       vq->resubmit_inflight->resubmit_list = NULL;
+               }
+               free(vq->resubmit_inflight);
+               vq->resubmit_inflight = NULL;
+       }
+}
+
 /*
  * Unmap any memory, close any file descriptors and
  * free any memory owned by a device.
@@ -253,8 +318,10 @@ cleanup_device(struct virtio_net *dev, int destroy)
 
        vhost_backend_cleanup(dev);
 
-       for (i = 0; i < dev->nr_vring; i++)
+       for (i = 0; i < dev->nr_vring; i++) {
                cleanup_vq(dev->virtqueue[i], destroy);
+               cleanup_vq_inflight(dev, dev->virtqueue[i]);
+       }
 }
 
 void
@@ -358,7 +425,7 @@ vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 
        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
-               goto out;
+               return -1;
 
        if (vq_is_packed(dev)) {
                if (vring_translate_packed(dev, vq) < 0)
@@ -367,7 +434,6 @@ vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
                if (vring_translate_split(dev, vq) < 0)
                        return -1;
        }
-out:
        vq->access_ok = 1;
 
        return 0;
@@ -383,6 +449,7 @@ vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;
+       vq->log_guest_addr = 0;
 
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_wr_unlock(vq);
@@ -786,22 +853,33 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
 {
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;
+       uint16_t ret = 0;
 
        dev = get_device(vid);
        if (!dev)
                return 0;
 
        vq = dev->virtqueue[queue_id];
-       if (!vq->enabled)
-               return 0;
 
-       return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+       rte_spinlock_lock(&vq->access_lock);
+
+       if (unlikely(!vq->enabled || vq->avail == NULL))
+               goto out;
+
+       ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+
+out:
+       rte_spinlock_unlock(&vq->access_lock);
+       return ret;
 }
 
-static inline void
+static inline int
 vhost_enable_notify_split(struct virtio_net *dev,
                struct vhost_virtqueue *vq, int enable)
 {
+       if (vq->used == NULL)
+               return -1;
+
        if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
                if (enable)
                        vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
@@ -811,17 +889,21 @@ vhost_enable_notify_split(struct virtio_net *dev,
                if (enable)
                        vhost_avail_event(vq) = vq->last_avail_idx;
        }
+       return 0;
 }
 
-static inline void
+static inline int
 vhost_enable_notify_packed(struct virtio_net *dev,
                struct vhost_virtqueue *vq, int enable)
 {
        uint16_t flags;
 
+       if (vq->device_event == NULL)
+               return -1;
+
        if (!enable) {
                vq->device_event->flags = VRING_EVENT_F_DISABLE;
-               return;
+               return 0;
        }
 
        flags = VRING_EVENT_F_ENABLE;
@@ -834,6 +916,7 @@ vhost_enable_notify_packed(struct virtio_net *dev,
        rte_smp_wmb();
 
        vq->device_event->flags = flags;
+       return 0;
 }
 
 int
@@ -841,18 +924,23 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 {
        struct virtio_net *dev = get_device(vid);
        struct vhost_virtqueue *vq;
+       int ret;
 
        if (!dev)
                return -1;
 
        vq = dev->virtqueue[queue_id];
 
+       rte_spinlock_lock(&vq->access_lock);
+
        if (vq_is_packed(dev))
-               vhost_enable_notify_packed(dev, vq, enable);
+               ret = vhost_enable_notify_packed(dev, vq, enable);
        else
-               vhost_enable_notify_split(dev, vq, enable);
+               ret = vhost_enable_notify_split(dev, vq, enable);
 
-       return 0;
+       rte_spinlock_unlock(&vq->access_lock);
+
+       return ret;
 }
 
 void
@@ -891,6 +979,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 {
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;
+       uint32_t ret = 0;
 
        dev = get_device(vid);
        if (dev == NULL)
@@ -906,10 +995,16 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
        if (vq == NULL)
                return 0;
 
+       rte_spinlock_lock(&vq->access_lock);
+
        if (unlikely(vq->enabled == 0 || vq->avail == NULL))
-               return 0;
+               goto out;
+
+       ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
 
-       return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+out:
+       rte_spinlock_unlock(&vq->access_lock);
+       return ret;
 }
 
 int rte_vhost_get_vdpa_device_id(int vid)