vhost: translate incoming log address to GPA
[dpdk.git] / lib / librte_vhost / vhost.c
index 7d427b6..76e7534 100644 (file)
@@ -200,6 +200,39 @@ __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
        }
 }
 
+/*
+ * Allocate a linear host-memory copy of a guest indirect descriptor table.
+ *
+ * The table starts at guest IO virtual address @desc_addr and is
+ * @desc_len bytes long.  The guest range may not be contiguous in host
+ * virtual memory, so it is copied chunk by chunk, translating each
+ * contiguous IOVA range with vhost_iova_to_vva().
+ *
+ * Returns a buffer owned by the caller (must be released with
+ * rte_free()) containing the whole table, or NULL on allocation or
+ * translation failure.
+ */
+void *
+vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
+               uint64_t desc_addr, uint64_t desc_len)
+{
+       void *idesc;
+       uint64_t src, dst;
+       uint64_t len, remain = desc_len;
+
+       idesc = rte_malloc(__func__, desc_len, 0);
+       if (unlikely(!idesc))
+               return NULL;
+
+       dst = (uint64_t)(uintptr_t)idesc;
+
+       while (remain) {
+               /*
+                * Ask for the full remainder; vhost_iova_to_vva() shrinks
+                * @len to the size of the contiguous mapped region — the
+                * loop below advances by whatever was actually translated.
+                */
+               len = remain;
+               src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
+                               VHOST_ACCESS_RO);
+               if (unlikely(!src || !len)) {
+                       /* Untranslatable (unmapped) region: abort the copy. */
+                       rte_free(idesc);
+                       return NULL;
+               }
+
+               rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
+
+               remain -= len;
+               dst += len;
+               desc_addr += len;
+       }
+
+       return idesc;
+}
+
 void
 cleanup_vq(struct vhost_virtqueue *vq, int destroy)
 {
@@ -325,7 +358,7 @@ vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 
        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
-               goto out;
+               return -1;
 
        if (vq_is_packed(dev)) {
                if (vring_translate_packed(dev, vq) < 0)
@@ -334,7 +367,6 @@ vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
                if (vring_translate_split(dev, vq) < 0)
                        return -1;
        }
-out:
        vq->access_ok = 1;
 
        return 0;
@@ -350,6 +382,7 @@ vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;
+       vq->log_guest_addr = 0;
 
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_wr_unlock(vq);
@@ -753,22 +786,33 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
 {
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;
+       uint16_t ret = 0;
 
        dev = get_device(vid);
        if (!dev)
                return 0;
 
        vq = dev->virtqueue[queue_id];
-       if (!vq->enabled)
-               return 0;
 
-       return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+       rte_spinlock_lock(&vq->access_lock);
+
+       if (unlikely(!vq->enabled || vq->avail == NULL))
+               goto out;
+
+       ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+
+out:
+       rte_spinlock_unlock(&vq->access_lock);
+       return ret;
 }
 
-static inline void
+static inline int
 vhost_enable_notify_split(struct virtio_net *dev,
                struct vhost_virtqueue *vq, int enable)
 {
+       if (vq->used == NULL)
+               return -1;
+
        if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
                if (enable)
                        vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
@@ -778,17 +822,21 @@ vhost_enable_notify_split(struct virtio_net *dev,
                if (enable)
                        vhost_avail_event(vq) = vq->last_avail_idx;
        }
+       return 0;
 }
 
-static inline void
+static inline int
 vhost_enable_notify_packed(struct virtio_net *dev,
                struct vhost_virtqueue *vq, int enable)
 {
        uint16_t flags;
 
+       if (vq->device_event == NULL)
+               return -1;
+
        if (!enable) {
                vq->device_event->flags = VRING_EVENT_F_DISABLE;
-               return;
+               return 0;
        }
 
        flags = VRING_EVENT_F_ENABLE;
@@ -801,6 +849,7 @@ vhost_enable_notify_packed(struct virtio_net *dev,
        rte_smp_wmb();
 
        vq->device_event->flags = flags;
+       return 0;
 }
 
 int
@@ -808,18 +857,23 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 {
        struct virtio_net *dev = get_device(vid);
        struct vhost_virtqueue *vq;
+       int ret;
 
        if (!dev)
                return -1;
 
        vq = dev->virtqueue[queue_id];
 
+       rte_spinlock_lock(&vq->access_lock);
+
        if (vq_is_packed(dev))
-               vhost_enable_notify_packed(dev, vq, enable);
+               ret = vhost_enable_notify_packed(dev, vq, enable);
        else
-               vhost_enable_notify_split(dev, vq, enable);
+               ret = vhost_enable_notify_split(dev, vq, enable);
 
-       return 0;
+       rte_spinlock_unlock(&vq->access_lock);
+
+       return ret;
 }
 
 void
@@ -858,6 +912,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 {
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;
+       uint32_t ret = 0;
 
        dev = get_device(vid);
        if (dev == NULL)
@@ -873,10 +928,16 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
        if (vq == NULL)
                return 0;
 
+       rte_spinlock_lock(&vq->access_lock);
+
        if (unlikely(vq->enabled == 0 || vq->avail == NULL))
-               return 0;
+               goto out;
 
-       return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+       ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+
+out:
+       rte_spinlock_unlock(&vq->access_lock);
+       return ret;
 }
 
 int rte_vhost_get_vdpa_device_id(int vid)