}
}
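+/*
+ * Mark a guest memory range dirty in the migration log, given its IOVA:
+ * translate the IOVA to a host virtual address through the IOTLB, map
+ * that back to a guest physical address and log the write there. If no
+ * IOTLB entry covers the whole range, the write is not logged.
+ */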
+void
+__vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ uint64_t hva, gpa, map_len;
+ map_len = len;
+
+ hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+ if (map_len != len) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
+ return;
+ }
+
+ gpa = hva_to_gpa(dev, hva, len);
+ if (gpa)
+ __vhost_log_write(dev, gpa, len);
+}
+
void
__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
}
}
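+/*
+ * Same as __vhost_log_write_iova(), but the dirty page is recorded in
+ * the per-virtqueue log cache instead of being written to the log
+ * directly.
+ */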
+void
+__vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ uint64_t hva, gpa, map_len;
+ map_len = len;
+
+ hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+ if (map_len != len) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
+ return;
+ }
+
+ gpa = hva_to_gpa(dev, hva, len);
+ if (gpa)
+ __vhost_log_cache_write(dev, vq, gpa, len);
+}
+
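+/*
+ * Copy a guest indirect descriptor table, which may span several IOTLB
+ * entries, into a single virtually-contiguous buffer allocated with
+ * rte_malloc(). Returns NULL on allocation or translation failure; the
+ * caller owns the returned buffer and must free it.
+ */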
+void *
+vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t desc_addr, uint64_t desc_len)
+{
+ void *idesc;
+ uint64_t src, dst;
+ uint64_t len, remain = desc_len;
+
+ idesc = rte_malloc(__func__, desc_len, 0);
+ if (unlikely(!idesc))
+ return NULL;
+
+ dst = (uint64_t)(uintptr_t)idesc;
+
+ while (remain) {
+ len = remain;
+ src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!src || !len)) {
+ rte_free(idesc);
+ return NULL;
+ }
+
+ rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
+
+ remain -= len;
+ dst += len;
+ desc_addr += len;
+ }
+
+ return idesc;
+}
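+
+/*
+ * Illustrative use (sketch only, not part of this change): a caller
+ * that finds a scattered indirect table copies it before parsing and
+ * frees the copy afterwards:
+ *
+ *	struct vring_desc *idesc;
+ *
+ *	idesc = vhost_alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
+ *	if (unlikely(!idesc))
+ *		return -1;
+ *	(walk idesc[] as if it were the guest's table)
+ *	rte_free(idesc);
+ */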
+
void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
close(vq->kickfd);
}
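+
+/*
+ * Drop the inflight tracking state negotiated through
+ * VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD: clear the split/packed inflight
+ * pointers and free any pending resubmit list.
+ */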
+void
+cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ if (!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+ return;
+
+ if (vq_is_packed(dev)) {
+ if (vq->inflight_packed)
+ vq->inflight_packed = NULL;
+ } else {
+ if (vq->inflight_split)
+ vq->inflight_split = NULL;
+ }
+
+ if (vq->resubmit_inflight) {
+ if (vq->resubmit_inflight->resubmit_list) {
+ free(vq->resubmit_inflight->resubmit_list);
+ vq->resubmit_inflight->resubmit_list = NULL;
+ }
+ free(vq->resubmit_inflight);
+ vq->resubmit_inflight = NULL;
+ }
+}
+
/*
* Unmap any memory, close any file descriptors and
* free any memory owned by a device.
*/
void
cleanup_device(struct virtio_net *dev, int destroy)
{
uint32_t i;

vhost_backend_cleanup(dev);
- for (i = 0; i < dev->nr_vring; i++)
+ for (i = 0; i < dev->nr_vring; i++) {
cleanup_vq(dev->virtqueue[i], destroy);
+ cleanup_vq_inflight(dev, dev->virtqueue[i]);
+ }
}
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
goto out;

if (vq_is_packed(dev)) {
if (vring_translate_packed(dev, vq) < 0)
return -1;
} else {
if (vring_translate_split(dev, vq) < 0)
return -1;
}

out:
vq->access_ok = 1;
return 0;
}
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
+ vq->log_guest_addr = 0;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_wr_unlock(vq);
}

uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
struct virtio_net *dev;
struct vhost_virtqueue *vq;
+ uint16_t ret = 0;
dev = get_device(vid);
if (!dev)
return 0;
vq = dev->virtqueue[queue_id];
- if (!vq->enabled)
- return 0;
- return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
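+ /* Take the access lock so the ring cannot be invalidated while it is read. */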
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(!vq->enabled || vq->avail == NULL))
+ goto out;
+
+ ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+
+out:
+ rte_spinlock_unlock(&vq->access_lock);
+ return ret;
}
-static inline void
+static inline int
vhost_enable_notify_split(struct virtio_net *dev,
struct vhost_virtqueue *vq, int enable)
{
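+ /* The ring may not be accessible yet (e.g. not translated): report failure. */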
+ if (vq->used == NULL)
+ return -1;
+
if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
if (enable)
vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
else
vq->used->flags |= VRING_USED_F_NO_NOTIFY;
} else {
if (enable)
vhost_avail_event(vq) = vq->last_avail_idx;
}
+ return 0;
}
-static inline void
+static inline int
vhost_enable_notify_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq, int enable)
{
uint16_t flags;
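+ /* The device event area may not be accessible yet: report failure. */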
+ if (vq->device_event == NULL)
+ return -1;
+
if (!enable) {
vq->device_event->flags = VRING_EVENT_F_DISABLE;
- return;
+ return 0;
}
flags = VRING_EVENT_F_ENABLE;
rte_smp_wmb();
vq->device_event->flags = flags;
+ return 0;
}
int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
+ int ret;
if (!dev)
return -1;
vq = dev->virtqueue[queue_id];
+ rte_spinlock_lock(&vq->access_lock);
+
if (vq_is_packed(dev))
- vhost_enable_notify_packed(dev, vq, enable);
+ ret = vhost_enable_notify_packed(dev, vq, enable);
else
- vhost_enable_notify_split(dev, vq, enable);
+ ret = vhost_enable_notify_split(dev, vq, enable);
- return 0;
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return ret;
}
uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
struct virtio_net *dev;
struct vhost_virtqueue *vq;
+ uint32_t ret = 0;
dev = get_device(vid);
if (dev == NULL)
return 0;

vq = dev->virtqueue[qid];
if (vq == NULL)
return 0;
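+ /* Same locking scheme as rte_vhost_avail_entries(). */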
+ rte_spinlock_lock(&vq->access_lock);
+
if (unlikely(vq->enabled == 0 || vq->avail == NULL))
- return 0;
+ goto out;
- return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+ ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+
+out:
+ rte_spinlock_unlock(&vq->access_lock);
+ return ret;
}
int rte_vhost_get_vdpa_device_id(int vid)