if (tmp_size == size)
return vva;
- if (!vhost_user_iotlb_pending_miss(vq, iova + tmp_size, perm)) {
+ iova += tmp_size;
+
+ if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
/*
* iotlb_lock is read-locked for a full burst,
* but it only protects the iotlb cache.
*/
vhost_user_iotlb_rd_unlock(vq);
- vhost_user_iotlb_pending_insert(vq, iova + tmp_size, perm);
- vhost_user_iotlb_miss(dev, iova + tmp_size, perm);
+ vhost_user_iotlb_pending_insert(vq, iova, perm);
+ if (vhost_user_iotlb_miss(dev, iova, perm)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+ iova);
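+ /* No reply will come for a failed miss request, so drop the pending entry */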
+ vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
+ }
vhost_user_iotlb_rd_lock(vq);
}
return 0;
}
-struct virtio_net *
-get_device(int vid)
-{
- struct virtio_net *dev = vhost_devices[vid];
-
- if (unlikely(!dev)) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) device not found.\n", vid);
- }
-
- return dev;
-}
-
-static void
+void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
if ((vq->callfd >= 0) && (destroy != 0))
cleanup_vq(dev->virtqueue[i], destroy);
}
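+/* Free a virtqueue and the resources allocated to it. */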
+void
+free_vq(struct vhost_virtqueue *vq)
+{
+ rte_free(vq->shadow_used_ring);
+ rte_free(vq->batch_copy_elems);
+ rte_mempool_free(vq->iotlb_pool);
+ rte_free(vq);
+}
+
/*
* Release virtqueues and device memory.
*/
static void
free_device(struct virtio_net *dev)
{
uint32_t i;
- struct vhost_virtqueue *vq;
-
- for (i = 0; i < dev->nr_vring; i++) {
- vq = dev->virtqueue[i];
- rte_free(vq->shadow_used_ring);
- rte_free(vq->batch_copy_elems);
- rte_mempool_free(vq->iotlb_pool);
- rte_free(vq);
- }
+ for (i = 0; i < dev->nr_vring; i++)
+ free_vq(dev->virtqueue[i]);
rte_free(dev);
}
dev->virtqueue[vring_idx] = vq;
init_vring_queue(dev, vring_idx);
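+ /* Initialize the lock protecting accesses to this virtqueue */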
+ rte_spinlock_init(&vq->access_lock);
dev->nr_vring += 1;
dev->features = 0;
dev->protocol_features = 0;
- dev->flags = 0;
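+ /* Preserve only the built-in virtio-net flag, clear all other device flags */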
+ dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
for (i = 0; i < dev->nr_vring; i++)
reset_vring_queue(dev, i);
vhost_devices[i] = dev;
dev->vid = i;
+ dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
+ dev->vdpa_dev_id = -1;
return i;
}
vhost_devices[vid] = NULL;
}
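+/* Attach a vDPA device to the vhost device, if both exist. */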
+void
+vhost_attach_vdpa_device(int vid, int did)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ if (rte_vdpa_get_device(did) == NULL)
+ return;
+
+ dev->vdpa_dev_id = did;
+}
+
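+/* Detach the vDPA device from the vhost device. */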
+void
+vhost_detach_vdpa_device(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ dev->vdpa_dev_id = -1;
+}
+
void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
dev->dequeue_zero_copy = 1;
}
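+/* Enable or disable the built-in virtio-net data path for the device. */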
+void
+vhost_set_builtin_virtio_net(int vid, bool enable)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ if (enable)
+ dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
+ else
+ dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
+}
+
int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
if (!vq)
return -1;
- vhost_vring_call(vq);
+ vhost_vring_call(dev, vq);
return 0;
}
return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}
+
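+/* Return the id of the vDPA device attached to the vhost device, -1 if none. */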
+int
+rte_vhost_get_vdpa_device_id(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
+ return dev->vdpa_dev_id;
+}