/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <linux/vhost.h>
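+/*
+ * Convert a guest IOVA to a host virtual address. The translation only
+ * succeeds if the whole *size bytes are mapped contiguously with the
+ * requested permissions; otherwise an IOTLB miss request is sent for
+ * the first untranslated address and 0 is returned.
+ */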
/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t iova, uint64_t size, uint8_t perm)
+ uint64_t iova, uint64_t *size, uint8_t perm)
{
uint64_t vva, tmp_size;
- if (unlikely(!size))
+ if (unlikely(!*size))
return 0;
- tmp_size = size;
+ tmp_size = *size;
vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
- if (tmp_size == size)
+ if (tmp_size == *size)
return vva;
- if (!vhost_user_iotlb_pending_miss(vq, iova + tmp_size, perm)) {
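+ /*
+ * The start of the range may already be translated: skip it so the
+ * miss request targets the first address not in the cache.
+ */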
+ iova += tmp_size;
+
+ if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
/*
* iotlb_lock is read-locked for a full burst,
* but it only protects the iotlb cache.
* In case of IOTLB miss, we might block on the socket,
* which could cause a deadlock with QEMU if an IOTLB update
* is being handled. We have to release it and retake it
* afterwards.
*/
vhost_user_iotlb_rd_unlock(vq);
- vhost_user_iotlb_pending_insert(vq, iova + tmp_size, perm);
- vhost_user_iotlb_miss(dev, iova + tmp_size, perm);
+ vhost_user_iotlb_pending_insert(vq, iova, perm);
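+ /*
+ * If the miss request cannot be sent to the master, remove the
+ * pending entry we just inserted so it does not linger.
+ */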
+ if (vhost_user_iotlb_miss(dev, iova, perm)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+ iova);
+ vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
+ }
vhost_user_iotlb_rd_lock(vq);
}
return 0;
}
-struct virtio_net *
-get_device(int vid)
-{
- struct virtio_net *dev = vhost_devices[vid];
-
- if (unlikely(!dev)) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) device not found.\n", vid);
- }
-
- return dev;
-}
-
void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- uint64_t size;
+ uint64_t req_size, size;
if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
goto out;
- size = sizeof(struct vring_desc) * vq->size;
+ req_size = sizeof(struct vring_desc) * vq->size;
+ size = req_size;
vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.desc_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->desc)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->desc || size != req_size)
return -1;
- size = sizeof(struct vring_avail);
- size += sizeof(uint16_t) * vq->size;
+ req_size = sizeof(struct vring_avail);
+ req_size += sizeof(uint16_t) * vq->size;
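+ /* The avail ring ends with a used_event field when EVENT_IDX is negotiated */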
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ req_size += sizeof(uint16_t);
+ size = req_size;
vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.avail_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->avail)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->avail || size != req_size)
return -1;
- size = sizeof(struct vring_used);
- size += sizeof(struct vring_used_elem) * vq->size;
+ req_size = sizeof(struct vring_used);
+ req_size += sizeof(struct vring_used_elem) * vq->size;
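+ /* The used ring ends with an avail_event field when EVENT_IDX is negotiated */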
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ req_size += sizeof(uint16_t);
+ size = req_size;
vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.used_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->used)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->used || size != req_size)
return -1;
out:
dev->virtqueue[vring_idx] = vq;
init_vring_queue(dev, vring_idx);
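+ /* Serializes datapath and control-path accesses to this vring */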
+ rte_spinlock_init(&vq->access_lock);
dev->nr_vring += 1;
dev->features = 0;
dev->protocol_features = 0;
- dev->flags = 0;
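+ /*
+ * Keep the built-in virtio-net flag: it reflects the application's
+ * configuration, not guest state, so it must survive the reset.
+ */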
+ dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
for (i = 0; i < dev->nr_vring; i++)
reset_vring_queue(dev, i);
struct virtio_net *dev;
int i;
- dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
- if (dev == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "Failed to allocate memory for new dev.\n");
- return -1;
- }
-
for (i = 0; i < MAX_VHOST_DEVICE; i++) {
if (vhost_devices[i] == NULL)
break;
}
+
if (i == MAX_VHOST_DEVICE) {
RTE_LOG(ERR, VHOST_CONFIG,
"Failed to find a free slot for new device.\n");
- rte_free(dev);
+ return -1;
+ }
+
+ dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
+ if (dev == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to allocate memory for new dev.\n");
return -1;
}
vhost_devices[i] = dev;
dev->vid = i;
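+ /* The built-in virtio-net backend is enabled by default */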
+ dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
+ dev->vdpa_dev_id = -1;
+ rte_spinlock_init(&dev->slave_req_lock);
return i;
}
vhost_destroy_device(int vid)
{
struct virtio_net *dev = get_device(vid);
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
if (dev == NULL)
return;
if (dev->flags & VIRTIO_DEV_RUNNING) {
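+ /* Give the vDPA driver a chance to close the device first */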
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->dev_close)
+ vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_RUNNING;
dev->notify_ops->destroy_device(vid);
}
vhost_devices[vid] = NULL;
}
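+
+/* Bind a vhost-user device to a vDPA device; invalid ids are ignored */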
+void
+vhost_attach_vdpa_device(int vid, int did)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ if (rte_vdpa_get_device(did) == NULL)
+ return;
+
+ dev->vdpa_dev_id = did;
+}
+
+void
+vhost_detach_vdpa_device(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
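+ /* Disable the host notifiers set up on behalf of the vDPA device */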
+ vhost_user_host_notifier_ctrl(vid, false);
+
+ dev->vdpa_dev_id = -1;
+}
+
void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
dev->dequeue_zero_copy = 1;
}
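+/*
+ * Applications that implement their own datapath (e.g. vhost-scsi or
+ * vDPA) can disable the built-in virtio-net backend, which gates the
+ * library's net-specific data-path APIs.
+ */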
+void
+vhost_set_builtin_virtio_net(int vid, bool enable)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ if (enable)
+ dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
+ else
+ dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
+}
+
int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
if (!vq)
return -1;
- vhost_vring_call(vq);
+ vhost_vring_call(dev, vq);
return 0;
}
{
struct virtio_net *dev = get_device(vid);
- if (dev == NULL)
- return -1;
-
- if (enable) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "guest notification isn't supported.\n");
+ if (!dev)
return -1;
- }
- dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
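+ /*
+ * VRING_USED_F_NO_NOTIFY tells the guest it may skip notifying us
+ * when new buffers are added to the avail ring.
+ */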
+ if (enable)
+ dev->virtqueue[queue_id]->used->flags &=
+ ~VRING_USED_F_NO_NOTIFY;
+ else
+ dev->virtqueue[queue_id]->used->flags |= VRING_USED_F_NO_NOTIFY;
return 0;
}
return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}
+
+int
+rte_vhost_get_vdpa_device_id(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
+ return dev->vdpa_dev_id;
+}
+
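+/*
+ * Expose the dirty page logging area so external backends (e.g. vDPA
+ * drivers) can log guest pages they touch during live migration.
+ */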
+int
+rte_vhost_get_log_base(int vid, uint64_t *log_base,
+ uint64_t *log_size)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return -1;
+ }
+
+ *log_base = dev->log_base;
+ *log_size = dev->log_size;
+
+ return 0;
+}
+
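+/*
+ * Getting and setting the vring base indexes lets an external backend
+ * save and restore ring state, typically across live migration.
+ */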
+int
+rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+ uint16_t *last_avail_idx, uint16_t *last_used_idx)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return -1;
+ }
+
+ *last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
+ *last_used_idx = dev->virtqueue[queue_id]->last_used_idx;
+
+ return 0;
+}
+
+int
+rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+ uint16_t last_avail_idx, uint16_t last_used_idx)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return -1;
+ }
+
+ dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
+ dev->virtqueue[queue_id]->last_used_idx = last_used_idx;
+
+ return 0;
+}