/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <linux/vhost.h>
/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t iova, uint64_t size, uint8_t perm)
+ uint64_t iova, uint64_t *size, uint8_t perm)
{
uint64_t vva, tmp_size;
- if (unlikely(!size))
+ if (unlikely(!*size))
return 0;
- tmp_size = size;
+ tmp_size = *size;
vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
- if (tmp_size == size)
+ if (tmp_size == *size)
return vva;
iova += tmp_size;
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- uint64_t size;
+ uint64_t req_size, size;
if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
goto out;
- size = sizeof(struct vring_desc) * vq->size;
+ req_size = sizeof(struct vring_desc) * vq->size;
+ size = req_size;
vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.desc_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->desc)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->desc || size != req_size)
return -1;
- size = sizeof(struct vring_avail);
- size += sizeof(uint16_t) * vq->size;
+ req_size = sizeof(struct vring_avail);
+ req_size += sizeof(uint16_t) * vq->size;
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ req_size += sizeof(uint16_t);
+ size = req_size;
vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.avail_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->avail)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->avail || size != req_size)
return -1;
- size = sizeof(struct vring_used);
- size += sizeof(struct vring_used_elem) * vq->size;
+ req_size = sizeof(struct vring_used);
+ req_size += sizeof(struct vring_used_elem) * vq->size;
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ req_size += sizeof(uint16_t);
+ size = req_size;
vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.used_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->used)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->used || size != req_size)
return -1;
out:
struct virtio_net *dev;
int i;
- dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
- if (dev == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "Failed to allocate memory for new dev.\n");
- return -1;
- }
-
for (i = 0; i < MAX_VHOST_DEVICE; i++) {
if (vhost_devices[i] == NULL)
break;
}
+
if (i == MAX_VHOST_DEVICE) {
RTE_LOG(ERR, VHOST_CONFIG,
"Failed to find a free slot for new device.\n");
- rte_free(dev);
+ return -1;
+ }
+
+ dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
+ if (dev == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to allocate memory for new dev.\n");
return -1;
}
dev->vid = i;
dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
+ dev->vdpa_dev_id = -1;
+ rte_spinlock_init(&dev->slave_req_lock);
return i;
}
vhost_destroy_device(int vid)
{
struct virtio_net *dev = get_device(vid);
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
if (dev == NULL)
return;
if (dev->flags & VIRTIO_DEV_RUNNING) {
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->dev_close)
+ vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_RUNNING;
dev->notify_ops->destroy_device(vid);
}
vhost_devices[vid] = NULL;
}
+void
+vhost_attach_vdpa_device(int vid, int did)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ if (rte_vdpa_get_device(did) == NULL)
+ return;
+
+ dev->vdpa_dev_id = did;
+}
+
+void
+vhost_detach_vdpa_device(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ vhost_user_host_notifier_ctrl(vid, false);
+
+ dev->vdpa_dev_id = -1;
+}
+
void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
{
struct virtio_net *dev = get_device(vid);
- if (dev == NULL)
+ if (!dev)
return -1;
- if (enable) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "guest notification isn't supported.\n");
- return -1;
- }
-
- dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
+ if (enable)
+ dev->virtqueue[queue_id]->used->flags &=
+ ~VRING_USED_F_NO_NOTIFY;
+ else
+ dev->virtqueue[queue_id]->used->flags |= VRING_USED_F_NO_NOTIFY;
return 0;
}
return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}
+
+int rte_vhost_get_vdpa_device_id(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
+ return dev->vdpa_dev_id;
+}
+
+int rte_vhost_get_log_base(int vid, uint64_t *log_base,
+ uint64_t *log_size)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return -1;
+ }
+
+ *log_base = dev->log_base;
+ *log_size = dev->log_size;
+
+ return 0;
+}
+
+int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+ uint16_t *last_avail_idx, uint16_t *last_used_idx)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return -1;
+ }
+
+ *last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
+ *last_used_idx = dev->virtqueue[queue_id]->last_used_idx;
+
+ return 0;
+}
+
+int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+ uint16_t last_avail_idx, uint16_t last_used_idx)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return -1;
+ }
+
+ dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
+ dev->virtqueue[queue_id]->last_used_idx = last_used_idx;
+
+ return 0;
+}