#include <numaif.h>
#endif
+#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
+#include <rte_rwlock.h>
+#include "iotlb.h"
#include "vhost.h"
+#include "vhost_user.h"
struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
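+/*
+ * Translate a guest IOVA into a host virtual address using the
+ * per-virtqueue IOTLB cache. If the full range cannot be translated,
+ * an IOTLB miss is reported to the vhost-user master (unless one is
+ * already pending) and 0 is returned so the caller can retry later.
+ */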
+/* Called with iotlb_lock read-locked */
+uint64_t
+__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size, uint8_t perm)
+{
+ uint64_t vva, tmp_size;
+
+ if (unlikely(!size))
+ return 0;
+
+ tmp_size = size;
+
+ vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
+ if (tmp_size == size)
+ return vva;
+
+ if (!vhost_user_iotlb_pending_miss(vq, iova + tmp_size, perm)) {
+ /*
+ * iotlb_lock is read-locked for a full burst,
+ * but it only protects the iotlb cache.
+ * In case of IOTLB miss, we might block on the socket,
+ * which could cause a deadlock with QEMU if an IOTLB update
+ * is being handled. We can safely unlock here to avoid it.
+ */
+ vhost_user_iotlb_rd_unlock(vq);
+
+ vhost_user_iotlb_pending_insert(vq, iova + tmp_size, perm);
+ vhost_user_iotlb_miss(dev, iova + tmp_size, perm);
+
+ vhost_user_iotlb_rd_lock(vq);
+ }
+
+ return 0;
+}
+
struct virtio_net *
get_device(int vid)
{
vhost_backend_cleanup(dev);
- for (i = 0; i < dev->virt_qp_nb; i++) {
- cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ], destroy);
- cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ], destroy);
- }
+ for (i = 0; i < dev->nr_vring; i++)
+ cleanup_vq(dev->virtqueue[i], destroy);
}
/*
free_device(struct virtio_net *dev)
{
uint32_t i;
- struct vhost_virtqueue *rxq, *txq;
-
- for (i = 0; i < dev->virt_qp_nb; i++) {
- rxq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
- txq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];
+ struct vhost_virtqueue *vq;
- rte_free(rxq->shadow_used_ring);
- rte_free(txq->shadow_used_ring);
+ for (i = 0; i < dev->nr_vring; i++) {
+ vq = dev->virtqueue[i];
- /* rxq and txq are allocated together as queue-pair */
- rte_free(rxq);
+ rte_free(vq->shadow_used_ring);
+ rte_free(vq->batch_copy_elems);
+ rte_mempool_free(vq->iotlb_pool);
+ rte_free(vq);
}
rte_free(dev);
}
-static void
-init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
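+/*
+ * Translate the descriptor, available and used ring guest addresses of a
+ * virtqueue into host virtual addresses. Only required when
+ * VIRTIO_F_IOMMU_PLATFORM has been negotiated; otherwise the rings are
+ * simply marked as accessible.
+ */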
+int
+vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- memset(vq, 0, sizeof(struct vhost_virtqueue));
+ uint64_t size;
- vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
- vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ goto out;
- /* Backends are set to -1 indicating an inactive device. */
- vq->backend = -1;
+ size = sizeof(struct vring_desc) * vq->size;
+ vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
+ vq->ring_addrs.desc_user_addr,
+ size, VHOST_ACCESS_RW);
+ if (!vq->desc)
+ return -1;
- /* always set the default vq pair to enabled */
- if (qp_idx == 0)
- vq->enabled = 1;
+ size = sizeof(struct vring_avail);
+ size += sizeof(uint16_t) * vq->size;
+ vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
+ vq->ring_addrs.avail_user_addr,
+ size, VHOST_ACCESS_RW);
+ if (!vq->avail)
+ return -1;
- TAILQ_INIT(&vq->zmbuf_list);
+ size = sizeof(struct vring_used);
+ size += sizeof(struct vring_used_elem) * vq->size;
+ vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
+ vq->ring_addrs.used_user_addr,
+ size, VHOST_ACCESS_RW);
+ if (!vq->used)
+ return -1;
+
+out:
+ vq->access_ok = 1;
+
+ return 0;
}
-static void
-init_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
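+/*
+ * Invalidate the cached ring addresses so they are re-translated before the
+ * next access. The IOTLB write lock is held while clearing them when an
+ * IOMMU is in use.
+ */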
+void
+vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- uint32_t base_idx = qp_idx * VIRTIO_QNUM;
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_wr_lock(vq);
+
+ vq->access_ok = 0;
+ vq->desc = NULL;
+ vq->avail = NULL;
+ vq->used = NULL;
- init_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
- init_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_wr_unlock(vq);
}
static void
-reset_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
+init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
- int callfd;
+ struct vhost_virtqueue *vq;
- callfd = vq->callfd;
- init_vring_queue(vq, qp_idx);
- vq->callfd = callfd;
+ if (vring_idx >= VHOST_MAX_VRING) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed not init vring, out of bound (%d)\n",
+ vring_idx);
+ return;
+ }
+
+ vq = dev->virtqueue[vring_idx];
+
+ memset(vq, 0, sizeof(struct vhost_virtqueue));
+
+ vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+
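+ /* Initialize the per-virtqueue IOTLB cache */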
+ vhost_user_iotlb_init(dev, vring_idx);
+ /* Backends are set to -1 indicating an inactive device. */
+ vq->backend = -1;
+
+ TAILQ_INIT(&vq->zmbuf_list);
}
static void
-reset_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
+reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
- uint32_t base_idx = qp_idx * VIRTIO_QNUM;
+ struct vhost_virtqueue *vq;
+ int callfd;
- reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
- reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
+ if (vring_idx >= VHOST_MAX_VRING) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed not init vring, out of bound (%d)\n",
+ vring_idx);
+ return;
+ }
+
+ vq = dev->virtqueue[vring_idx];
+ callfd = vq->callfd;
+ init_vring_queue(dev, vring_idx);
+ vq->callfd = callfd;
}
int
-alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
+alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
- struct vhost_virtqueue *virtqueue = NULL;
- uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
- uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+ struct vhost_virtqueue *vq;
- virtqueue = rte_malloc(NULL,
- sizeof(struct vhost_virtqueue) * VIRTIO_QNUM, 0);
- if (virtqueue == NULL) {
+ vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
+ if (vq == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "Failed to allocate memory for virt qp:%d.\n", qp_idx);
+ "Failed to allocate memory for vring:%u.\n", vring_idx);
return -1;
}
- dev->virtqueue[virt_rx_q_idx] = virtqueue;
- dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ;
+ dev->virtqueue[vring_idx] = vq;
+ init_vring_queue(dev, vring_idx);
- init_vring_queue_pair(dev, qp_idx);
-
- dev->virt_qp_nb += 1;
+ dev->nr_vring += 1;
return 0;
}
/*
* Reset some variables in device structure, while keeping few
- * others untouched, such as vid, ifname, virt_qp_nb: they
+ * others untouched, such as vid, ifname, nr_vring: they
* should be same unless the device is removed.
*/
void
dev->protocol_features = 0;
dev->flags = 0;
- for (i = 0; i < dev->virt_qp_nb; i++)
- reset_vring_queue_pair(dev, i);
+ for (i = 0; i < dev->nr_vring; i++)
+ reset_vring_queue(dev, i);
}
/*
vhost_devices[i] = dev;
dev->vid = i;
+ dev->slave_req_fd = -1;
return i;
}
if (!(dev->flags & VIRTIO_DEV_READY))
return -EAGAIN;
- if (!(dev->features & VIRTIO_NET_F_MTU))
+ if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
return -ENOTSUP;
*mtu = dev->mtu;
MPOL_F_NODE | MPOL_F_ADDR);
if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) failed to query numa node: %d\n", vid, ret);
+ "(%d) failed to query numa node: %s\n",
+ vid, rte_strerror(errno));
return -1;
}
if (dev == NULL)
return 0;
- return dev->virt_qp_nb;
+ return dev->nr_vring / 2;
+}
+
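+/*
+ * Unlike rte_vhost_get_queue_num(), which reports queue pairs, this returns
+ * the total number of vrings the device has.
+ */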
+uint16_t
+rte_vhost_get_vring_num(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return 0;
+
+ return dev->nr_vring;
}
int
return 0;
}
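+/* Return the virtio features negotiated with the guest. */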
+int
+rte_vhost_get_negotiated_features(int vid, uint64_t *features)
+{
+ struct virtio_net *dev;
+
+ dev = get_device(vid);
+ if (!dev)
+ return -1;
+
+ *features = dev->features;
+ return 0;
+}
+
int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
return -1;
size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
- m = malloc(size);
+ m = malloc(sizeof(struct rte_vhost_memory) + size);
if (!m)
return -1;
return 0;
}
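+/*
+ * Expose the ring addresses, eventfds and size of a virtqueue to the
+ * application.
+ */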
+int
+rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
+ struct rte_vhost_vring *vring)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (!dev)
+ return -1;
+
+ if (vring_idx >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (!vq)
+ return -1;
+
+ vring->desc = vq->desc;
+ vring->avail = vq->avail;
+ vring->used = vq->used;
+ vring->log_guest_addr = vq->log_guest_addr;
+
+ vring->callfd = vq->callfd;
+ vring->kickfd = vq->kickfd;
+ vring->size = vq->size;
+
+ return 0;
+}
+
uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
return 0;
}
+
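+/* Mark the guest physical range [addr, addr + len) dirty in the migration log. */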
+void
+rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ vhost_log_write(dev, addr, len);
+}
+
+void
+rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
+ uint64_t offset, uint64_t len)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (dev == NULL)
+ return;
+
+ if (vring_idx >= VHOST_MAX_VRING)
+ return;
+ vq = dev->virtqueue[vring_idx];
+ if (!vq)
+ return;
+
+ vhost_log_used_vring(dev, vq, offset, len);
+}
+
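+/*
+ * Return the number of descriptors the guest has made available on the given
+ * virtqueue that have not yet been consumed.
+ */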
+uint32_t
+rte_vhost_rx_queue_count(int vid, uint16_t qid)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (dev == NULL)
+ return 0;
+
+ if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, qid);
+ return 0;
+ }
+
+ vq = dev->virtqueue[qid];
+ if (vq == NULL)
+ return 0;
+
+ if (unlikely(vq->enabled == 0 || vq->avail == NULL))
+ return 0;
+
+ return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+}