Add code to set up packed queues when enabled: map the packed
descriptor ring during address translation, initialize the avail/used
wrap counters, and make the vring readiness checks aware of the packed
layout.
Signed-off-by: Yuanhan Liu <yliu@fridaylinux.org>
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Acked-by: Tiwei Bie <tiwei.bie@intel.com>
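For reference, the packed-ring basics this patch builds on, introduced
earlier in the series. A minimal sketch (field and macro names follow
the virtio 1.1 packed ring draft and may differ in detail):

    /* One packed-ring descriptor. A single descriptor ring replaces
     * the split ring's separate desc/avail/used structures. */
    struct vring_packed_desc {
            uint64_t addr;   /* guest buffer address */
            uint32_t len;    /* buffer length */
            uint16_t id;     /* buffer id, echoed back when used */
            uint16_t flags;  /* VRING_DESC_F_AVAIL/VRING_DESC_F_USED */
    };

    /* True when VIRTIO_F_RING_PACKED was negotiated for this device. */
    static inline bool
    vq_is_packed(struct virtio_net *dev)
    {
            return dev->features & (1ULL << VIRTIO_F_RING_PACKED);
    }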
rte_free(dev);
}
-int
-vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+static int
+vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
uint64_t req_size, size;
- if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
- goto out;
-
req_size = sizeof(struct vring_desc) * vq->size;
size = req_size;
vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
		vq->ring_addrs.desc_user_addr, &size, VHOST_ACCESS_RW);
if (!vq->used || size != req_size)
return -1;
+ return 0;
+}
+
+static int
+vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ uint64_t req_size, size;
+
+ req_size = sizeof(struct vring_packed_desc) * vq->size;
+ size = req_size;
+ vq->desc_packed =
+ (struct vring_packed_desc *)(uintptr_t)vhost_iova_to_vva(dev,
+ vq, vq->ring_addrs.desc_user_addr,
+ &size, VHOST_ACCESS_RW);
+ if (!vq->desc_packed || size != req_size)
+ return -1;
+
+ return 0;
+}
+
+int
+vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ goto out;
+
+ if (vq_is_packed(dev)) {
+ if (vring_translate_packed(dev, vq) < 0)
+ return -1;
+ } else {
+ if (vring_translate_split(dev, vq) < 0)
+ return -1;
+ }
out:
vq->access_ok = 1;

return 0;
}
dev->virtqueue[vring_idx] = vq;
init_vring_queue(dev, vring_idx);
rte_spinlock_init(&vq->access_lock);
+ vq->avail_wrap_counter = 1;
+ vq->used_wrap_counter = 1;
dev->nr_vring += 1;
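Both wrap counters start at 1, per the packed ring layout: a descriptor
slot is available when its AVAIL flag matches the ring's wrap counter
and its USED flag does not. A sketch of the check a consumer performs
(the datapath that uses these counters lands later in the series; the
flag values below are from the virtio 1.1 draft):

    #define VRING_DESC_F_AVAIL (1ULL << 7)
    #define VRING_DESC_F_USED  (1ULL << 15)

    static inline bool
    desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
    {
            /* AVAIL must match the counter, USED must not: both flags
             * flip meaning on every wrap of the ring. */
            return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
                    wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
    }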
* Structure contains variables relevant to RX/TX virtqueues.
*/
struct vhost_virtqueue {
- struct vring_desc *desc;
+ union {
+ struct vring_desc *desc;
+ struct vring_packed_desc *desc_packed;
+ };
struct vring_avail *avail;
struct vring_used *used;
uint32_t size;
struct batch_copy_elem *batch_copy_elems;
uint16_t batch_copy_nb_elems;
+ bool used_wrap_counter;
+ bool avail_wrap_counter;
struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
uint16_t log_cache_nb_elem;
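A virtqueue is negotiated as either split or packed, never both at
once, so the two descriptor pointers can share storage in a union and
existing split-ring code keeps using vq->desc unchanged.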
struct vhost_vring_addr *addr = &vq->ring_addrs;
uint64_t len;
+ if (vq_is_packed(dev)) {
+ len = sizeof(struct vring_packed_desc) * vq->size;
+ vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
+ ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
+ vq->log_guest_addr = 0;
+ if (vq->desc_packed == NULL ||
+     len != sizeof(struct vring_packed_desc) * vq->size) {
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ "(%d) failed to map desc_packed ring.\n",
+ dev->vid);
+ return dev;
+ }
+
+ dev = numa_realloc(dev, vq_index);
+ vq = dev->virtqueue[vq_index];
+ addr = &vq->ring_addrs;
+
+ return dev;
+ }
+
/* The addresses are converted from QEMU virtual to Vhost virtual. */
if (vq->desc && vq->avail && vq->used)
return dev;
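In the packed branch above, as in the split path, numa_realloc() may
move the device and virtqueue structures to the NUMA node backing the
descriptor ring, which is why vq and addr are re-fetched right after
the call.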
return -1;
}
-static int
-vq_is_ready(struct vhost_virtqueue *vq)
+static bool
+vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- return vq && vq->desc && vq->avail && vq->used &&
+ bool rings_ok;
+
+ if (!vq)
+ return false;
+
+ if (vq_is_packed(dev))
+ rings_ok = !!vq->desc_packed;
+ else
+ rings_ok = vq->desc && vq->avail && vq->used;
+
+ return rings_ok &&
vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}
for (i = 0; i < dev->nr_vring; i++) {
vq = dev->virtqueue[i];
- if (!vq_is_ready(vq))
+ if (!vq_is_ready(dev, vq))
return 0;
}