/* Vector value used to disable MSI for queue. */
#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+/* The alignment to use between the consumer and producer parts of the vring. */
+#define VIRTIO_VRING_ALIGN 4096
+
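The value carries over the page-sized (4 KiB) alignment of the
VIRTIO_PCI_VRING_ALIGN macro it replaces; callers round ring offsets up to it
with the existing RTE_ALIGN_CEIL macro. A minimal sketch (the offset is a
made-up value for illustration):

	uintptr_t off = 0x1234;
	/* Rounds up to the next 4 KiB boundary, i.e. 0x2000. */
	uintptr_t aligned = RTE_ALIGN_CEIL(off, VIRTIO_VRING_ALIGN);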
/*
* This structure is just a reference for reading the net device specific
* config space; it is merely a shadow structure.
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
if (virtio_with_packed_queue(vq->hw)) {
vring_init_packed(&vq->vq_packed.ring, ring_mem,
- VIRTIO_PCI_VRING_ALIGN, size);
+ VIRTIO_VRING_ALIGN, size);
vring_desc_init_packed(vq, size);
} else {
struct vring *vr = &vq->vq_split.ring;
- vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+ vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
vring_desc_init_split(vr->desc, size);
}
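For context, vring_init_split() is where the alignment argument actually
shapes the layout. Roughly (a sketch of the split-ring layout following the
virtio 1.x spec, not the exact helper body):

	vr->num   = size;
	vr->desc  = (struct vring_desc *)ring_mem;
	vr->avail = (struct vring_avail *)((char *)ring_mem +
			size * sizeof(struct vring_desc));
	/* The used ring starts at the next VIRTIO_VRING_ALIGN boundary. */
	vr->used  = (void *)RTE_ALIGN_CEIL((uintptr_t)&vr->avail->ring[size],
			VIRTIO_VRING_ALIGN);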
/*
/*
* Reserve a memzone for vring elements
*/
- size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
- vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ size = vring_size(hw, vq_size, VIRTIO_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_VRING_ALIGN);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
numa_node, RTE_MEMZONE_IOVA_CONTIG,
- VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
mz = rte_memzone_lookup(vq_name);
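To make the reserved size concrete, a worked example for a 256-entry split
queue (16-byte descriptors, 2-byte avail entries, 8-byte used entries,
ignoring the optional event-index fields):

	desc table:  256 * 16     = 4096 bytes
	avail ring:  4 + 256 * 2  =  516 bytes, so the used ring starts at
	             RTE_ALIGN_CEIL(4612, VIRTIO_VRING_ALIGN) = 8192
	used ring:   4 + 256 * 8  = 2052 bytes

	vring_size() returns 8192 + 2052 = 10244, and vq_ring_size is rounded
	up again to 12288.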
sizeof(struct vring_packed_desc);
used_addr = RTE_ALIGN_CEIL(avail_addr +
sizeof(struct vring_packed_desc_event),
- VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_VRING_ALIGN);
vring->num = vq->vq_nentries;
vring->desc = (void *)(uintptr_t)desc_addr;
vring->driver = (void *)(uintptr_t)avail_addr;
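For the packed format the same constant separates the driver and device event
areas. With 256 entries (16-byte packed descriptors, 4-byte event structures,
and desc_addr itself 4 KiB aligned, as the aligned memzone reservation above
guarantees):

	avail_addr = desc_addr + 256 * 16 = desc_addr + 4096
	used_addr  = RTE_ALIGN_CEIL(desc_addr + 4096 + 4, VIRTIO_VRING_ALIGN)
	           = desc_addr + 8192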
avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
ring[vq->vq_nentries]),
- VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_VRING_ALIGN);
dev->vrings[queue_idx].num = vq->vq_nentries;
dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
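The offsetof() expression is just the size of the avail ring header plus its
entries, so the same 256-entry example (with a 4 KiB aligned desc_addr) gives:

	offsetof(struct vring_avail, ring[256]) = 4 + 256 * 2 = 516
	used_addr = RTE_ALIGN_CEIL(desc_addr + 4096 + 516, VIRTIO_VRING_ALIGN)
	          = desc_addr + 8192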