{
struct virtqueue *vq = cvq->vq;
int head;
- struct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct virtio_pmd_ctrl *result;
uint16_t flags;
int sum = 0;
struct vring_packed {
unsigned int num;
- struct vring_packed_desc *desc_packed;
- struct vring_packed_desc_event *driver_event;
- struct vring_packed_desc_event *device_event;
-
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
};
struct vring {
unsigned int num)
{
vr->num = num;
- vr->desc_packed = (struct vring_packed_desc *)p;
- vr->driver_event = (struct vring_packed_desc_event *)(p +
+ vr->desc = (struct vring_packed_desc *)p;
+ vr->driver = (struct vring_packed_desc_event *)(p +
vr->num * sizeof(struct vring_packed_desc));
- vr->device_event = (struct vring_packed_desc_event *)
- RTE_ALIGN_CEIL(((uintptr_t)vr->driver_event +
+ vr->device = (struct vring_packed_desc_event *)
+ RTE_ALIGN_CEIL(((uintptr_t)vr->driver +
sizeof(struct vring_packed_desc_event)), align);
}
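The helper above lays the three packed-ring areas out back to back: the descriptor array, the driver (avail) event structure, and the device (used) event structure rounded up to the requested alignment. A minimal sketch of that arithmetic, assuming the virtio 1.1 sizes of 16 bytes per descriptor and 4 bytes per event structure (the concrete numbers below are illustrative, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int num = 256;        /* example queue size */
        unsigned long align = 4096;    /* example ring alignment */
        uintptr_t base = 0x100000;     /* pretend ring base address */

        uintptr_t desc   = base;                       /* descriptor array       */
        uintptr_t driver = desc + (uintptr_t)num * 16; /* driver event structure */
        uintptr_t device = (driver + 4 + align - 1) & ~(uintptr_t)(align - 1);
                                                       /* device event, aligned  */

        printf("desc=%#lx driver=%#lx device=%#lx\n",
               (unsigned long)desc, (unsigned long)driver, (unsigned long)device);
        return 0;
    }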
struct vring_packed_desc *desc;
uint16_t i;
- desc = vq->vq_packed.ring.desc_packed;
+ desc = vq->vq_packed.ring.desc;
for (i = 0; i < num; i++) {
used_idx = vq->vq_used_cons_idx;
{
uint16_t used_idx, id, curr_id, free_cnt = 0;
uint16_t size = vq->vq_nentries;
- struct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
{
uint16_t used_idx, id;
uint16_t size = vq->vq_nentries;
- struct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
struct rte_mbuf **cookie, uint16_t num)
{
- struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc_packed;
+ struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
uint16_t flags = vq->vq_packed.cached_flags;
struct virtio_hw *hw = vq->hw;
struct vq_desc_extra *dxp;
id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
idx = vq->vq_avail_idx;
- dp = &vq->vq_packed.ring.desc_packed[idx];
+ dp = &vq->vq_packed.ring.desc[idx];
dxp = &vq->vq_descx[id];
dxp->ndescs = 1;
head_idx = vq->vq_avail_idx;
idx = head_idx;
prev = head_idx;
- start_dp = vq->vq_packed.ring.desc_packed;
+ start_dp = vq->vq_packed.ring.desc;
- head_dp = &vq->vq_packed.ring.desc_packed[idx];
+ head_dp = &vq->vq_packed.ring.desc[idx];
head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
head_flags |= vq->vq_packed.cached_flags;
if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
addr.desc_user_addr =
- (uint64_t)(uintptr_t)pq_vring->desc_packed;
+ (uint64_t)(uintptr_t)pq_vring->desc;
addr.avail_user_addr =
- (uint64_t)(uintptr_t)pq_vring->driver_event;
+ (uint64_t)(uintptr_t)pq_vring->driver;
addr.used_user_addr =
- (uint64_t)(uintptr_t)pq_vring->device_event;
+ (uint64_t)(uintptr_t)pq_vring->device;
} else {
addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
n_descs++;
idx_status = idx_data;
- while (vring->desc_packed[idx_status].flags & VRING_DESC_F_NEXT) {
+ while (vring->desc[idx_status].flags & VRING_DESC_F_NEXT) {
idx_status++;
if (idx_status >= dev->queue_size)
idx_status -= dev->queue_size;
n_descs++;
}
- hdr = (void *)(uintptr_t)vring->desc_packed[idx_hdr].addr;
+ hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
if (hdr->class == VIRTIO_NET_CTRL_MQ &&
hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
uint16_t queues;
queues = *(uint16_t *)(uintptr_t)
- vring->desc_packed[idx_data].addr;
+ vring->desc[idx_data].addr;
status = virtio_user_handle_mq(dev, queues);
}
/* Update status */
*(virtio_net_ctrl_ack *)(uintptr_t)
- vring->desc_packed[idx_status].addr = status;
+ vring->desc[idx_status].addr = status;
/* Update used descriptor */
- vring->desc_packed[idx_hdr].id = vring->desc_packed[idx_status].id;
- vring->desc_packed[idx_hdr].len = sizeof(status);
+ vring->desc[idx_hdr].id = vring->desc[idx_status].id;
+ vring->desc[idx_hdr].len = sizeof(status);
return n_descs;
}
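For reference, the handler above expects the control command to span a descriptor chain of the form [header][data ...][status]: it advances over data descriptors while their NEXT flag is set, reads the class/command from the header buffer, and writes the ack into the status buffer. A sketch of how such a chain could be laid out on the guest side, using local stand-in types and flag values rather than the driver's real ones:

    #include <stdint.h>

    struct demo_ctrl_hdr { uint8_t class; uint8_t cmd; };   /* e.g. MQ / VQ_PAIRS_SET */
    typedef uint8_t demo_ctrl_ack;                          /* status byte written back */

    struct demo_desc { uint64_t addr; uint32_t len; uint16_t id; uint16_t flags; };
    #define DEMO_F_NEXT  0x1   /* stand-in for VRING_DESC_F_NEXT  */
    #define DEMO_F_WRITE 0x2   /* stand-in for VRING_DESC_F_WRITE */

    /* Build a three-descriptor chain: header, 16-bit payload, status. */
    static void demo_fill_mq_cmd(struct demo_desc d[3], struct demo_ctrl_hdr *hdr,
                                 uint16_t *queue_pairs, demo_ctrl_ack *status)
    {
        d[0] = (struct demo_desc){ (uintptr_t)hdr, sizeof(*hdr), 0, DEMO_F_NEXT };
        d[1] = (struct demo_desc){ (uintptr_t)queue_pairs, sizeof(*queue_pairs), 1, DEMO_F_NEXT };
        d[2] = (struct demo_desc){ (uintptr_t)status, sizeof(*status), 2, DEMO_F_WRITE };
    }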
struct vring_packed *vring = &dev->packed_vrings[queue_idx];
uint16_t n_descs;
- while (desc_is_avail(&vring->desc_packed[vq->used_idx],
+ while (desc_is_avail(&vring->desc[vq->used_idx],
vq->used_wrap_counter)) {
n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
vq->used_idx);
rte_smp_wmb();
- vring->desc_packed[vq->used_idx].flags =
+ vring->desc[vq->used_idx].flags =
VRING_DESC_F_WRITE |
VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
VRING_DESC_F_USED(vq->used_wrap_counter);
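The loop above stops when desc_is_avail() fails, i.e. when the descriptor at used_idx has not yet been made available by the driver for the current wrap of the ring. Under the virtio 1.1 packed-ring rules this comes down to comparing the descriptor's AVAIL and USED flag bits (bits 7 and 15) against the queue's wrap counter; a sketch with local stand-in names:

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_DESC_F_AVAIL(b) ((uint16_t)(b) << 7)
    #define DEMO_DESC_F_USED(b)  ((uint16_t)(b) << 15)

    /* Available iff AVAIL matches the wrap counter and USED does not. */
    static inline bool demo_desc_is_avail(uint16_t flags, bool wrap_counter)
    {
        bool avail = !!(flags & DEMO_DESC_F_AVAIL(1));
        bool used  = !!(flags & DEMO_DESC_F_USED(1));

        return avail == wrap_counter && used != wrap_counter;
    }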
sizeof(struct vring_packed_desc_event),
VIRTIO_PCI_VRING_ALIGN);
vring->num = vq->vq_nentries;
- vring->desc_packed =
- (void *)(uintptr_t)desc_addr;
- vring->driver_event =
- (void *)(uintptr_t)avail_addr;
- vring->device_event =
- (void *)(uintptr_t)used_addr;
+ vring->desc = (void *)(uintptr_t)desc_addr;
+ vring->driver = (void *)(uintptr_t)avail_addr;
+ vring->device = (void *)(uintptr_t)used_addr;
dev->packed_queues[queue_idx].avail_wrap_counter = true;
dev->packed_queues[queue_idx].used_wrap_counter = true;
for (i = 0; i < vring->num; i++)
- vring->desc_packed[i].flags = 0;
+ vring->desc[i].flags = 0;
}
static void
struct vq_desc_extra *dxp;
uint16_t i;
- struct vring_packed_desc *descs = vq->vq_packed.ring.desc_packed;
+ struct vring_packed_desc *descs = vq->vq_packed.ring.desc;
int cnt = 0;
i = vq->vq_used_cons_idx;
{
int i;
for (i = 0; i < n - 1; i++) {
- vq->vq_packed.ring.desc_packed[i].id = i;
+ vq->vq_packed.ring.desc[i].id = i;
vq->vq_descx[i].next = i + 1;
}
- vq->vq_packed.ring.desc_packed[i].id = i;
+ vq->vq_packed.ring.desc[i].id = i;
vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
}
{
if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
- vq->vq_packed.ring.driver_event->desc_event_flags =
+ vq->vq_packed.ring.driver->desc_event_flags =
vq->vq_packed.event_flags_shadow;
}
}
{
if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
- vq->vq_packed.ring.driver_event->desc_event_flags =
+ vq->vq_packed.ring.driver->desc_event_flags =
vq->vq_packed.event_flags_shadow;
}
}
* Ensure updated data is visible to vhost before reading the flags.
*/
virtio_mb(vq->hw->weak_barriers);
- flags = vq->vq_packed.ring.device_event->desc_event_flags;
+ flags = vq->vq_packed.ring.device->desc_event_flags;
return flags != RING_EVENT_FLAGS_DISABLE;
}
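Usage note (a sketch, not part of the patch): a transmit or receive path would typically call this helper after exposing descriptors and ring the doorbell only when the device has not disabled notifications, along the lines of:

    if (unlikely(virtqueue_kick_prepare_packed(vq)))
        virtqueue_notify(vq);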