{
struct virtqueue *vq = cvq->vq;
int head;
- struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;
struct virtio_pmd_ctrl *result;
uint16_t flags;
int sum = 0;
* One RX packet for ACK.
*/
head = vq->vq_avail_idx;
- flags = vq->cached_flags;
+ flags = vq->vq_packed.cached_flags;
desc[head].addr = cvq->virtio_net_hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->cached_flags ^=
+ vq->vq_packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
desc[vq->vq_avail_idx].len = dlen[k];
desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
- vq->cached_flags;
+ vq->vq_packed.cached_flags;
sum += dlen[k];
vq->vq_free_cnt--;
nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->cached_flags ^=
+ vq->vq_packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
}
desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
- desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE | vq->cached_flags;
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+ vq->vq_packed.cached_flags;
vq->vq_free_cnt--;
nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->cached_flags ^=
+ vq->vq_packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
vq->vq_used_cons_idx += nb_descs;
if (vq->vq_used_cons_idx >= vq->vq_nentries) {
vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->used_wrap_counter ^= 1;
+ vq->vq_packed.used_wrap_counter ^= 1;
}
PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
"vq->vq_avail_idx=%d\n"
"vq->vq_used_cons_idx=%d\n"
- "vq->cached_flags=0x%x\n"
- "vq->used_wrap_counter=%d\n",
+ "vq->vq_packed.cached_flags=0x%x\n"
+ "vq->vq_packed.used_wrap_counter=%d\n",
vq->vq_free_cnt,
vq->vq_avail_idx,
vq->vq_used_cons_idx,
- vq->cached_flags,
- vq->used_wrap_counter);
+ vq->vq_packed.cached_flags,
+ vq->vq_packed.used_wrap_counter);
result = cvq->virtio_net_hdr_mz->addr;
return result;
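The packed control path above repeats one idiom after every descriptor it fills: advance vq_avail_idx and, on wrap-around, flip both wrap-dependent bits in vq_packed.cached_flags so that descriptors written on the next lap carry the new wrap state. A minimal sketch of that idiom, using a hypothetical helper name that is not part of this patch:

static inline void
vq_packed_next_avail(struct virtqueue *vq)
{
	/* Hypothetical helper, shown only to illustrate the pattern
	 * repeated throughout this patch. */
	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		/* New lap: toggle AVAIL/USED so descriptors written from
		 * now on match the new wrap state. */
		vq->vq_packed.cached_flags ^=
			VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
	}
}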
* At least one TX packet per argument;
* One RX packet for ACK.
*/
- vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
- vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
- vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+ vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+ vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
- i = vq->vq_ring.desc[head].next;
+ i = vq->vq_split.ring.desc[head].next;
for (k = 0; k < pkt_num; k++) {
- vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+ vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
- vq->vq_ring.desc[i].len = dlen[k];
+ vq->vq_split.ring.desc[i].len = dlen[k];
sum += dlen[k];
vq->vq_free_cnt--;
- i = vq->vq_ring.desc[i].next;
+ i = vq->vq_split.ring.desc[i].next;
}
- vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
- vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+ vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
- vq->vq_ring.desc[i].len = sizeof(ctrl->status);
+ vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
vq->vq_free_cnt--;
- vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
+ vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
vq_update_avail_ring(vq, head);
vq_update_avail_idx(vq);
used_idx = (uint32_t)(vq->vq_used_cons_idx
& (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
idx = (uint32_t) uep->id;
desc_idx = idx;
- while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
- desc_idx = vq->vq_ring.desc[desc_idx].next;
+ while (vq->vq_split.ring.desc[desc_idx].flags &
+ VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_split.ring.desc[desc_idx].next;
vq->vq_free_cnt++;
}
- vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
vq->vq_desc_head_idx = idx;
vq->vq_used_cons_idx++;
virtio_init_vring(struct virtqueue *vq)
{
int size = vq->vq_nentries;
- struct vring *vr = &vq->vq_ring;
uint8_t *ring_mem = vq->vq_ring_virt_mem;
PMD_INIT_FUNC_TRACE();
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
if (vtpci_packed_queue(vq->hw)) {
- vring_init_packed(&vq->ring_packed, ring_mem,
+ vring_init_packed(&vq->vq_packed.ring, ring_mem,
VIRTIO_PCI_VRING_ALIGN, size);
vring_desc_init_packed(vq, size);
} else {
+ struct vring *vr = &vq->vq_split.ring;
+
vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
vring_desc_init_split(vr->desc, size);
}
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
- vq->event_flags_shadow = 0;
if (vtpci_packed_queue(hw)) {
- vq->used_wrap_counter = 1;
- vq->cached_flags = VRING_DESC_F_AVAIL(1);
+ vq->vq_packed.used_wrap_counter = 1;
+ vq->vq_packed.cached_flags = VRING_DESC_F_AVAIL(1);
+ vq->vq_packed.event_flags_shadow = 0;
if (queue_type == VTNET_RQ)
- vq->cached_flags |= VRING_DESC_F_WRITE;
+ vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
}
/*
struct vq_desc_extra *dxp;
uint16_t desc_idx_last = desc_idx;
- dp = &vq->vq_ring.desc[desc_idx];
+ dp = &vq->vq_split.ring.desc[desc_idx];
dxp = &vq->vq_descx[desc_idx];
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
while (dp->flags & VRING_DESC_F_NEXT) {
desc_idx_last = dp->next;
- dp = &vq->vq_ring.desc[dp->next];
+ dp = &vq->vq_split.ring.desc[dp->next];
}
}
dxp->ndescs = 0;
if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
vq->vq_desc_head_idx = desc_idx;
} else {
- dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
dp_tail->next = desc_idx;
}
struct vring_packed_desc *desc;
uint16_t i;
- desc = vq->ring_packed.desc_packed;
+ desc = vq->vq_packed.ring.desc_packed;
for (i = 0; i < num; i++) {
used_idx = vq->vq_used_cons_idx;
vq->vq_used_cons_idx++;
if (vq->vq_used_cons_idx >= vq->vq_nentries) {
vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->used_wrap_counter ^= 1;
+ vq->vq_packed.used_wrap_counter ^= 1;
}
}
/* Caller does the check */
for (i = 0; i < num ; i++) {
used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
len[i] = uep->len;
cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
for (i = 0; i < num; i++) {
used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
/* Desc idx same as used idx */
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
len[i] = uep->len;
cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
{
uint16_t used_idx, id, curr_id, free_cnt = 0;
uint16_t size = vq->vq_nentries;
- struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
num -= dxp->ndescs;
if (used_idx >= size) {
used_idx -= size;
- vq->used_wrap_counter ^= 1;
+ vq->vq_packed.used_wrap_counter ^= 1;
}
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
{
uint16_t used_idx, id;
uint16_t size = vq->vq_nentries;
- struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
vq->vq_used_cons_idx += dxp->ndescs;
if (vq->vq_used_cons_idx >= size) {
vq->vq_used_cons_idx -= size;
- vq->used_wrap_counter ^= 1;
+ vq->vq_packed.used_wrap_counter ^= 1;
}
vq_ring_free_id_packed(vq, id);
if (dxp->cookie != NULL) {
struct vq_desc_extra *dxp;
used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
dxp = &vq->vq_descx[desc_idx];
return -EMSGSIZE;
head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
- start_dp = vq->vq_ring.desc;
+ start_dp = vq->vq_split.ring.desc;
while (i < num) {
idx = head_idx & (vq->vq_nentries - 1);
{
struct vq_desc_extra *dxp;
struct virtio_hw *hw = vq->hw;
- struct vring_desc *start_dp = vq->vq_ring.desc;
+ struct vring_desc *start_dp = vq->vq_split.ring.desc;
uint16_t idx, i;
if (unlikely(vq->vq_free_cnt == 0))
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
struct rte_mbuf **cookie, uint16_t num)
{
- struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
- uint16_t flags = vq->cached_flags;
+ struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc_packed;
+ uint16_t flags = vq->vq_packed.cached_flags;
struct virtio_hw *hw = vq->hw;
struct vq_desc_extra *dxp;
uint16_t idx;
start_dp[idx].flags = flags;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->cached_flags ^=
+ vq->vq_packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
- flags = vq->cached_flags;
+ flags = vq->vq_packed.cached_flags;
}
}
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
uint16_t i = 0;
idx = vq->vq_desc_head_idx;
- start_dp = vq->vq_ring.desc;
+ start_dp = vq->vq_split.ring.desc;
while (i < num) {
idx = idx & (vq->vq_nentries - 1);
id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
idx = vq->vq_avail_idx;
- dp = &vq->ring_packed.desc_packed[idx];
+ dp = &vq->vq_packed.ring.desc_packed[idx];
dxp = &vq->vq_descx[id];
dxp->ndescs = 1;
dxp->cookie = cookie;
- flags = vq->cached_flags;
+ flags = vq->vq_packed.cached_flags;
/* prepend cannot fail, checked by caller */
hdr = (struct virtio_net_hdr *)
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->cached_flags ^=
+ vq->vq_packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
head_idx = vq->vq_avail_idx;
idx = head_idx;
prev = head_idx;
- start_dp = vq->ring_packed.desc_packed;
+ start_dp = vq->vq_packed.ring.desc_packed;
- head_dp = &vq->ring_packed.desc_packed[idx];
+ head_dp = &vq->vq_packed.ring.desc_packed[idx];
head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
- head_flags |= vq->cached_flags;
+ head_flags |= vq->vq_packed.cached_flags;
if (can_push) {
/* prepend cannot fail, checked by caller */
idx++;
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
- vq->cached_flags ^=
+ vq->vq_packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
}
start_dp[idx].len = cookie->data_len;
if (likely(idx != head_idx)) {
flags = cookie->next ? VRING_DESC_F_NEXT : 0;
- flags |= vq->cached_flags;
+ flags |= vq->vq_packed.cached_flags;
start_dp[idx].flags = flags;
}
prev = idx;
idx++;
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
- vq->cached_flags ^=
+ vq->vq_packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
} while ((cookie = cookie->next) != NULL);
dxp->cookie = (void *)cookie;
dxp->ndescs = needed;
- start_dp = vq->vq_ring.desc;
+ start_dp = vq->vq_split.ring.desc;
if (can_push) {
/* prepend cannot fail, checked by caller */
} while ((cookie = cookie->next) != NULL);
if (use_indirect)
- idx = vq->vq_ring.desc[head_idx].next;
+ idx = vq->vq_split.ring.desc[head_idx].next;
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
if (hw->use_simple_rx) {
for (desc_idx = 0; desc_idx < vq->vq_nentries;
desc_idx++) {
- vq->vq_ring.avail->ring[desc_idx] = desc_idx;
- vq->vq_ring.desc[desc_idx].flags =
+ vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
+ vq->vq_split.ring.desc[desc_idx].flags =
VRING_DESC_F_WRITE;
}
if (!vtpci_packed_queue(hw)) {
if (hw->use_inorder_tx)
- vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+ vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
}
VIRTQUEUE_DUMP(vq);
desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
sw_ring = &vq->sw_ring[desc_idx];
- start_dp = &vq->vq_ring.desc[desc_idx];
+ start_dp = &vq->vq_split.ring.desc[desc_idx];
ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
RTE_VIRTIO_VPMD_RX_REARM_THRESH);
nb_used = RTE_MIN(nb_used, nb_pkts);
desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- rused = &vq->vq_ring.used->ring[desc_idx];
+ rused = &vq->vq_split.ring.used->ring[desc_idx];
sw_ring = &vq->sw_ring[desc_idx];
sw_ring_end = &vq->sw_ring[vq->vq_nentries];
nb_used = RTE_MIN(nb_used, nb_pkts);
desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- rused = &vq->vq_ring.used->ring[desc_idx];
+ rused = &vq->vq_split.ring.used->ring[desc_idx];
sw_ring = &vq->sw_ring[desc_idx];
sw_ring_end = &vq->sw_ring[vq->vq_nentries];
struct vq_desc_extra *dxp;
uint16_t i;
- struct vring_packed_desc *descs = vq->ring_packed.desc_packed;
+ struct vring_packed_desc *descs = vq->vq_packed.ring.desc_packed;
int cnt = 0;
i = vq->vq_used_cons_idx;
vq->vq_used_cons_idx++;
if (vq->vq_used_cons_idx >= vq->vq_nentries) {
vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->used_wrap_counter ^= 1;
+ vq->vq_packed.used_wrap_counter ^= 1;
}
i = vq->vq_used_cons_idx;
}
for (i = 0; i < nb_used; i++) {
used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
if (hw->use_simple_rx) {
desc_idx = used_idx;
rte_pktmbuf_free(vq->sw_ring[desc_idx]);
struct virtqueue {
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
- struct vring vq_ring; /**< vring keeping desc, used and avail */
- struct vring_packed ring_packed; /**< vring keeping descs */
- bool used_wrap_counter;
- uint16_t cached_flags; /**< cached flags for descs */
- uint16_t event_flags_shadow;
+ union {
+ struct {
+ /**< vring keeping desc, used and avail */
+ struct vring ring;
+ } vq_split;
+
+ struct {
+ /**< vring keeping descs and events */
+ struct vring_packed ring;
+ bool used_wrap_counter;
+ uint16_t cached_flags; /**< cached flags for descs */
+ uint16_t event_flags_shadow;
+ } vq_packed;
+ };
- /**
- * Last consumed descriptor in the used table,
- * trails vq_ring.used->idx.
- */
- uint16_t vq_used_cons_idx;
+ uint16_t vq_used_cons_idx; /**< last consumed descriptor */
uint16_t vq_nentries; /**< vring desc numbers */
uint16_t vq_free_cnt; /**< num of desc available */
uint16_t vq_avail_idx; /**< sync until needed */
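The struct change above is the core of this rework: a virtqueue runs in exactly one layout for its whole lifetime, so the split-only vring and the packed-only ring (plus its wrap counter, cached flags and event-flags shadow) can overlap in an anonymous union instead of sitting side by side. Call sites then pick the arm that matches the queue type, roughly as in this illustrative fragment:

void *desc_base;

if (vtpci_packed_queue(vq->hw))
	desc_base = vq->vq_packed.ring.desc_packed;
else
	desc_base = vq->vq_split.ring.desc;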
used = !!(flags & VRING_DESC_F_USED(1));
avail = !!(flags & VRING_DESC_F_AVAIL(1));
- return avail == used && used == vq->used_wrap_counter;
+ return avail == used && used == vq->vq_packed.used_wrap_counter;
}
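On the consume side, a packed descriptor counts as used when its AVAIL and USED bits are equal and also match the driver's used_wrap_counter, which is exactly the comparison above. An illustrative drain loop built only from patterns already present in this patch (num is however many completions the caller wants to reap; barriers omitted for brevity):

struct vring_packed_desc *descs = vq->vq_packed.ring.desc_packed;
uint16_t flags, used, avail;

while (num-- > 0) {
	flags = descs[vq->vq_used_cons_idx].flags;
	used = !!(flags & VRING_DESC_F_USED(1));
	avail = !!(flags & VRING_DESC_F_AVAIL(1));
	if (avail != used || used != vq->vq_packed.used_wrap_counter)
		break;	/* device has not completed this slot yet */

	/* ... reclaim the vq->vq_descx[] state behind this slot ... */

	if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
		vq->vq_used_cons_idx -= vq->vq_nentries;
		/* The device now completes descriptors of the next lap. */
		vq->vq_packed.used_wrap_counter ^= 1;
	}
}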
static inline void
{
int i;
for (i = 0; i < n - 1; i++) {
- vq->ring_packed.desc_packed[i].id = i;
+ vq->vq_packed.ring.desc_packed[i].id = i;
vq->vq_descx[i].next = i + 1;
}
- vq->ring_packed.desc_packed[i].id = i;
+ vq->vq_packed.ring.desc_packed[i].id = i;
vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
}
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
- if (vq->event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
- vq->event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
- vq->ring_packed.driver_event->desc_event_flags =
- vq->event_flags_shadow;
+ if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
+ vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
+ vq->vq_packed.ring.driver_event->desc_event_flags =
+ vq->vq_packed.event_flags_shadow;
}
}
if (vtpci_packed_queue(vq->hw))
virtqueue_disable_intr_packed(vq);
else
- vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
/**
static inline void
virtqueue_enable_intr_packed(struct virtqueue *vq)
{
- uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
-
- if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
- vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
- *event_flags = vq->event_flags_shadow;
+ if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
+ vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
+ vq->vq_packed.ring.driver_event->desc_event_flags =
+ vq->vq_packed.event_flags_shadow;
}
}
static inline void
virtqueue_enable_intr_split(struct virtqueue *vq)
{
- vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+ vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
}
/**
return VTNET_TQ;
}
-#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_split.ring.used->idx - \
+ (vq)->vq_used_cons_idx))
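VIRTQUEUE_NUSED() yields the number of split-ring entries the device has marked used but the driver has not consumed yet; the unsigned 16-bit subtraction handles index wrap naturally. A simplified, assumed usage pattern in the split datapath (the virtio_rmb() call and its argument mirror the virtio_wmb()/virtio_mb() calls elsewhere in this patch and are an assumption here, not part of the patch):

uint16_t nb_used = VIRTQUEUE_NUSED(vq);

/* Order the read of used->idx before reading the used ring entries. */
virtio_rmb(vq->hw->weak_barriers);

while (nb_used--) {
	/* ... dequeue one entry from vq->vq_split.ring.used->ring[] ... */
}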
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
vq_update_avail_idx(struct virtqueue *vq)
{
virtio_wmb(vq->hw->weak_barriers);
- vq->vq_ring.avail->idx = vq->vq_avail_idx;
+ vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
}
static inline void
* descriptor.
*/
avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
- if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
- vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+ if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
+ vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
vq->vq_avail_idx++;
}
* the used->flags.
*/
virtio_mb(vq->hw->weak_barriers);
- return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+ return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
static inline int
* Ensure updated data is visible to vhost before reading the flags.
*/
virtio_mb(vq->hw->weak_barriers);
- flags = vq->ring_packed.device_event->desc_event_flags;
+ flags = vq->vq_packed.ring.device_event->desc_event_flags;
return flags != RING_EVENT_FLAGS_DISABLE;
}
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
- used_idx = (vq)->vq_ring.used->idx; \
+ used_idx = (vq)->vq_split.ring.used->idx; \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
if (vtpci_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
" cached_flags=0x%x; used_wrap_counter=%d", \
(vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
- (vq)->vq_avail_idx, (vq)->cached_flags, \
- (vq)->used_wrap_counter); \
+ (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
+ (vq)->vq_packed.used_wrap_counter); \
break; \
} \
PMD_INIT_LOG(DEBUG, \
" avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
" avail.flags=0x%x; used.flags=0x%x", \
(vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
- (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
- (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
- (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+ (vq)->vq_desc_head_idx, (vq)->vq_split.ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_split.ring.used->idx, \
+ (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
#define VIRTQUEUE_DUMP(vq) do { } while (0)