int head;
struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
struct virtio_pmd_ctrl *result;
- bool avail_wrap_counter;
+ uint16_t flags;
int sum = 0;
int nb_descs = 0;
int k;
/*
 * Format is enforced in qemu code:
 * One TX packet for header;
 * At least one TX packet per argument;
 * One RX packet for ACK.
 */
head = vq->vq_avail_idx;
- avail_wrap_counter = vq->avail_wrap_counter;
+ flags = vq->cached_flags;
desc[head].addr = cvq->virtio_net_hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
+ vq->cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
for (k = 0; k < pkt_num; k++) {
desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
desc[vq->vq_avail_idx].len = dlen[k];
desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ vq->cached_flags;
sum += dlen[k];
vq->vq_free_cnt--;
nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
+ vq->cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
}
desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
- desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE | vq->cached_flags;
vq->vq_free_cnt--;
nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
+ vq->cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
virtio_wmb(vq->hw->weak_barriers);
- desc[head].flags = VRING_DESC_F_NEXT |
- VRING_DESC_F_AVAIL(avail_wrap_counter) |
- VRING_DESC_F_USED(!avail_wrap_counter);
+ desc[head].flags = VRING_DESC_F_NEXT | flags;
virtio_wmb(vq->hw->weak_barriers);
virtqueue_notify(vq);
PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
"vq->vq_avail_idx=%d\n"
"vq->vq_used_cons_idx=%d\n"
- "vq->avail_wrap_counter=%d\n"
+ "vq->cached_flags=0x%x\n"
"vq->used_wrap_counter=%d\n",
vq->vq_free_cnt,
vq->vq_avail_idx,
vq->vq_used_cons_idx,
- vq->avail_wrap_counter,
+ vq->cached_flags,
vq->used_wrap_counter);
result = cvq->virtio_net_hdr_mz->addr;
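
The wrap handling above leans on a single identity: XOR-ing the cached word with VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1) flips both bits at once, which is exactly what recomputing VRING_DESC_F_AVAIL(w) | VRING_DESC_F_USED(!w) with a toggled wrap counter w would produce. A minimal standalone check of that equivalence, assuming the macros expand to the virtio 1.1 bit positions (bit 7 for AVAIL, bit 15 for USED):

#include <assert.h>
#include <stdint.h>

#define VRING_DESC_F_AVAIL(b)  ((uint16_t)(b) << 7)
#define VRING_DESC_F_USED(b)   ((uint16_t)(b) << 15)

int main(void)
{
        uint16_t cached = VRING_DESC_F_AVAIL(1); /* initial: avail=1, used=0 */
        int wrap = 1;
        int i;

        for (i = 0; i < 4; i++) {
                /* the old per-descriptor computation ... */
                uint16_t old = VRING_DESC_F_AVAIL(wrap) |
                               VRING_DESC_F_USED(!wrap);
                /* ... always matches the cached copy */
                assert(cached == old);
                /* ring wrap-around: one XOR instead of two macro calls */
                cached ^= VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
                wrap ^= 1;
        }
        return 0;
}
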
vq->vq_nentries = vq_size;
vq->event_flags_shadow = 0;
if (vtpci_packed_queue(hw)) {
- vq->avail_wrap_counter = 1;
vq->used_wrap_counter = 1;
- vq->avail_used_flags =
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ vq->cached_flags = VRING_DESC_F_AVAIL(1);
+ if (queue_type == VTNET_RQ)
+ vq->cached_flags |= VRING_DESC_F_WRITE;
}
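
After initialization both wrap counters are 1, so descriptors must be published with AVAIL=1 and USED=0; for receive queues the WRITE bit is folded into cached_flags once here, which is why the refill path below no longer ORs in VRING_DESC_F_WRITE per descriptor. A sketch of the resulting values (the helper name is hypothetical; flag values follow the virtio 1.1 spec):

#include <stdint.h>

#define VRING_DESC_F_WRITE     2
#define VRING_DESC_F_AVAIL(b)  ((uint16_t)(b) << 7)

/* hypothetical helper mirroring the hunk above */
static uint16_t initial_cached_flags(int is_rx_queue)
{
        /* avail=1, used=0: matches the wrap counters right after init */
        uint16_t flags = VRING_DESC_F_AVAIL(1);

        if (is_rx_queue)
                flags |= VRING_DESC_F_WRITE; /* device writes RX buffers */
        return flags; /* 0x0080 for TX/ctrl queues, 0x0082 for RX */
}
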
static inline int
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
struct rte_mbuf **cookie, uint16_t num)
{
struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
- uint16_t flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
+ uint16_t flags = vq->cached_flags;
struct virtio_hw *hw = vq->hw;
struct vq_desc_extra *dxp;
uint16_t idx;
start_dp[idx].flags = flags;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
- vq->avail_used_flags =
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
- flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
+ vq->cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ flags = vq->cached_flags;
}
}
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
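
With the flags cached, the refill loop reads them once per batch and only recomputes them at the wrap point. A toy model of that loop (names are illustrative; the dxp bookkeeping, buffer addresses, and barriers are omitted):

#include <stdint.h>

#define VRING_DESC_F_AVAIL(b)  ((uint16_t)(b) << 7)
#define VRING_DESC_F_USED(b)   ((uint16_t)(b) << 15)

/* toy model: one flags load per batch, one XOR per ring wrap */
static void refill_flags(uint16_t *desc_flags, uint16_t nentries,
                         uint16_t *avail_idx, uint16_t *cached_flags,
                         uint16_t num)
{
        uint16_t flags = *cached_flags; /* WRITE already folded in for RX */

        while (num--) {
                desc_flags[*avail_idx] = flags;
                if (++(*avail_idx) >= nentries) {
                        *avail_idx -= nentries;
                        *cached_flags ^=
                                VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
                        flags = *cached_flags;
                }
        }
}
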
dxp->ndescs = 1;
dxp->cookie = cookie;
- flags = vq->avail_used_flags;
+ flags = vq->cached_flags;
/* prepend cannot fail, checked by caller */
hdr = (struct virtio_net_hdr *)
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
- vq->avail_used_flags ^=
+ vq->cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
head_dp = &vq->ring_packed.desc_packed[idx];
head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
- head_flags |= vq->avail_used_flags;
+ head_flags |= vq->cached_flags;
if (can_push) {
/* prepend cannot fail, checked by caller */
idx++;
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
- vq->avail_used_flags =
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ vq->cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
}
start_dp[idx].len = cookie->data_len;
if (likely(idx != head_idx)) {
flags = cookie->next ? VRING_DESC_F_NEXT : 0;
- flags |= vq->avail_used_flags;
+ flags |= vq->cached_flags;
start_dp[idx].flags = flags;
}
prev = idx;
idx++;
if (idx >= vq->vq_nentries) {
idx -= vq->vq_nentries;
- vq->avail_wrap_counter ^= 1;
- vq->avail_used_flags =
- VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ vq->cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
} while ((cookie = cookie->next) != NULL);
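
In the chained transmit path above, non-head descriptors get their flags as the chain is filled (the idx != head_idx check), while head_flags is only computed: storing the head's flags is what hands the whole chain to the device, so it has to be the last write, after a barrier. A sketch of that final step (the helper is hypothetical; the driver itself issues virtio_wmb() before a plain store):

#include <stdint.h>

struct vring_packed_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t id;
        uint16_t flags;
};

/* hypothetical helper: expose a fully written chain by storing the
 * head descriptor's flags last, with a release store standing in
 * for the driver's virtio_wmb() */
static void publish_chain(struct vring_packed_desc *head_dp,
                          uint16_t head_flags)
{
        __atomic_store_n(&head_dp->flags, head_flags, __ATOMIC_RELEASE);
}
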
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
struct vring vq_ring; /**< vring keeping desc, used and avail */
struct vring_packed ring_packed; /**< vring keeping descs */
- bool avail_wrap_counter;
bool used_wrap_counter;
+ uint16_t cached_flags; /**< cached flags for descs */
uint16_t event_flags_shadow;
- uint16_t avail_used_flags;
+
/**
* Last consumed descriptor in the used table,
* trails vq_ring.used->idx.
if (vtpci_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
- "VQ: - avail_wrap_counter=%d; used_wrap_counter=%d", \
+ " cached_flags=0x%x; used_wrap_counter=%d", \
(vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
- (vq)->vq_avail_idx, (vq)->avail_wrap_counter, \
+ (vq)->vq_avail_idx, (vq)->cached_flags, \
(vq)->used_wrap_counter); \
break; \
} \
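
Since the dump now prints a raw flag word instead of a boolean, reading it requires the bit layout; a small hypothetical decoder for the hex value, assuming the spec bit positions used above:

#include <stdint.h>
#include <stdio.h>

/* hypothetical decoder for the cached_flags value in the dump */
static void decode_cached_flags(uint16_t f)
{
        printf("avail=%u used=%u write=%u\n",
               (f >> 7) & 1U,   /* VRING_DESC_F_AVAIL */
               (f >> 15) & 1U,  /* VRING_DESC_F_USED */
               (f >> 1) & 1U);  /* VRING_DESC_F_WRITE */
}
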