used_idx -= vq->size;
}
- rte_smp_wmb();
+ /* The ordering for storing desc flags needs to be enforced. */
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
for (i = 0; i < vq->shadow_used_idx; i++) {
uint16_t flags;
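Why one fence is enough here: a release fence orders every store issued before it against every store issued after it, so the per-descriptor rte_smp_wmb() collapses into a single barrier per flush. The same single-fence shape recurs in the batched enqueue and dequeue hunks further down. Below is a minimal standalone sketch of the pattern; the toy ring and names are hypothetical, not DPDK code, and it builds with gcc -O2 -pthread.

/* Toy producer/consumer: one release fence publishes a whole batch. */
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH 4

struct toy_desc {
	uint64_t payload;
	uint16_t flags;			/* 1 = published */
};

static struct toy_desc ring[BATCH];

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < BATCH; i++)
		ring[i].payload = 100 + i;	/* plain stores */

	/* One fence orders all payload stores before all flag stores. */
	__atomic_thread_fence(__ATOMIC_RELEASE);

	for (int i = 0; i < BATCH; i++)
		__atomic_store_n(&ring[i].flags, 1, __ATOMIC_RELAXED);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	for (int i = 0; i < BATCH; i++) {
		/* Acquire load pairs with the producer's release fence. */
		while (__atomic_load_n(&ring[i].flags, __ATOMIC_ACQUIRE) == 0)
			;
		printf("desc %d payload %" PRIu64 "\n", i, ring[i].payload);
	}
	return NULL;
}

int main(void)
{
	pthread_t p, c;
	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}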
struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
- rte_smp_wmb();
- vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
+ /* desc flags is the synchronization point for virtio packed vring */
+ __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
+ used_elem->flags, __ATOMIC_RELEASE);
vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
			   sizeof(struct vring_packed_desc),
			   sizeof(struct vring_packed_desc));
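The release store above is the packed-ring synchronization point: id (and the len logged just after) must be visible before the flags store marks the descriptor used, and the guest pairs that store with an acquire load of the same flags word. A minimal sketch of the pairing, with a hypothetical toy descriptor standing in for vring_packed_desc (not DPDK code, builds with gcc -O2 -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct toy_desc {
	uint16_t id;
	uint32_t len;
	uint16_t flags;			/* 0 = not ready, 1 = used */
};

static struct toy_desc desc;

/* Plays the vhost role: fill the descriptor, then release-store flags. */
static void *device_side(void *arg)
{
	(void)arg;
	desc.id = 7;			/* plain stores ... */
	desc.len = 128;
	/* ... made visible by the release store of flags. */
	__atomic_store_n(&desc.flags, 1, __ATOMIC_RELEASE);
	return NULL;
}

/* Plays the guest driver role: acquire-load flags, then read id/len. */
static void *driver_side(void *arg)
{
	(void)arg;
	while (__atomic_load_n(&desc.flags, __ATOMIC_ACQUIRE) == 0)
		;
	printf("id=%u len=%u\n", desc.id, desc.len);	/* always 7/128 */
	return NULL;
}

int main(void)
{
	pthread_t d, g;
	pthread_create(&d, NULL, device_side, NULL);
	pthread_create(&g, NULL, driver_side, NULL);
	pthread_join(d, NULL);
	pthread_join(g, NULL);
	return 0;
}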
vq->desc_packed[vq->last_used_idx + i].len = lens[i];
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
vq->desc_packed[vq->last_used_idx + i].flags = flags;

@@ ... @@ vhost_shadow_dequeue_batch_packed()
vq->desc_packed[vq->last_used_idx + i].len = 0;
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
vq->desc_packed[vq->last_used_idx + i].flags = flags;

@@ ... @@ virtio_dev_rx_batch_packed()
return -1;
}
- rte_smp_rmb();
-
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
lens[i] = descs[avail_idx + i].len;
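The barrier deleted above (and again in the single-enqueue hunk just below) has no replacement because the availability check itself can carry the ordering: if the flags are read with an acquire load, as this series does in desc_is_avail(), every later read of the same descriptor is already ordered after the check. A compile-and-run sketch with a hypothetical toy descriptor layout (not DPDK code):

#include <stdbool.h>
#include <stdint.h>

#define TOY_DESC_F_AVAIL	(1 << 7)
#define TOY_DESC_F_USED		(1 << 15)

struct toy_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

static inline bool
toy_desc_is_avail(struct toy_desc *desc, bool wrap_counter)
{
	/* Acquire load: orders all subsequent reads of *desc after it. */
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & TOY_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & TOY_DESC_F_USED);
}

/* Caller pattern: no separate fence between the check and the len read. */
static uint32_t toy_read_len(struct toy_desc *desc, bool wrap)
{
	if (!toy_desc_is_avail(desc, wrap))
		return 0;
	return desc->len;	/* ordered after the acquire load above */
}

int main(void)
{
	struct toy_desc d = { .addr = 0, .len = 256, .id = 3,
			      .flags = TOY_DESC_F_AVAIL };

	return toy_read_len(&d, true) == 256 ? 0 : 1;
}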
struct buf_vector buf_vec[BUF_VECTOR_MAX];
uint16_t nr_descs = 0;
- rte_smp_rmb();
if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
&nr_descs) < 0)) {
VHOST_LOG_DATA(DEBUG,
	"(%d) failed to get enough desc from vring\n",
	dev->vid);

@@ ... @@ virtio_dev_rx_async_submit_split()
struct async_inflight_info *pkts_info = vq->async_pkts_info;
int n_pkts = 0;
- avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
-
/*
- * The ordering between avail index and
- * desc reads needs to be enforced.
+	 * The ordering between avail index and desc reads needs to be enforced.
*/
- rte_smp_rmb();
+ avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
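Moving the load after the comment is not just cosmetic: an acquire load of avail->idx subsumes the old plain-load-plus-rte_smp_rmb() pair, because every ring read issued after it is ordered behind the index load. A standalone sketch with a hypothetical toy split ring (not DPDK code, builds with gcc -O2 -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t ring[8];	/* stand-in for vq->avail->ring */
static uint16_t avail_idx;	/* stand-in for vq->avail->idx */

/* Guest: write the ring entry, then release-store the index. */
static void *guest(void *arg)
{
	(void)arg;
	ring[0] = 42;					/* plain store */
	__atomic_store_n(&avail_idx, 1, __ATOMIC_RELEASE);
	return NULL;
}

/* Backend: acquire-load the index; the ring reads below are ordered after it. */
static void *backend(void *arg)
{
	(void)arg;
	uint16_t head;

	while ((head = __atomic_load_n(&avail_idx, __ATOMIC_ACQUIRE)) == 0)
		;
	printf("avail_idx=%u ring[0]=%u\n", head, ring[0]);	/* 1, 42 */
	return NULL;
}

int main(void)
{
	pthread_t g, b;
	pthread_create(&g, NULL, guest, NULL);
	pthread_create(&b, NULL, backend, NULL);
	pthread_join(g, NULL);
	pthread_join(b, NULL);
	return 0;
}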
src_it = it_pool;
dst_it = it_pool + 1;
segs_await = 0;
- vq->async_pkts_inflight_n += n_pkts;
+ vq->async_pkts_inflight_n += pkt_burst_idx;
if (unlikely(n_pkts < (int)pkt_burst_idx)) {
/*
 * log error packets number here and do actual
 * put by vhost_poll_enqueue_completed()
 */

@@ ... @@ virtio_dev_rx_async_submit_split()
if (pkt_burst_idx) {
n_pkts = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
- vq->async_pkts_inflight_n += n_pkts;
+ vq->async_pkts_inflight_n += pkt_burst_idx;
if (unlikely(n_pkts < (int)pkt_burst_idx))
pkt_err = pkt_burst_idx - n_pkts;
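The two counter changes above make async_pkts_inflight_n track the whole submitted burst (pkt_burst_idx) instead of only what the transfer engine accepted (n_pkts), with the shortfall carried in pkt_err. A toy sketch of that accounting, assuming the error path later retires the pkt_err packets (simplified flow, not DPDK code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pkt_burst_idx = 32;	/* packets handed to the engine */
	int n_pkts = 28;		/* packets the engine accepted */
	uint32_t inflight = 0, pkt_err = 0;

	inflight += pkt_burst_idx;	/* count the full burst ... */
	if (n_pkts < (int)pkt_burst_idx)
		pkt_err = pkt_burst_idx - n_pkts;	/* ... track the rest */

	/* Assumed reconciliation: the error path retires failed packets. */
	inflight -= pkt_err;

	printf("inflight=%u pkt_err=%u\n", inflight, pkt_err);	/* 28 4 */
	return 0;
}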
n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
		queue_id, 0, count - vq->async_last_pkts_n);
n_pkts_cpl += vq->async_last_pkts_n;
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
while (likely((n_pkts_put < count) && n_inflight)) {
uint16_t info_idx = (start_idx + n_pkts_put) & (vq_size - 1);

@@ ... @@ vhost_reserve_avail_batch_packed()
return -1;
}
- rte_smp_rmb();
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
lens[i] = descs[avail_idx + i].len;
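Here the read barrier becomes an acquire fence rather than disappearing, because the flag checks above it are plain per-descriptor loads; a single fence after the check loop then orders the len reads against all of them. A standalone sketch of that fence-based pairing (toy batch, hypothetical names, not DPDK code, builds with gcc -O2 -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH 4

struct toy_desc {
	uint32_t len;
	uint16_t flags;			/* 1 = available */
};

static struct toy_desc descs[BATCH];

static void *writer(void *arg)
{
	(void)arg;
	for (int i = 0; i < BATCH; i++)
		descs[i].len = 64 * (i + 1);		/* plain stores */
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* release fence ... */
	for (int i = 0; i < BATCH; i++)
		__atomic_store_n(&descs[i].flags, 1, __ATOMIC_RELAXED);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	for (int i = 0; i < BATCH; i++)		/* relaxed flag checks */
		while (__atomic_load_n(&descs[i].flags, __ATOMIC_RELAXED) == 0)
			;
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* ... pairs here */
	for (int i = 0; i < BATCH; i++)
		printf("len[%d]=%u\n", i, descs[i].len);	/* 64..256 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;
	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}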