/* Fill the payload of the used descriptor before touching its flags. */
struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
- rte_smp_wmb();
- vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
+ /* desc flags is the synchronization point for virtio packed vring */
+ __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
+ used_elem->flags, __ATOMIC_RELEASE);
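The replacement above trades a full SMP write barrier for C11 release semantics on the flags store alone. A minimal standalone sketch of the same publish pattern (pseudo_desc, publish_desc, and poll_desc_flags are illustrative names for this sketch, not DPDK symbols):

#include <stdint.h>

struct pseudo_desc {
	uint16_t id;
	uint16_t flags;
};

static void
publish_desc(struct pseudo_desc *d, uint16_t id, uint16_t flags)
{
	d->id = id;				/* payload first */
	__atomic_store_n(&d->flags, flags,	/* then publish */
			__ATOMIC_RELEASE);
}

static uint16_t
poll_desc_flags(const struct pseudo_desc *d)
{
	/* pairs with the RELEASE store in publish_desc() */
	return __atomic_load_n(&d->flags, __ATOMIC_ACQUIRE);
}

If the reader observes the new flags value through the acquire load, the id store is guaranteed to be visible as well, which is what the removed barrier used to enforce for the whole ring update.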
/* Dirty-log the updated descriptor for live migration. */
vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
			sizeof(struct vring_packed_desc),
			sizeof(struct vring_packed_desc));
/* Read the length of each descriptor in the batch up front. */
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
	lens[i] = descs[avail_idx + i].len;
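vhost_for_each_try_unroll is DPDK's unroll-hint loop macro; a rough sketch of such a macro, assuming a GCC-style pragma (the real definition in vhost.h is selected per compiler):

/* Sketch only: hint the compiler to unroll the fixed-size batch loop. */
#define sketch_for_each_try_unroll(iter, val, num) \
	_Pragma("GCC unroll 4") \
	for ((iter) = (val); (iter) < (num); (iter)++)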
if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
				&nr_descs) < 0)) {
	VHOST_LOG_DATA(DEBUG,
			"(%d) failed to get enough desc from vring\n",
			dev->vid);
/* The iovec pool is split in half: sources in the lower half,
 * destinations in the upper; iterator slots alternate src/dst.
 */
struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
struct rte_vhost_iov_iter *src_it = it_pool;
struct rte_vhost_iov_iter *dst_it = it_pool + 1;
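A hedged sketch of the slot arithmetic this layout implies, with two iterator entries per in-flight packet (src_iter_of and dst_iter_of are hypothetical helpers, not vhost API):

/* Hypothetical helpers: iterator slots alternate source/destination. */
static inline struct rte_vhost_iov_iter *
src_iter_of(struct rte_vhost_iov_iter *it_pool, uint32_t pkt_idx)
{
	return it_pool + 2 * pkt_idx;		/* even slot: source */
}

static inline struct rte_vhost_iov_iter *
dst_iter_of(struct rte_vhost_iov_iter *it_pool, uint32_t pkt_idx)
{
	return it_pool + 2 * pkt_idx + 1;	/* odd slot: destination */
}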
/* Flush any batched copy jobs to the async channel. */
if (pkt_burst_idx) {
	n_pkts = vq->async_ops.transfer_data(dev->vid,
			queue_id, tdes, 0, pkt_burst_idx);
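transfer_data is the application-registered async channel callback, and it may accept fewer jobs than offered, which is why the caller records n_pkts. A CPU-fallback sketch with a hypothetical job layout (copy_job is a stand-in for illustration, not the real async descriptor type):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct copy_job {		/* hypothetical stand-in for tdes[] entries */
	void *src;
	void *dst;
	size_t len;
};

static uint32_t
sync_transfer_data(struct copy_job *jobs, uint16_t count)
{
	uint16_t i;

	for (i = 0; i < count; i++)	/* synchronous fallback copy */
		memcpy(jobs[i].dst, jobs[i].src, jobs[i].len);

	return count;			/* all jobs accepted */
}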
VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
/* Enqueue path: only in-range, RX-side queue indexes are valid. */
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
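The validity check amounts to a parity test (enqueue passes is_tx = 0, expecting an even RX-side index) plus a range check against nr_vring; a sketch under that assumption:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: index must have the expected RX/TX parity and be in range. */
static inline bool
queue_idx_valid(uint32_t idx, int is_tx, uint32_t nr_vring)
{
	return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}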