	return __vhost_iova_to_vva(dev, vq, iova, size, perm);
}
+static __rte_always_inline void
+vhost_vring_call(struct vhost_virtqueue *vq)
+{
+	/* Flush used->idx update before we read avail->flags. */
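+	/*
+	 * A full barrier is needed here: the caller's used->idx store
+	 * must be visible before the avail->flags load below, and only
+	 * a full barrier orders a store against a later load (write-only
+	 * or read-only barriers do not).
+	 */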
+	rte_mb();
+
+	/* Kick the guest if necessary. */
+	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+			&& (vq->callfd >= 0))
+		eventfd_write(vq->callfd, (eventfd_t)1);
+}
+
#endif /* _VHOST_NET_CDEV_H_ */
			offsetof(struct vring_used, idx),
			sizeof(vq->used->idx));
-	/* flush used->idx update before we read avail->flags. */
-	rte_mb();
-
-	/* Kick the guest if necessary. */
-	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-			&& (vq->callfd >= 0))
-		eventfd_write(vq->callfd, (eventfd_t)1);
+	vhost_vring_call(vq);
out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);
	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring(dev, vq);
-
-		/* flush used->idx update before we read avail->flags. */
-		rte_mb();
-
-		/* Kick the guest if necessary. */
-		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-				&& (vq->callfd >= 0))
-			eventfd_write(vq->callfd, (eventfd_t)1);
+		vhost_vring_call(vq);
	}
out:
	vq->used->idx += count;
	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
			sizeof(vq->used->idx));
-
-	/* Kick guest if required. */
-	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-			&& (vq->callfd >= 0))
-		eventfd_write(vq->callfd, (eventfd_t)1);
+	vhost_vring_call(vq);
}
static __rte_always_inline struct zcopy_mbuf *