X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtqueue.h;h=105a9c00c504cfb7f9e91be3a578482df9e25185;hb=25ae7f1a5d9d127a46f8d62d1d689f77a78138fd;hp=4f6956d8458395330f288bb6e3876e9c7a8a15e2;hpb=ea5207c158edb00d7e5da17369ea45a462a40dcc;p=dpdk.git

diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 4f6956d845..105a9c00c5 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -505,8 +505,24 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
 static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
-	virtio_wmb(vq->hw->weak_barriers);
-	vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+	if (vq->hw->weak_barriers) {
+		/* x86 prefers using rte_smp_wmb over __atomic_store_n as
+		 * it reports a slightly better perf, which comes from the
+		 * saved branch by the compiler.
+		 * The if and else branches are identical with the smp and
+		 * cio barriers both defined as compiler barriers on x86.
+		 */
+#ifdef RTE_ARCH_X86_64
+		rte_smp_wmb();
+		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+#else
+		__atomic_store_n(&vq->vq_split.ring.avail->idx,
+				 vq->vq_avail_idx, __ATOMIC_RELEASE);
+#endif
+	} else {
+		rte_cio_wmb();
+		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+	}
 }
 
 static inline void