uint32_t idx, desc_idx, used_idx;
struct vring_used_elem *uep;
- rmb();
+ virtio_rmb();
used_idx = (uint32_t)(vq->vq_used_cons_idx
& (vq->vq_nentries - 1));
nb_used = VIRTQUEUE_NUSED(rxvq);
- rmb();
+ virtio_rmb();
num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
}
if (likely(nb_enqueued)) {
+ virtio_wmb();
if (unlikely(virtqueue_kick_prepare(rxvq))) {
virtqueue_notify(rxvq);
PMD_RX_LOG(DEBUG, "Notified\n");
nb_used = VIRTQUEUE_NUSED(rxvq);
- rmb();
+ virtio_rmb();
if (nb_used == 0)
return 0;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(txvq);
- rmb();
+ virtio_rmb();
num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
}
}
vq_update_avail_idx(txvq);
+ virtio_wmb();
txvq->packets += nb_tx;
#include "virtio_ring.h"
#include "virtio_logs.h"
-#define mb() rte_mb()
-#define wmb() rte_wmb()
-#define rmb() rte_rmb()
+/*
+ * Per virtio_config.h in Linux.
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ * This driver is for virtio_pci on SMP and can therefore use the
+ * weaker barriers (compiler barriers).
+ */
+#define virtio_mb() rte_mb()
+#define virtio_rmb() rte_compiler_barrier()
+#define virtio_wmb() rte_compiler_barrier()
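
For reference, a minimal sketch of the consumer-side pattern these macros
support (hypothetical helper name, not part of this patch): the index the
host publishes in used->idx must be read before the ring entries it guards,
and on IA loads are not reordered with other loads, so a compiler barrier
is enough.

    /* Illustrative only: count entries the host has made available.
     * Callers must read the vring_used_elem entries only after the
     * barrier below, which keeps the compiler from hoisting those
     * loads above the load of used->idx. */
    static inline uint16_t
    virtqueue_nused_sketch(struct virtqueue *vq)
    {
            uint16_t used_idx = vq->vq_ring.used->idx;  /* host-written */

            virtio_rmb();
            return (uint16_t)(used_idx - vq->vq_used_cons_idx);
    }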
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
- rte_compiler_barrier();
+ virtio_wmb();
vq->vq_ring.avail->idx = vq->vq_avail_idx;
}
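
As a usage note, the producer side pairs with the barrier above roughly as
follows (a simplified sketch with a hypothetical helper name, assembled
from the enqueue paths this patch touches):

    /* Illustrative only: publish one descriptor chain to the host.
     * The avail ring entry is written first; vq_update_avail_idx()
     * then issues virtio_wmb() so that store cannot be reordered
     * after the store to avail->idx that exposes it. */
    static inline void
    vq_enqueue_sketch(struct virtqueue *vq, uint16_t desc_idx)
    {
            uint16_t slot = (uint16_t)(vq->vq_avail_idx
                            & (vq->vq_nentries - 1));

            vq->vq_ring.avail->ring[slot] = desc_idx;
            vq->vq_avail_idx++;
            vq_update_avail_idx(vq);
            if (unlikely(virtqueue_kick_prepare(vq)))
                    virtqueue_notify(vq);
    }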
virtqueue_notify(struct virtqueue *vq)
{
/*
- * Ensure updated avail->idx is visible to host. mb() necessary?
+ * Ensure updated avail->idx is visible to host.
* For virtio on IA, the notification is an I/O port write,
* which is itself a serializing instruction.
*/
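
The body that follows this comment is a single 16-bit register write; on
legacy virtio-pci it is roughly the following (macro and field names
assumed from the driver's virtio_pci.h of this era):

    VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_NOTIFY, vq->vq_queue_index);

On IA this is a port I/O (out) instruction, which serializes on its own, so
no explicit mb() is required before the kick.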