* accesses through relaxed memory I/O windows, so smp_mb() et al are
* sufficient.
*
- * This driver is for virtio_pci on SMP and therefore can assume
- * weaker (compiler barriers)
*/
-#define virtio_mb() rte_mb()
-#define virtio_rmb() rte_compiler_barrier()
-#define virtio_wmb() rte_compiler_barrier()
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
nb_used = VIRTQUEUE_NUSED(rxvq);
- rte_compiler_barrier(); /* rmb */
+ rte_smp_rmb();
num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
if (unlikely(num == 0)) return 0;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(txvq);
- rte_compiler_barrier(); /* rmb */
+ rte_smp_rmb();
num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);
*/
avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1));
vq->vq_ring.avail->ring[avail_idx] = desc_idx;
- rte_compiler_barrier(); /* wmb , for IA memory model barrier is enough*/
+ rte_smp_wmb();
vq->vq_ring.avail->idx++;
}
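Illustrative sketch, not part of the patch: the hunks above form a publish/consume pair. The guest makes its avail ring stores visible with virtio_wmb() before bumping avail->idx, and must likewise order the used->idx load before reading the used ring entry it guards, which is what the new virtio_rmb() provides on weakly ordered CPUs. The helper below is hypothetical; the field names follow the driver structures already shown.

/* Sketch only: consume side mirroring the publish above. */
static inline int
example_peek_used(struct virtqueue *vq, uint32_t *id)
{
	uint16_t slot;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return 0;			/* nothing new published by the host */
	virtio_rmb();				/* index load before the entry load */
	slot = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	*id = vq->vq_ring.used->ring[slot].id;	/* safe to read now */
	return 1;
}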
#include <rte_atomic_32.h>
#endif
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_wmb()
+
+#define rte_smp_rmb() rte_rmb()
+
#endif /* _RTE_ATOMIC_ARM_H_ */
*/
#define rte_rmb() {asm volatile("sync" : : : "memory"); }
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_wmb()
+
+#define rte_smp_rmb() rte_rmb()
+
/*------------------------- 16 bit atomic operations -------------------------*/
/* To be compatible with Power7, use GCC built-in functions for 16 bit
* operations */
__sync_synchronize();
}
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_compiler_barrier()
+
+#define rte_smp_rmb() rte_compiler_barrier()
+
#ifdef __cplusplus
}
#endif
#define rte_rmb() _mm_lfence()
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_compiler_barrier()
+
+#define rte_smp_rmb() rte_compiler_barrier()
+
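A short aside on why the x86 mapping above is safe (sketch, not part of the patch): x86 is TSO, so the hardware only reorders a store with a later load. Load/load and store/store order is preserved, which is why rte_smp_rmb() and rte_smp_wmb() only need to stop the compiler, while rte_smp_mb() must still emit a real fence. The hypothetical Dekker-style handshake below is the kind of pattern that needs the full barrier even on x86:

/* Sketch only: each lcore stores its own flag, then loads the peer's. */
static volatile int flag[2];			/* one entry per contending lcore */

static inline int
example_try_enter(unsigned int self)		/* self is 0 or 1, illustrative only */
{
	flag[self] = 1;
	rte_smp_mb();			/* stop the store above passing the load below */
	return flag[1 - self] == 0;	/* enter only if the peer has not */
}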
/*------------------------- 16 bit atomic operations -------------------------*/
#ifndef RTE_FORCE_INTRINSICS
*/
static inline void rte_rmb(void);
+/**
+ * General memory barrier between lcores
+ *
+ * Guarantees that the LOAD and STORE operations that precede the
+ * rte_smp_mb() call are globally visible across the lcores
+ * before the LOAD and STORE operations that follow it.
+ */
+static inline void rte_smp_mb(void);
+
+/**
+ * Write memory barrier between lcores
+ *
+ * Guarantees that the STORE operations that precede the
+ * rte_smp_wmb() call are globally visible across the lcores
+ * before the STORE operations that follow it.
+ */
+static inline void rte_smp_wmb(void);
+
+/**
+ * Read memory barrier between lcores
+ *
+ * Guarantees that the LOAD operations that precede the
+ * rte_smp_rmb() call are globally visible across the lcores
+ * before the LOAD operations that follow it.
+ */
+static inline void rte_smp_rmb(void);
+
#endif /* __DOXYGEN__ */
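A minimal usage sketch of the pairing these declarations describe, with hypothetical names: the producing lcore orders its payload store before the flag store with rte_smp_wmb(), and the consuming lcore orders the flag load before the payload load with rte_smp_rmb().

/* Sketch only: message passing between two lcores. */
struct example_msg {
	uint64_t payload;
	volatile uint16_t ready;	/* 0 = empty, 1 = payload valid */
};

/* producing lcore */
static inline void
example_post(struct example_msg *m, uint64_t val)
{
	m->payload = val;
	rte_smp_wmb();			/* payload visible before the flag */
	m->ready = 1;
}

/* consuming lcore */
static inline int
example_poll(struct example_msg *m, uint64_t *val)
{
	if (m->ready == 0)
		return 0;
	rte_smp_rmb();			/* flag load ordered before the payload load */
	*val = m->payload;
	return 1;
}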
/**
/* write entries in ring */
ENQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_wmb();
/* if we exceed the watermark */
if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
/* write entries in ring */
ENQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_wmb();
/* if we exceed the watermark */
if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
/* copy in table */
DEQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_rmb();
/*
* If there are other dequeues in progress that preceded us,
/* copy in table */
DEQUEUE_PTRS();
- rte_compiler_barrier();
+ rte_smp_rmb();
__RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
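The ring hunks follow the same discipline, sketched below with a generic slot array rather than the real struct rte_ring layout: the enqueue side must make the stored object pointers globally visible before the producer tail that publishes them moves, and the dequeue side must finish reading the dequeued pointers before the consumer tail hands the slots back to producers. The names below are hypothetical.

/* Sketch only: single-producer publish of one object. */
#define EXAMPLE_RING_SZ 1024			/* illustrative size, power of two */

static void *example_slots[EXAMPLE_RING_SZ];
static volatile uint32_t example_prod_tail;	/* consumers may read up to here */

static inline void
example_sp_enqueue_one(void *obj)
{
	uint32_t head = example_prod_tail;

	example_slots[head & (EXAMPLE_RING_SZ - 1)] = obj;	/* write entry */
	rte_smp_wmb();				/* entry store before tail store */
	example_prod_tail = head + 1;		/* publish it to consumers */
}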