From 6f5f8ecce1b2d11abb758fe9a89f7a39a32cd6e9 Mon Sep 17 00:00:00 2001
From: Bruce Richardson <bruce.richardson@intel.com>
Date: Mon, 10 Feb 2014 14:43:44 +0000
Subject: [PATCH] eal: add rte_compiler_barrier() macro

The rte_ring functions used a compiler barrier to stop the compiler
reordering certain expressions, e.g. to ensure the ring entries are
fully written before the tail pointer update that publishes them. This
barrier is generally useful, so it is moved to the common header file
alongside the other barriers.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
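Note for reviewers: a simplified sketch of the pattern the barrier
protects in the enqueue path (illustrative only, condensed from the
real code; see the hunks below for the actual call sites):

	/* Write the ring entries, then publish them by updating the
	 * producer tail. The barrier stops the compiler sinking the
	 * entry writes below the tail update, which could let a
	 * consumer read not-yet-written slots. No CPU fence is needed
	 * on x86, where stores are not reordered with older stores. */
	ENQUEUE_PTRS();
	rte_compiler_barrier();
	r->prod.tail = prod_next;
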
 lib/librte_eal/common/include/rte_atomic.h | 10 ++++++++++
 lib/librte_ring/rte_ring.h                 | 11 ++++-------
 2 files changed, 14 insertions(+), 7 deletions(-)
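
As a standalone illustration of the macro itself (hypothetical demo,
not part of the patch): the empty asm statement emits no instructions,
but the "memory" clobber tells the compiler that memory may have been
read or written, so it can neither cache values in registers across
the barrier nor move memory accesses past it.

	/* demo.c: build with "gcc -O2 -c demo.c" and inspect the
	 * generated code; the store to "data" stays ahead of the
	 * store to "ready". */
	#define rte_compiler_barrier() do {		\
		asm volatile ("" : : : "memory");	\
	} while (0)

	static int data;
	static int ready;

	void
	publish(int v)
	{
		data = v;		/* payload first */
		rte_compiler_barrier();
		ready = 1;		/* then the flag */
	}
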

diff --git a/lib/librte_eal/common/include/rte_atomic.h b/lib/librte_eal/common/include/rte_atomic.h
index c2e4f4781b..a07079979a 100644
--- a/lib/librte_eal/common/include/rte_atomic.h
+++ b/lib/librte_eal/common/include/rte_atomic.h
@@ -81,6 +81,16 @@ extern "C" {
  */
 #define	rte_rmb() _mm_lfence()
 
+/**
+ * Compiler barrier.
+ *
+ * Guarantees that operation reordering does not occur at compile time
+ * for operations directly before and after the barrier.
+ */
+#define	rte_compiler_barrier() do {		\
+	asm volatile ("" : : : "memory");	\
+} while(0)
+
 #include <emmintrin.h>
 
 /**
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 6022a72ea9..751b5a86a9 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -173,9 +173,6 @@ struct rte_ring {
 	 	 	 	 	 	 	 	 	 	 * about compiler re-ordering */
 };
 
-/* dummy assembly operation to prevent compiler re-ordering of instructions */
-#define COMPILER_BARRIER() do { asm volatile("" ::: "memory"); } while(0)
-
 #define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
 #define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
 #define RTE_RING_QUOT_EXCEED (1 << 31)  /**< Quota exceed for burst ops */
@@ -393,7 +390,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 
 	/* write entries in ring */
 	ENQUEUE_PTRS();
-	COMPILER_BARRIER();
+	rte_compiler_barrier();
 
 	/* if we exceed the watermark */
 	if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -479,7 +476,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 
 	/* write entries in ring */
 	ENQUEUE_PTRS();
-	COMPILER_BARRIER();
+	rte_compiler_barrier();
 
 	/* if we exceed the watermark */
 	if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -570,7 +567,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
 
 	/* copy in table */
 	DEQUEUE_PTRS();
-	COMPILER_BARRIER();
+	rte_compiler_barrier();
 
 	/*
 	 * If there are other dequeues in progress that preceded us,
@@ -645,7 +642,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
 
 	/* copy in table */
 	DEQUEUE_PTRS();
-	COMPILER_BARRIER();
+	rte_compiler_barrier();
 
 	__RING_STAT_ADD(r, deq_success, n);
 	r->cons.tail = cons_next;
-- 
2.39.5