-/**
- * @internal Enqueue several objects on the ring (multi-producer safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to the ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
- * @return
- * Actual number of objects enqueued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static inline unsigned int __attribute__((always_inline))
-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- unsigned int *free_space)
-{
- uint32_t prod_head, prod_next;
- uint32_t cons_tail, free_entries;
- const unsigned int max = n;
- int success;
- uint32_t mask = r->mask;
-
- /* move prod.head atomically */
- do {
- /* Reset n to the initial burst count */
- n = max;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32-bit values
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = (mask + cons_tail - prod_head);
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ?
- 0 : free_entries;
-
- if (n == 0)
- goto end;
-
- prod_next = prod_head + n;
- success = rte_atomic32_cmpset(&r->prod.head, prod_head,
- prod_next);
- } while (unlikely(success == 0));
-
- /* write entries in ring */
- ENQUEUE_PTRS();
- rte_smp_wmb();
-
- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head))
- rte_pause();
-
- r->prod.tail = prod_next;
-end:
- if (free_space != NULL)
- *free_space = free_entries - n;
- return n;
-}
+/* Two consecutive loads may be reordered by the CPU on weakly ordered
+ * memory models (PowerPC/ARM).
+ * There are two choices for users:
+ * 1. Use an explicit rmb() memory barrier.
+ * 2. Use one-way load-acquire/store-release barriers, enabled by
+ *    building with CONFIG_RTE_USE_C11_MEM_MODEL=y.
+ * Which is faster depends on platform performance test results.
+ * By default, the common functions are kept in rte_ring_generic.h.
+ */
+#ifdef RTE_USE_C11_MEM_MODEL
+#include "rte_ring_c11_mem.h"
+#else
+#include "rte_ring_generic.h"
+#endif
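
To make the two options concrete, below is a minimal sketch of the producer-side synchronization under each model. It is illustrative only: demo_ring, its fields, and the helper names are simplified assumptions rather than DPDK's actual rte_ring layout, and the real implementations live in rte_ring_generic.h and rte_ring_c11_mem.h.

#include <stdint.h>

/* Hypothetical, simplified ring indices -- for illustration only. */
struct demo_ring {
	uint32_t prod_head;
	uint32_t prod_tail;
	uint32_t cons_tail;
	uint32_t mask;
};

/* Choice 1 (generic): a two-way read barrier between the two loads,
 * here approximated with an acquire fence standing in for rte_smp_rmb(). */
static inline uint32_t
demo_free_entries_generic(const struct demo_ring *r)
{
	uint32_t prod_head = r->prod_head;
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* ~ rte_smp_rmb() */
	uint32_t cons_tail = r->cons_tail;
	return r->mask + cons_tail - prod_head;
}

/* Choice 2 (C11): one-way barriers attached to the accesses themselves.
 * The acquire load of prod_head keeps the cons_tail load from being
 * hoisted above it; the acquire on cons_tail additionally pairs with
 * the consumer's release store of that index. No full fence is needed. */
static inline uint32_t
demo_free_entries_c11(const struct demo_ring *r)
{
	uint32_t prod_head = __atomic_load_n(&r->prod_head, __ATOMIC_ACQUIRE);
	uint32_t cons_tail = __atomic_load_n(&r->cons_tail, __ATOMIC_ACQUIRE);
	return r->mask + cons_tail - prod_head;
}

/* Publishing the tail shows the same split: the generic model pairs
 * rte_smp_wmb() with a plain store, while C11 uses a release store
 * that keeps all earlier writes to the ring slots visible first. */
static inline void
demo_publish_tail_c11(struct demo_ring *r, uint32_t new_tail)
{
	__atomic_store_n(&r->prod_tail, new_tail, __ATOMIC_RELEASE);
}

Which variant is faster differs by platform, which is why the choice is left to the CONFIG_RTE_USE_C11_MEM_MODEL build option instead of being made unconditionally.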
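
Separately, the removed generic enqueue above leans on unsigned 32-bit wraparound for its free_entries computation, as its in-line comment notes. A small standalone check (hypothetical values, not DPDK code) confirms the arithmetic still holds when the free-running indices wrap past 2^32:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t mask = 15;	/* ring size 16, usable capacity 15 */
	uint32_t cons_tail, prod_head;

	/* No wrap: 2 entries in use, so 13 slots are free. */
	cons_tail = 10;
	prod_head = 12;
	assert((uint32_t)(mask + cons_tail - prod_head) == 13);

	/* prod_head has wrapped past 2^32 while cons_tail has not:
	 * 5 entries in use (3 - 0xFFFFFFFE == 5 mod 2^32), so 10 free. */
	cons_tail = UINT32_MAX - 1;	/* 0xFFFFFFFE */
	prod_head = 3;			/* 0xFFFFFFFE + 5, wrapped */
	assert((uint32_t)(mask + cons_tail - prod_head) == 10);

	puts("free_entries wraparound arithmetic holds");
	return 0;
}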