For the legacy modes, rename rte_ring_generic.h and rte_ring_c11_mem.h
to rte_ring_generic_pvt.h and rte_ring_c11_pvt.h. Furthermore, add the
new file rte_ring_elem_pvt.h, which holds the __rte_ring_do_enqueue/
dequeue functions and the ring element copy helpers. The internal
update_tail helper is now prefixed with the library prefix, becoming
__rte_ring_update_tail.
For the other modes, rename the xx_c11_mem.h headers to xx_elem_pvt.h.
Move all private APIs into these new header files.
Finally, the external and internal APIs are separated from each other.
This reminds users not to use the internal APIs and makes the ring
library easier to maintain.
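
To illustrate the intent (a sketch only, not part of this patch):
applications stay on the public <rte_ring.h> API and are unaffected by
the rename. The wrapper name below is hypothetical.

    #include <rte_ring.h>  /* public API; the *_pvt.h headers are internal */

    /* Hypothetical application helper: enqueue a burst through the
     * public API only; internal helpers such as
     * __rte_ring_do_enqueue_elem() are off-limits.
     */
    static unsigned int
    app_send_burst(struct rte_ring *r, void *const objs[], unsigned int n)
    {
        unsigned int free_space;

        return rte_ring_enqueue_burst(r, objs, n, &free_space);
    }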
Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
headers = files('rte_ring.h',
'rte_ring_core.h',
'rte_ring_elem.h',
- 'rte_ring_c11_mem.h',
- 'rte_ring_generic.h',
+ 'rte_ring_elem_pvt.h',
+ 'rte_ring_c11_pvt.h',
+ 'rte_ring_generic_pvt.h',
'rte_ring_hts.h',
- 'rte_ring_hts_c11_mem.h',
+ 'rte_ring_hts_elem_pvt.h',
'rte_ring_peek.h',
- 'rte_ring_peek_c11_mem.h',
+ 'rte_ring_peek_elem_pvt.h',
'rte_ring_peek_zc.h',
'rte_ring_rts.h',
- 'rte_ring_rts_c11_mem.h')
+ 'rte_ring_rts_elem_pvt.h')
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (c) 2017,2018 HXT-semitech Corporation.
- * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
- * All rights reserved.
- * Derived from FreeBSD's bufring.h
- * Used as BSD-3 Licensed with permission from Kip Macy.
- */
-
-#ifndef _RTE_RING_C11_MEM_H_
-#define _RTE_RING_C11_MEM_H_
-
-static __rte_always_inline void
-update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
- uint32_t single, uint32_t enqueue)
-{
- RTE_SET_USED(enqueue);
-
- /*
- * If there are other enqueues/dequeues in progress that preceded us,
- * we need to wait for them to complete
- */
- if (!single)
- while (unlikely(ht->tail != old_val))
- rte_pause();
-
- __atomic_store_n(&ht->tail, new_val, __ATOMIC_RELEASE);
-}
-
-/**
- * @internal This function updates the producer head for enqueue
- *
- * @param r
- * A pointer to the ring structure
- * @param is_sp
- * Indicates whether multi-producer path is needed or not
- * @param n
- * The number of elements we will want to enqueue, i.e. how far should the
- * head be moved
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
- * @param old_head
- * Returns head value as it was before the move, i.e. where enqueue starts
- * @param new_head
- * Returns the current/new head value i.e. where enqueue finishes
- * @param free_entries
- * Returns the amount of free space in the ring BEFORE head was moved
- * @return
- * Actual number of objects enqueued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- uint32_t *old_head, uint32_t *new_head,
- uint32_t *free_entries)
-{
- const uint32_t capacity = r->capacity;
- uint32_t cons_tail;
- unsigned int max = n;
- int success;
-
- *old_head = __atomic_load_n(&r->prod.head, __ATOMIC_RELAXED);
- do {
- /* Reset n to the initial burst count */
- n = max;
-
- /* Ensure the head is read before tail */
- __atomic_thread_fence(__ATOMIC_ACQUIRE);
-
- /* load-acquire synchronize with store-release of ht->tail
- * in update_tail.
- */
- cons_tail = __atomic_load_n(&r->cons.tail,
- __ATOMIC_ACQUIRE);
-
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * *old_head > cons_tail). So 'free_entries' is always between 0
- * and capacity (which is < size).
- */
- *free_entries = (capacity + cons_tail - *old_head);
-
- /* check that we have enough room in ring */
- if (unlikely(n > *free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ?
- 0 : *free_entries;
-
- if (n == 0)
- return 0;
-
- *new_head = *old_head + n;
- if (is_sp)
- r->prod.head = *new_head, success = 1;
- else
- /* on failure, *old_head is updated */
- success = __atomic_compare_exchange_n(&r->prod.head,
- old_head, *new_head,
- 0, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
- } while (unlikely(success == 0));
- return n;
-}
-
-/**
- * @internal This function updates the consumer head for dequeue
- *
- * @param r
- * A pointer to the ring structure
- * @param is_sc
- * Indicates whether multi-consumer path is needed or not
- * @param n
- * The number of elements we will want to enqueue, i.e. how far should the
- * head be moved
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
- * @param old_head
- * Returns head value as it was before the move, i.e. where dequeue starts
- * @param new_head
- * Returns the current/new head value i.e. where dequeue finishes
- * @param entries
- * Returns the number of entries in the ring BEFORE head was moved
- * @return
- * - Actual number of objects dequeued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- uint32_t *old_head, uint32_t *new_head,
- uint32_t *entries)
-{
- unsigned int max = n;
- uint32_t prod_tail;
- int success;
-
- /* move cons.head atomically */
- *old_head = __atomic_load_n(&r->cons.head, __ATOMIC_RELAXED);
- do {
- /* Restore n as it may change every loop */
- n = max;
-
- /* Ensure the head is read before tail */
- __atomic_thread_fence(__ATOMIC_ACQUIRE);
-
- /* this load-acquire synchronize with store-release of ht->tail
- * in update_tail.
- */
- prod_tail = __atomic_load_n(&r->prod.tail,
- __ATOMIC_ACQUIRE);
-
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1.
- */
- *entries = (prod_tail - *old_head);
-
- /* Set the actual entries for dequeue */
- if (n > *entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
-
- if (unlikely(n == 0))
- return 0;
-
- *new_head = *old_head + n;
- if (is_sc)
- r->cons.head = *new_head, success = 1;
- else
- /* on failure, *old_head will be updated */
- success = __atomic_compare_exchange_n(&r->cons.head,
- old_head, *new_head,
- 0, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
- } while (unlikely(success == 0));
- return n;
-}
-
-#endif /* _RTE_RING_C11_MEM_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017,2018 HXT-semitech Corporation.
+ * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ * Derived from FreeBSD's bufring.h
+ * Used as BSD-3 Licensed with permission from Kip Macy.
+ */
+
+#ifndef _RTE_RING_C11_PVT_H_
+#define _RTE_RING_C11_PVT_H_
+
+static __rte_always_inline void
+__rte_ring_update_tail(struct rte_ring_headtail *ht, uint32_t old_val,
+ uint32_t new_val, uint32_t single, uint32_t enqueue)
+{
+ RTE_SET_USED(enqueue);
+
+ /*
+ * If there are other enqueues/dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ if (!single)
+ while (unlikely(ht->tail != old_val))
+ rte_pause();
+
+ __atomic_store_n(&ht->tail, new_val, __ATOMIC_RELEASE);
+}
+
+/**
+ * @internal This function updates the producer head for enqueue
+ *
+ * @param r
+ * A pointer to the ring structure
+ * @param is_sp
+ * Indicates whether multi-producer path is needed or not
+ * @param n
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where enqueue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where enqueue finishes
+ * @param free_entries
+ * Returns the amount of free space in the ring BEFORE head was moved
+ * @return
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *free_entries)
+{
+ const uint32_t capacity = r->capacity;
+ uint32_t cons_tail;
+ unsigned int max = n;
+ int success;
+
+ *old_head = __atomic_load_n(&r->prod.head, __ATOMIC_RELAXED);
+ do {
+ /* Reset n to the initial burst count */
+ n = max;
+
+ /* Ensure the head is read before tail */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+ /* load-acquire synchronize with store-release of ht->tail
+ * in update_tail.
+ */
+ cons_tail = __atomic_load_n(&r->cons.tail,
+ __ATOMIC_ACQUIRE);
+
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * *old_head > cons_tail). So 'free_entries' is always between 0
+ * and capacity (which is < size).
+ */
+ *free_entries = (capacity + cons_tail - *old_head);
+
+ /* check that we have enough room in ring */
+ if (unlikely(n > *free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : *free_entries;
+
+ if (n == 0)
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sp)
+ r->prod.head = *new_head, success = 1;
+ else
+ /* on failure, *old_head is updated */
+ success = __atomic_compare_exchange_n(&r->prod.head,
+ old_head, *new_head,
+ 0, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+ } while (unlikely(success == 0));
+ return n;
+}
+
+/**
+ * @internal This function updates the consumer head for dequeue
+ *
+ * @param r
+ * A pointer to the ring structure
+ * @param is_sc
+ * Indicates whether multi-consumer path is needed or not
+ * @param n
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where dequeue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where dequeue finishes
+ * @param entries
+ * Returns the number of entries in the ring BEFORE head was moved
+ * @return
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *entries)
+{
+ unsigned int max = n;
+ uint32_t prod_tail;
+ int success;
+
+ /* move cons.head atomically */
+ *old_head = __atomic_load_n(&r->cons.head, __ATOMIC_RELAXED);
+ do {
+ /* Restore n as it may change every loop */
+ n = max;
+
+ /* Ensure the head is read before tail */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+ /* this load-acquire synchronize with store-release of ht->tail
+ * in update_tail.
+ */
+ prod_tail = __atomic_load_n(&r->prod.tail,
+ __ATOMIC_ACQUIRE);
+
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1.
+ */
+ *entries = (prod_tail - *old_head);
+
+ /* Set the actual entries for dequeue */
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
+
+ if (unlikely(n == 0))
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sc)
+ r->cons.head = *new_head, success = 1;
+ else
+ /* on failure, *old_head will be updated */
+ success = __atomic_compare_exchange_n(&r->cons.head,
+ old_head, *new_head,
+ 0, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+ } while (unlikely(success == 0));
+ return n;
+}
+
+#endif /* _RTE_RING_C11_PVT_H_ */
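
Annotation (illustrative, not part of the patch): the store-release in
__rte_ring_update_tail() above pairs with the load-acquire of the
opposing tail in the head-move functions. A minimal standalone sketch
of that pairing, written with ISO C11 <stdatomic.h> rather than the
GCC __atomic builtins used in the header:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t tail;
    static uint32_t slot;            /* stands in for the ring payload */

    void writer(void)
    {
        slot = 42;                   /* copy the element into the ring */
        /* store-release: the element copy becomes visible before the
         * new tail, mirroring __rte_ring_update_tail().
         */
        atomic_store_explicit(&tail, 1, memory_order_release);
    }

    uint32_t reader(void)
    {
        /* load-acquire pairs with the store-release above, mirroring
         * the load of the opposing tail in __rte_ring_move_prod_head().
         */
        while (atomic_load_explicit(&tail, memory_order_acquire) == 0)
            ;
        return slot;                 /* guaranteed to observe 42 */
    }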
#endif
#include <rte_ring_core.h>
+#include <rte_ring_elem_pvt.h>
/**
* Calculate the memory size needed for a ring with given element size
struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
unsigned int count, int socket_id, unsigned int flags);
-static __rte_always_inline void
-__rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
- uint32_t idx, const void *obj_table, uint32_t n)
-{
- unsigned int i;
- uint32_t *ring = (uint32_t *)&r[1];
- const uint32_t *obj = (const uint32_t *)obj_table;
- if (likely(idx + n < size)) {
- for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
- ring[idx] = obj[i];
- ring[idx + 1] = obj[i + 1];
- ring[idx + 2] = obj[i + 2];
- ring[idx + 3] = obj[i + 3];
- ring[idx + 4] = obj[i + 4];
- ring[idx + 5] = obj[i + 5];
- ring[idx + 6] = obj[i + 6];
- ring[idx + 7] = obj[i + 7];
- }
- switch (n & 0x7) {
- case 7:
- ring[idx++] = obj[i++]; /* fallthrough */
- case 6:
- ring[idx++] = obj[i++]; /* fallthrough */
- case 5:
- ring[idx++] = obj[i++]; /* fallthrough */
- case 4:
- ring[idx++] = obj[i++]; /* fallthrough */
- case 3:
- ring[idx++] = obj[i++]; /* fallthrough */
- case 2:
- ring[idx++] = obj[i++]; /* fallthrough */
- case 1:
- ring[idx++] = obj[i++]; /* fallthrough */
- }
- } else {
- for (i = 0; idx < size; i++, idx++)
- ring[idx] = obj[i];
- /* Start at the beginning */
- for (idx = 0; i < n; i++, idx++)
- ring[idx] = obj[i];
- }
-}
-
-static __rte_always_inline void
-__rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
- const void *obj_table, uint32_t n)
-{
- unsigned int i;
- const uint32_t size = r->size;
- uint32_t idx = prod_head & r->mask;
- uint64_t *ring = (uint64_t *)&r[1];
- const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
- if (likely(idx + n < size)) {
- for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
- ring[idx] = obj[i];
- ring[idx + 1] = obj[i + 1];
- ring[idx + 2] = obj[i + 2];
- ring[idx + 3] = obj[i + 3];
- }
- switch (n & 0x3) {
- case 3:
- ring[idx++] = obj[i++]; /* fallthrough */
- case 2:
- ring[idx++] = obj[i++]; /* fallthrough */
- case 1:
- ring[idx++] = obj[i++];
- }
- } else {
- for (i = 0; idx < size; i++, idx++)
- ring[idx] = obj[i];
- /* Start at the beginning */
- for (idx = 0; i < n; i++, idx++)
- ring[idx] = obj[i];
- }
-}
-
-static __rte_always_inline void
-__rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
- const void *obj_table, uint32_t n)
-{
- unsigned int i;
- const uint32_t size = r->size;
- uint32_t idx = prod_head & r->mask;
- rte_int128_t *ring = (rte_int128_t *)&r[1];
- const rte_int128_t *obj = (const rte_int128_t *)obj_table;
- if (likely(idx + n < size)) {
- for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
- memcpy((void *)(ring + idx),
- (const void *)(obj + i), 32);
- switch (n & 0x1) {
- case 1:
- memcpy((void *)(ring + idx),
- (const void *)(obj + i), 16);
- }
- } else {
- for (i = 0; idx < size; i++, idx++)
- memcpy((void *)(ring + idx),
- (const void *)(obj + i), 16);
- /* Start at the beginning */
- for (idx = 0; i < n; i++, idx++)
- memcpy((void *)(ring + idx),
- (const void *)(obj + i), 16);
- }
-}
-
-/* the actual enqueue of elements on the ring.
- * Placed here since identical code needed in both
- * single and multi producer enqueue functions.
- */
-static __rte_always_inline void
-__rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
- const void *obj_table, uint32_t esize, uint32_t num)
-{
- /* 8B and 16B copies implemented individually to retain
- * the current performance.
- */
- if (esize == 8)
- __rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
- else if (esize == 16)
- __rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
- else {
- uint32_t idx, scale, nr_idx, nr_num, nr_size;
-
- /* Normalize to uint32_t */
- scale = esize / sizeof(uint32_t);
- nr_num = num * scale;
- idx = prod_head & r->mask;
- nr_idx = idx * scale;
- nr_size = r->size * scale;
- __rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
- obj_table, nr_num);
- }
-}
-
-static __rte_always_inline void
-__rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
- uint32_t idx, void *obj_table, uint32_t n)
-{
- unsigned int i;
- uint32_t *ring = (uint32_t *)&r[1];
- uint32_t *obj = (uint32_t *)obj_table;
- if (likely(idx + n < size)) {
- for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
- obj[i] = ring[idx];
- obj[i + 1] = ring[idx + 1];
- obj[i + 2] = ring[idx + 2];
- obj[i + 3] = ring[idx + 3];
- obj[i + 4] = ring[idx + 4];
- obj[i + 5] = ring[idx + 5];
- obj[i + 6] = ring[idx + 6];
- obj[i + 7] = ring[idx + 7];
- }
- switch (n & 0x7) {
- case 7:
- obj[i++] = ring[idx++]; /* fallthrough */
- case 6:
- obj[i++] = ring[idx++]; /* fallthrough */
- case 5:
- obj[i++] = ring[idx++]; /* fallthrough */
- case 4:
- obj[i++] = ring[idx++]; /* fallthrough */
- case 3:
- obj[i++] = ring[idx++]; /* fallthrough */
- case 2:
- obj[i++] = ring[idx++]; /* fallthrough */
- case 1:
- obj[i++] = ring[idx++]; /* fallthrough */
- }
- } else {
- for (i = 0; idx < size; i++, idx++)
- obj[i] = ring[idx];
- /* Start at the beginning */
- for (idx = 0; i < n; i++, idx++)
- obj[i] = ring[idx];
- }
-}
-
-static __rte_always_inline void
-__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head,
- void *obj_table, uint32_t n)
-{
- unsigned int i;
- const uint32_t size = r->size;
- uint32_t idx = prod_head & r->mask;
- uint64_t *ring = (uint64_t *)&r[1];
- unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
- if (likely(idx + n < size)) {
- for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
- obj[i] = ring[idx];
- obj[i + 1] = ring[idx + 1];
- obj[i + 2] = ring[idx + 2];
- obj[i + 3] = ring[idx + 3];
- }
- switch (n & 0x3) {
- case 3:
- obj[i++] = ring[idx++]; /* fallthrough */
- case 2:
- obj[i++] = ring[idx++]; /* fallthrough */
- case 1:
- obj[i++] = ring[idx++]; /* fallthrough */
- }
- } else {
- for (i = 0; idx < size; i++, idx++)
- obj[i] = ring[idx];
- /* Start at the beginning */
- for (idx = 0; i < n; i++, idx++)
- obj[i] = ring[idx];
- }
-}
-
-static __rte_always_inline void
-__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t prod_head,
- void *obj_table, uint32_t n)
-{
- unsigned int i;
- const uint32_t size = r->size;
- uint32_t idx = prod_head & r->mask;
- rte_int128_t *ring = (rte_int128_t *)&r[1];
- rte_int128_t *obj = (rte_int128_t *)obj_table;
- if (likely(idx + n < size)) {
- for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
- memcpy((void *)(obj + i), (void *)(ring + idx), 32);
- switch (n & 0x1) {
- case 1:
- memcpy((void *)(obj + i), (void *)(ring + idx), 16);
- }
- } else {
- for (i = 0; idx < size; i++, idx++)
- memcpy((void *)(obj + i), (void *)(ring + idx), 16);
- /* Start at the beginning */
- for (idx = 0; i < n; i++, idx++)
- memcpy((void *)(obj + i), (void *)(ring + idx), 16);
- }
-}
-
-/* the actual dequeue of elements from the ring.
- * Placed here since identical code needed in both
- * single and multi producer enqueue functions.
- */
-static __rte_always_inline void
-__rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
- void *obj_table, uint32_t esize, uint32_t num)
-{
- /* 8B and 16B copies implemented individually to retain
- * the current performance.
- */
- if (esize == 8)
- __rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
- else if (esize == 16)
- __rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
- else {
- uint32_t idx, scale, nr_idx, nr_num, nr_size;
-
- /* Normalize to uint32_t */
- scale = esize / sizeof(uint32_t);
- nr_num = num * scale;
- idx = cons_head & r->mask;
- nr_idx = idx * scale;
- nr_size = r->size * scale;
- __rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
- obj_table, nr_num);
- }
-}
-
-/* Between load and load. there might be cpu reorder in weak model
- * (powerpc/arm).
- * There are 2 choices for the users
- * 1.use rmb() memory barrier
- * 2.use one-direction load_acquire/store_release barrier
- * It depends on performance test results.
- * By default, move common functions to rte_ring_generic.h
- */
-#ifdef RTE_USE_C11_MEM_MODEL
-#include "rte_ring_c11_mem.h"
-#else
-#include "rte_ring_generic.h"
-#endif
-
-/**
- * @internal Enqueue several objects on the ring
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of objects.
- * @param esize
- * The size of ring element, in bytes. It must be a multiple of 4.
- * This must be the same value used while creating the ring. Otherwise
- * the results are undefined.
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
- * @param is_sp
- * Indicates whether to use single producer or multi-producer head update
- * @param free_space
- * returns the amount of space after the enqueue operation has finished
- * @return
- * Actual number of objects enqueued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
- unsigned int esize, unsigned int n,
- enum rte_ring_queue_behavior behavior, unsigned int is_sp,
- unsigned int *free_space)
-{
- uint32_t prod_head, prod_next;
- uint32_t free_entries;
-
- n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
- &prod_head, &prod_next, &free_entries);
- if (n == 0)
- goto end;
-
- __rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);
-
- update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
-end:
- if (free_space != NULL)
- *free_space = free_entries - n;
- return n;
-}
-
-/**
- * @internal Dequeue several objects from the ring
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of objects.
- * @param esize
- * The size of ring element, in bytes. It must be a multiple of 4.
- * This must be the same value used while creating the ring. Otherwise
- * the results are undefined.
- * @param n
- * The number of objects to pull from the ring.
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
- * @param is_sc
- * Indicates whether to use single consumer or multi-consumer head update
- * @param available
- * returns the number of remaining ring entries after the dequeue has finished
- * @return
- * - Actual number of objects dequeued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
- unsigned int esize, unsigned int n,
- enum rte_ring_queue_behavior behavior, unsigned int is_sc,
- unsigned int *available)
-{
- uint32_t cons_head, cons_next;
- uint32_t entries;
-
- n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
- &cons_head, &cons_next, &entries);
- if (n == 0)
- goto end;
-
- __rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);
-
- update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
-
-end:
- if (available != NULL)
- *available = entries - n;
- return n;
-}
-
/**
* Enqueue several objects on the ring (multi-producers safe).
*
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017,2018 HXT-semitech Corporation.
+ * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ * Derived from FreeBSD's bufring.h
+ * Used as BSD-3 Licensed with permission from Kip Macy.
+ */
+
+#ifndef _RTE_RING_ELEM_PVT_H_
+#define _RTE_RING_ELEM_PVT_H_
+
+static __rte_always_inline void
+__rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
+ uint32_t idx, const void *obj_table, uint32_t n)
+{
+ unsigned int i;
+ uint32_t *ring = (uint32_t *)&r[1];
+ const uint32_t *obj = (const uint32_t *)obj_table;
+ if (likely(idx + n < size)) {
+ for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
+ ring[idx] = obj[i];
+ ring[idx + 1] = obj[i + 1];
+ ring[idx + 2] = obj[i + 2];
+ ring[idx + 3] = obj[i + 3];
+ ring[idx + 4] = obj[i + 4];
+ ring[idx + 5] = obj[i + 5];
+ ring[idx + 6] = obj[i + 6];
+ ring[idx + 7] = obj[i + 7];
+ }
+ switch (n & 0x7) {
+ case 7:
+ ring[idx++] = obj[i++]; /* fallthrough */
+ case 6:
+ ring[idx++] = obj[i++]; /* fallthrough */
+ case 5:
+ ring[idx++] = obj[i++]; /* fallthrough */
+ case 4:
+ ring[idx++] = obj[i++]; /* fallthrough */
+ case 3:
+ ring[idx++] = obj[i++]; /* fallthrough */
+ case 2:
+ ring[idx++] = obj[i++]; /* fallthrough */
+ case 1:
+ ring[idx++] = obj[i++]; /* fallthrough */
+ }
+ } else {
+ for (i = 0; idx < size; i++, idx++)
+ ring[idx] = obj[i];
+ /* Start at the beginning */
+ for (idx = 0; i < n; i++, idx++)
+ ring[idx] = obj[i];
+ }
+}
+
+static __rte_always_inline void
+__rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
+ const void *obj_table, uint32_t n)
+{
+ unsigned int i;
+ const uint32_t size = r->size;
+ uint32_t idx = prod_head & r->mask;
+ uint64_t *ring = (uint64_t *)&r[1];
+ const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
+ if (likely(idx + n < size)) {
+ for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
+ ring[idx] = obj[i];
+ ring[idx + 1] = obj[i + 1];
+ ring[idx + 2] = obj[i + 2];
+ ring[idx + 3] = obj[i + 3];
+ }
+ switch (n & 0x3) {
+ case 3:
+ ring[idx++] = obj[i++]; /* fallthrough */
+ case 2:
+ ring[idx++] = obj[i++]; /* fallthrough */
+ case 1:
+ ring[idx++] = obj[i++];
+ }
+ } else {
+ for (i = 0; idx < size; i++, idx++)
+ ring[idx] = obj[i];
+ /* Start at the beginning */
+ for (idx = 0; i < n; i++, idx++)
+ ring[idx] = obj[i];
+ }
+}
+
+static __rte_always_inline void
+__rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
+ const void *obj_table, uint32_t n)
+{
+ unsigned int i;
+ const uint32_t size = r->size;
+ uint32_t idx = prod_head & r->mask;
+ rte_int128_t *ring = (rte_int128_t *)&r[1];
+ const rte_int128_t *obj = (const rte_int128_t *)obj_table;
+ if (likely(idx + n < size)) {
+ for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
+ memcpy((void *)(ring + idx),
+ (const void *)(obj + i), 32);
+ switch (n & 0x1) {
+ case 1:
+ memcpy((void *)(ring + idx),
+ (const void *)(obj + i), 16);
+ }
+ } else {
+ for (i = 0; idx < size; i++, idx++)
+ memcpy((void *)(ring + idx),
+ (const void *)(obj + i), 16);
+ /* Start at the beginning */
+ for (idx = 0; i < n; i++, idx++)
+ memcpy((void *)(ring + idx),
+ (const void *)(obj + i), 16);
+ }
+}
+
+/* the actual enqueue of elements on the ring.
+ * Placed here since identical code needed in both
+ * single and multi producer enqueue functions.
+ */
+static __rte_always_inline void
+__rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
+ const void *obj_table, uint32_t esize, uint32_t num)
+{
+ /* 8B and 16B copies implemented individually to retain
+ * the current performance.
+ */
+ if (esize == 8)
+ __rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
+ else if (esize == 16)
+ __rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
+ else {
+ uint32_t idx, scale, nr_idx, nr_num, nr_size;
+
+ /* Normalize to uint32_t */
+ scale = esize / sizeof(uint32_t);
+ nr_num = num * scale;
+ idx = prod_head & r->mask;
+ nr_idx = idx * scale;
+ nr_size = r->size * scale;
+ __rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
+ obj_table, nr_num);
+ }
+}
+
+static __rte_always_inline void
+__rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
+ uint32_t idx, void *obj_table, uint32_t n)
+{
+ unsigned int i;
+ uint32_t *ring = (uint32_t *)&r[1];
+ uint32_t *obj = (uint32_t *)obj_table;
+ if (likely(idx + n < size)) {
+ for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
+ obj[i] = ring[idx];
+ obj[i + 1] = ring[idx + 1];
+ obj[i + 2] = ring[idx + 2];
+ obj[i + 3] = ring[idx + 3];
+ obj[i + 4] = ring[idx + 4];
+ obj[i + 5] = ring[idx + 5];
+ obj[i + 6] = ring[idx + 6];
+ obj[i + 7] = ring[idx + 7];
+ }
+ switch (n & 0x7) {
+ case 7:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ case 6:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ case 5:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ case 4:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ case 3:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ case 2:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ case 1:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ }
+ } else {
+ for (i = 0; idx < size; i++, idx++)
+ obj[i] = ring[idx];
+ /* Start at the beginning */
+ for (idx = 0; i < n; i++, idx++)
+ obj[i] = ring[idx];
+ }
+}
+
+static __rte_always_inline void
+__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head,
+ void *obj_table, uint32_t n)
+{
+ unsigned int i;
+ const uint32_t size = r->size;
+ uint32_t idx = prod_head & r->mask;
+ uint64_t *ring = (uint64_t *)&r[1];
+ unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
+ if (likely(idx + n < size)) {
+ for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
+ obj[i] = ring[idx];
+ obj[i + 1] = ring[idx + 1];
+ obj[i + 2] = ring[idx + 2];
+ obj[i + 3] = ring[idx + 3];
+ }
+ switch (n & 0x3) {
+ case 3:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ case 2:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ case 1:
+ obj[i++] = ring[idx++]; /* fallthrough */
+ }
+ } else {
+ for (i = 0; idx < size; i++, idx++)
+ obj[i] = ring[idx];
+ /* Start at the beginning */
+ for (idx = 0; i < n; i++, idx++)
+ obj[i] = ring[idx];
+ }
+}
+
+static __rte_always_inline void
+__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t prod_head,
+ void *obj_table, uint32_t n)
+{
+ unsigned int i;
+ const uint32_t size = r->size;
+ uint32_t idx = prod_head & r->mask;
+ rte_int128_t *ring = (rte_int128_t *)&r[1];
+ rte_int128_t *obj = (rte_int128_t *)obj_table;
+ if (likely(idx + n < size)) {
+ for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
+ memcpy((void *)(obj + i), (void *)(ring + idx), 32);
+ switch (n & 0x1) {
+ case 1:
+ memcpy((void *)(obj + i), (void *)(ring + idx), 16);
+ }
+ } else {
+ for (i = 0; idx < size; i++, idx++)
+ memcpy((void *)(obj + i), (void *)(ring + idx), 16);
+ /* Start at the beginning */
+ for (idx = 0; i < n; i++, idx++)
+ memcpy((void *)(obj + i), (void *)(ring + idx), 16);
+ }
+}
+
+/* the actual dequeue of elements from the ring.
+ * Placed here since identical code needed in both
+ * single and multi producer enqueue functions.
+ */
+static __rte_always_inline void
+__rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
+ void *obj_table, uint32_t esize, uint32_t num)
+{
+ /* 8B and 16B copies implemented individually to retain
+ * the current performance.
+ */
+ if (esize == 8)
+ __rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
+ else if (esize == 16)
+ __rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
+ else {
+ uint32_t idx, scale, nr_idx, nr_num, nr_size;
+
+ /* Normalize to uint32_t */
+ scale = esize / sizeof(uint32_t);
+ nr_num = num * scale;
+ idx = cons_head & r->mask;
+ nr_idx = idx * scale;
+ nr_size = r->size * scale;
+ __rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
+ obj_table, nr_num);
+ }
+}
+
+/* Between load and load. there might be cpu reorder in weak model
+ * (powerpc/arm).
+ * There are 2 choices for the users
+ * 1.use rmb() memory barrier
+ * 2.use one-direction load_acquire/store_release barrier
+ * It depends on performance test results.
+ */
+#ifdef RTE_USE_C11_MEM_MODEL
+#include "rte_ring_c11_pvt.h"
+#else
+#include "rte_ring_generic_pvt.h"
+#endif
+
+/**
+ * @internal Enqueue several objects on the ring
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of objects.
+ * @param esize
+ * The size of ring element, in bytes. It must be a multiple of 4.
+ * This must be the same value used while creating the ring. Otherwise
+ * the results are undefined.
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param is_sp
+ * Indicates whether to use single producer or multi-producer head update
+ * @param free_space
+ * returns the amount of space after the enqueue operation has finished
+ * @return
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
+ unsigned int esize, unsigned int n,
+ enum rte_ring_queue_behavior behavior, unsigned int is_sp,
+ unsigned int *free_space)
+{
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;
+
+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
+ if (n == 0)
+ goto end;
+
+ __rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);
+
+ __rte_ring_update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
+ return n;
+}
+
+/**
+ * @internal Dequeue several objects from the ring
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of objects.
+ * @param esize
+ * The size of ring element, in bytes. It must be a multiple of 4.
+ * This must be the same value used while creating the ring. Otherwise
+ * the results are undefined.
+ * @param n
+ * The number of objects to pull from the ring.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param is_sc
+ * Indicates whether to use single consumer or multi-consumer head update
+ * @param available
+ * returns the number of remaining ring entries after the dequeue has finished
+ * @return
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
+ unsigned int esize, unsigned int n,
+ enum rte_ring_queue_behavior behavior, unsigned int is_sc,
+ unsigned int *available)
+{
+ uint32_t cons_head, cons_next;
+ uint32_t entries;
+
+ n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;
+
+ __rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);
+
+ __rte_ring_update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
+
+end:
+ if (available != NULL)
+ *available = entries - n;
+ return n;
+}
+
+#endif /* _RTE_RING_ELEM_PVT_H_ */
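
Annotation (illustrative, not part of the patch): for element sizes
other than 8 and 16 bytes, __rte_ring_enqueue_elems() and
__rte_ring_dequeue_elems() normalize everything to 32-bit words. A
worked example of that arithmetic with assumed values (esize 12 on a
ring of 8 slots):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t esize = 12;          /* element size in bytes, multiple of 4 */
        uint32_t size = 8, mask = 7;  /* ring with 8 slots (power of two)     */
        uint32_t prod_head = 13;      /* free-running head counter            */

        uint32_t scale = esize / sizeof(uint32_t);  /* 3 words per element */
        uint32_t idx = prod_head & mask;            /* slot 5              */
        uint32_t nr_idx = idx * scale;              /* word offset 15      */
        uint32_t nr_size = size * scale;            /* 24 words in total   */

        printf("scale=%u idx=%u nr_idx=%u nr_size=%u\n",
               scale, idx, nr_idx, nr_size);
        return 0;
    }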
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (c) 2010-2017 Intel Corporation
- * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
- * All rights reserved.
- * Derived from FreeBSD's bufring.h
- * Used as BSD-3 Licensed with permission from Kip Macy.
- */
-
-#ifndef _RTE_RING_GENERIC_H_
-#define _RTE_RING_GENERIC_H_
-
-static __rte_always_inline void
-update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
- uint32_t single, uint32_t enqueue)
-{
- if (enqueue)
- rte_smp_wmb();
- else
- rte_smp_rmb();
- /*
- * If there are other enqueues/dequeues in progress that preceded us,
- * we need to wait for them to complete
- */
- if (!single)
- while (unlikely(ht->tail != old_val))
- rte_pause();
-
- ht->tail = new_val;
-}
-
-/**
- * @internal This function updates the producer head for enqueue
- *
- * @param r
- * A pointer to the ring structure
- * @param is_sp
- * Indicates whether multi-producer path is needed or not
- * @param n
- * The number of elements we will want to enqueue, i.e. how far should the
- * head be moved
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
- * @param old_head
- * Returns head value as it was before the move, i.e. where enqueue starts
- * @param new_head
- * Returns the current/new head value i.e. where enqueue finishes
- * @param free_entries
- * Returns the amount of free space in the ring BEFORE head was moved
- * @return
- * Actual number of objects enqueued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- uint32_t *old_head, uint32_t *new_head,
- uint32_t *free_entries)
-{
- const uint32_t capacity = r->capacity;
- unsigned int max = n;
- int success;
-
- do {
- /* Reset n to the initial burst count */
- n = max;
-
- *old_head = r->prod.head;
-
- /* add rmb barrier to avoid load/load reorder in weak
- * memory model. It is noop on x86
- */
- rte_smp_rmb();
-
- /*
- * The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * *old_head > cons_tail). So 'free_entries' is always between 0
- * and capacity (which is < size).
- */
- *free_entries = (capacity + r->cons.tail - *old_head);
-
- /* check that we have enough room in ring */
- if (unlikely(n > *free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ?
- 0 : *free_entries;
-
- if (n == 0)
- return 0;
-
- *new_head = *old_head + n;
- if (is_sp)
- r->prod.head = *new_head, success = 1;
- else
- success = rte_atomic32_cmpset(&r->prod.head,
- *old_head, *new_head);
- } while (unlikely(success == 0));
- return n;
-}
-
-/**
- * @internal This function updates the consumer head for dequeue
- *
- * @param r
- * A pointer to the ring structure
- * @param is_sc
- * Indicates whether multi-consumer path is needed or not
- * @param n
- * The number of elements we will want to enqueue, i.e. how far should the
- * head be moved
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
- * @param old_head
- * Returns head value as it was before the move, i.e. where dequeue starts
- * @param new_head
- * Returns the current/new head value i.e. where dequeue finishes
- * @param entries
- * Returns the number of entries in the ring BEFORE head was moved
- * @return
- * - Actual number of objects dequeued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
- unsigned int n, enum rte_ring_queue_behavior behavior,
- uint32_t *old_head, uint32_t *new_head,
- uint32_t *entries)
-{
- unsigned int max = n;
- int success;
-
- /* move cons.head atomically */
- do {
- /* Restore n as it may change every loop */
- n = max;
-
- *old_head = r->cons.head;
-
- /* add rmb barrier to avoid load/load reorder in weak
- * memory model. It is noop on x86
- */
- rte_smp_rmb();
-
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1.
- */
- *entries = (r->prod.tail - *old_head);
-
- /* Set the actual entries for dequeue */
- if (n > *entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
-
- if (unlikely(n == 0))
- return 0;
-
- *new_head = *old_head + n;
- if (is_sc) {
- r->cons.head = *new_head;
- rte_smp_rmb();
- success = 1;
- } else {
- success = rte_atomic32_cmpset(&r->cons.head, *old_head,
- *new_head);
- }
- } while (unlikely(success == 0));
- return n;
-}
-
-#endif /* _RTE_RING_GENERIC_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2010-2017 Intel Corporation
+ * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ * Derived from FreeBSD's bufring.h
+ * Used as BSD-3 Licensed with permission from Kip Macy.
+ */
+
+#ifndef _RTE_RING_GENERIC_PVT_H_
+#define _RTE_RING_GENERIC_PVT_H_
+
+static __rte_always_inline void
+__rte_ring_update_tail(struct rte_ring_headtail *ht, uint32_t old_val,
+ uint32_t new_val, uint32_t single, uint32_t enqueue)
+{
+ if (enqueue)
+ rte_smp_wmb();
+ else
+ rte_smp_rmb();
+ /*
+ * If there are other enqueues/dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ if (!single)
+ while (unlikely(ht->tail != old_val))
+ rte_pause();
+
+ ht->tail = new_val;
+}
+
+/**
+ * @internal This function updates the producer head for enqueue
+ *
+ * @param r
+ * A pointer to the ring structure
+ * @param is_sp
+ * Indicates whether multi-producer path is needed or not
+ * @param n
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where enqueue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where enqueue finishes
+ * @param free_entries
+ * Returns the amount of free space in the ring BEFORE head was moved
+ * @return
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *free_entries)
+{
+ const uint32_t capacity = r->capacity;
+ unsigned int max = n;
+ int success;
+
+ do {
+ /* Reset n to the initial burst count */
+ n = max;
+
+ *old_head = r->prod.head;
+
+ /* add rmb barrier to avoid load/load reorder in weak
+ * memory model. It is noop on x86
+ */
+ rte_smp_rmb();
+
+ /*
+ * The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * *old_head > cons_tail). So 'free_entries' is always between 0
+ * and capacity (which is < size).
+ */
+ *free_entries = (capacity + r->cons.tail - *old_head);
+
+ /* check that we have enough room in ring */
+ if (unlikely(n > *free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : *free_entries;
+
+ if (n == 0)
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sp)
+ r->prod.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->prod.head,
+ *old_head, *new_head);
+ } while (unlikely(success == 0));
+ return n;
+}
+
+/**
+ * @internal This function updates the consumer head for dequeue
+ *
+ * @param r
+ * A pointer to the ring structure
+ * @param is_sc
+ * Indicates whether multi-consumer path is needed or not
+ * @param n
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where dequeue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where dequeue finishes
+ * @param entries
+ * Returns the number of entries in the ring BEFORE head was moved
+ * @return
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *entries)
+{
+ unsigned int max = n;
+ int success;
+
+ /* move cons.head atomically */
+ do {
+ /* Restore n as it may change every loop */
+ n = max;
+
+ *old_head = r->cons.head;
+
+ /* add rmb barrier to avoid load/load reorder in weak
+ * memory model. It is noop on x86
+ */
+ rte_smp_rmb();
+
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1.
+ */
+ *entries = (r->prod.tail - *old_head);
+
+ /* Set the actual entries for dequeue */
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
+
+ if (unlikely(n == 0))
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sc) {
+ r->cons.head = *new_head;
+ rte_smp_rmb();
+ success = 1;
+ } else {
+ success = rte_atomic32_cmpset(&r->cons.head, *old_head,
+ *new_head);
+ }
+ } while (unlikely(success == 0));
+ return n;
+}
+
+#endif /* _RTE_RING_GENERIC_PVT_H_ */
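
Annotation (illustrative, not part of the patch): unlike the C11
variant, the generic header publishes the tail with two-sided
rte_smp_*() barriers. A rough standalone sketch of the producer side,
with a release fence standing in for rte_smp_wmb() (an approximation,
since rte_smp_wmb() is strictly a store-store barrier):

    #include <stdint.h>

    static uint32_t slot;            /* stands in for the ring payload */
    static volatile uint32_t tail;   /* stands in for ht->tail */

    void publish(void)
    {
        slot = 42;                                /* copy the element first */
        __atomic_thread_fence(__ATOMIC_RELEASE);  /* stand-in for rte_smp_wmb() */
        tail = 1;                                 /* then expose the new tail */
    }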
extern "C" {
#endif
-#include <rte_ring_hts_c11_mem.h>
-
-/**
- * @internal Enqueue several objects on the HTS ring.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of objects.
- * @param esize
- * The size of ring element, in bytes. It must be a multiple of 4.
- * This must be the same value used while creating the ring. Otherwise
- * the results are undefined.
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
- * @param free_space
- * returns the amount of space after the enqueue operation has finished
- * @return
- * Actual number of objects enqueued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_do_hts_enqueue_elem(struct rte_ring *r, const void *obj_table,
- uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
- uint32_t *free_space)
-{
- uint32_t free, head;
-
- n = __rte_ring_hts_move_prod_head(r, n, behavior, &head, &free);
-
- if (n != 0) {
- __rte_ring_enqueue_elems(r, head, obj_table, esize, n);
- __rte_ring_hts_update_tail(&r->hts_prod, head, n, 1);
- }
-
- if (free_space != NULL)
- *free_space = free - n;
- return n;
-}
-
-/**
- * @internal Dequeue several objects from the HTS ring.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of objects.
- * @param esize
- * The size of ring element, in bytes. It must be a multiple of 4.
- * This must be the same value used while creating the ring. Otherwise
- * the results are undefined.
- * @param n
- * The number of objects to pull from the ring.
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
- * @param available
- * returns the number of remaining ring entries after the dequeue has finished
- * @return
- * - Actual number of objects dequeued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_do_hts_dequeue_elem(struct rte_ring *r, void *obj_table,
- uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
- uint32_t *available)
-{
- uint32_t entries, head;
-
- n = __rte_ring_hts_move_cons_head(r, n, behavior, &head, &entries);
-
- if (n != 0) {
- __rte_ring_dequeue_elems(r, head, obj_table, esize, n);
- __rte_ring_hts_update_tail(&r->hts_cons, head, n, 0);
- }
-
- if (available != NULL)
- *available = entries - n;
- return n;
-}
+#include <rte_ring_hts_elem_pvt.h>
/**
* Enqueue several objects on the HTS ring (multi-producers safe).
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (c) 2010-2020 Intel Corporation
- * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
- * All rights reserved.
- * Derived from FreeBSD's bufring.h
- * Used as BSD-3 Licensed with permission from Kip Macy.
- */
-
-#ifndef _RTE_RING_HTS_C11_MEM_H_
-#define _RTE_RING_HTS_C11_MEM_H_
-
-/**
- * @file rte_ring_hts_c11_mem.h
- * It is not recommended to include this file directly,
- * include <rte_ring.h> instead.
- * Contains internal helper functions for head/tail sync (HTS) ring mode.
- * For more information please refer to <rte_ring_hts.h>.
- */
-
-/**
- * @internal update tail with new value.
- */
-static __rte_always_inline void
-__rte_ring_hts_update_tail(struct rte_ring_hts_headtail *ht, uint32_t old_tail,
- uint32_t num, uint32_t enqueue)
-{
- uint32_t tail;
-
- RTE_SET_USED(enqueue);
-
- tail = old_tail + num;
- __atomic_store_n(&ht->ht.pos.tail, tail, __ATOMIC_RELEASE);
-}
-
-/**
- * @internal waits till tail will become equal to head.
- * Means no writer/reader is active for that ring.
- * Suppose to work as serialization point.
- */
-static __rte_always_inline void
-__rte_ring_hts_head_wait(const struct rte_ring_hts_headtail *ht,
- union __rte_ring_hts_pos *p)
-{
- while (p->pos.head != p->pos.tail) {
- rte_pause();
- p->raw = __atomic_load_n(&ht->ht.raw, __ATOMIC_ACQUIRE);
- }
-}
-
-/**
- * @internal This function updates the producer head for enqueue
- */
-static __rte_always_inline unsigned int
-__rte_ring_hts_move_prod_head(struct rte_ring *r, unsigned int num,
- enum rte_ring_queue_behavior behavior, uint32_t *old_head,
- uint32_t *free_entries)
-{
- uint32_t n;
- union __rte_ring_hts_pos np, op;
-
- const uint32_t capacity = r->capacity;
-
- op.raw = __atomic_load_n(&r->hts_prod.ht.raw, __ATOMIC_ACQUIRE);
-
- do {
- /* Reset n to the initial burst count */
- n = num;
-
- /*
- * wait for tail to be equal to head,
- * make sure that we read prod head/tail *before*
- * reading cons tail.
- */
- __rte_ring_hts_head_wait(&r->hts_prod, &op);
-
- /*
- * The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * *old_head > cons_tail). So 'free_entries' is always between 0
- * and capacity (which is < size).
- */
- *free_entries = capacity + r->cons.tail - op.pos.head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > *free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ?
- 0 : *free_entries;
-
- if (n == 0)
- break;
-
- np.pos.tail = op.pos.tail;
- np.pos.head = op.pos.head + n;
-
- /*
- * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
- * - OOO reads of cons tail value
- * - OOO copy of elems from the ring
- */
- } while (__atomic_compare_exchange_n(&r->hts_prod.ht.raw,
- &op.raw, np.raw,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);
-
- *old_head = op.pos.head;
- return n;
-}
-
-/**
- * @internal This function updates the consumer head for dequeue
- */
-static __rte_always_inline unsigned int
-__rte_ring_hts_move_cons_head(struct rte_ring *r, unsigned int num,
- enum rte_ring_queue_behavior behavior, uint32_t *old_head,
- uint32_t *entries)
-{
- uint32_t n;
- union __rte_ring_hts_pos np, op;
-
- op.raw = __atomic_load_n(&r->hts_cons.ht.raw, __ATOMIC_ACQUIRE);
-
- /* move cons.head atomically */
- do {
- /* Restore n as it may change every loop */
- n = num;
-
- /*
- * wait for tail to be equal to head,
- * make sure that we read cons head/tail *before*
- * reading prod tail.
- */
- __rte_ring_hts_head_wait(&r->hts_cons, &op);
-
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1.
- */
- *entries = r->prod.tail - op.pos.head;
-
- /* Set the actual entries for dequeue */
- if (n > *entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
-
- if (unlikely(n == 0))
- break;
-
- np.pos.tail = op.pos.tail;
- np.pos.head = op.pos.head + n;
-
- /*
- * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
- * - OOO reads of prod tail value
- * - OOO copy of elems from the ring
- */
- } while (__atomic_compare_exchange_n(&r->hts_cons.ht.raw,
- &op.raw, np.raw,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);
-
- *old_head = op.pos.head;
- return n;
-}
-
-#endif /* _RTE_RING_HTS_C11_MEM_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2010-2020 Intel Corporation
+ * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ * Derived from FreeBSD's bufring.h
+ * Used as BSD-3 Licensed with permission from Kip Macy.
+ */
+
+#ifndef _RTE_RING_HTS_ELEM_PVT_H_
+#define _RTE_RING_HTS_ELEM_PVT_H_
+
+/**
+ * @file rte_ring_hts_elem_pvt.h
+ * It is not recommended to include this file directly,
+ * include <rte_ring.h> instead.
+ * Contains internal helper functions for head/tail sync (HTS) ring mode.
+ * For more information please refer to <rte_ring_hts.h>.
+ */
+
+/**
+ * @internal update tail with new value.
+ */
+static __rte_always_inline void
+__rte_ring_hts_update_tail(struct rte_ring_hts_headtail *ht, uint32_t old_tail,
+ uint32_t num, uint32_t enqueue)
+{
+ uint32_t tail;
+
+ RTE_SET_USED(enqueue);
+
+ tail = old_tail + num;
+ __atomic_store_n(&ht->ht.pos.tail, tail, __ATOMIC_RELEASE);
+}
+
+/**
+ * @internal waits till tail will become equal to head.
+ * Means no writer/reader is active for that ring.
+ * Suppose to work as serialization point.
+ */
+static __rte_always_inline void
+__rte_ring_hts_head_wait(const struct rte_ring_hts_headtail *ht,
+ union __rte_ring_hts_pos *p)
+{
+ while (p->pos.head != p->pos.tail) {
+ rte_pause();
+ p->raw = __atomic_load_n(&ht->ht.raw, __ATOMIC_ACQUIRE);
+ }
+}
+
+/**
+ * @internal This function updates the producer head for enqueue
+ */
+static __rte_always_inline unsigned int
+__rte_ring_hts_move_prod_head(struct rte_ring *r, unsigned int num,
+ enum rte_ring_queue_behavior behavior, uint32_t *old_head,
+ uint32_t *free_entries)
+{
+ uint32_t n;
+ union __rte_ring_hts_pos np, op;
+
+ const uint32_t capacity = r->capacity;
+
+ op.raw = __atomic_load_n(&r->hts_prod.ht.raw, __ATOMIC_ACQUIRE);
+
+ do {
+ /* Reset n to the initial burst count */
+ n = num;
+
+ /*
+ * wait for tail to be equal to head,
+ * make sure that we read prod head/tail *before*
+ * reading cons tail.
+ */
+ __rte_ring_hts_head_wait(&r->hts_prod, &op);
+
+ /*
+ * The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * *old_head > cons_tail). So 'free_entries' is always between 0
+ * and capacity (which is < size).
+ */
+ *free_entries = capacity + r->cons.tail - op.pos.head;
+
+ /* check that we have enough room in ring */
+ if (unlikely(n > *free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : *free_entries;
+
+ if (n == 0)
+ break;
+
+ np.pos.tail = op.pos.tail;
+ np.pos.head = op.pos.head + n;
+
+ /*
+ * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
+ * - OOO reads of cons tail value
+ * - OOO copy of elems from the ring
+ */
+ } while (__atomic_compare_exchange_n(&r->hts_prod.ht.raw,
+ &op.raw, np.raw,
+ 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);
+
+ *old_head = op.pos.head;
+ return n;
+}
+
+/**
+ * @internal This function updates the consumer head for dequeue
+ */
+static __rte_always_inline unsigned int
+__rte_ring_hts_move_cons_head(struct rte_ring *r, unsigned int num,
+ enum rte_ring_queue_behavior behavior, uint32_t *old_head,
+ uint32_t *entries)
+{
+ uint32_t n;
+ union __rte_ring_hts_pos np, op;
+
+ op.raw = __atomic_load_n(&r->hts_cons.ht.raw, __ATOMIC_ACQUIRE);
+
+ /* move cons.head atomically */
+ do {
+ /* Restore n as it may change every loop */
+ n = num;
+
+ /*
+ * wait for tail to be equal to head,
+ * make sure that we read cons head/tail *before*
+ * reading prod tail.
+ */
+ __rte_ring_hts_head_wait(&r->hts_cons, &op);
+
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1.
+ */
+ *entries = r->prod.tail - op.pos.head;
+
+ /* Set the actual entries for dequeue */
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
+
+ if (unlikely(n == 0))
+ break;
+
+ np.pos.tail = op.pos.tail;
+ np.pos.head = op.pos.head + n;
+
+ /*
+ * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
+ * - OOO reads of prod tail value
+ * - OOO copy of elems from the ring
+ */
+ } while (__atomic_compare_exchange_n(&r->hts_cons.ht.raw,
+ &op.raw, np.raw,
+ 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);
+
+ *old_head = op.pos.head;
+ return n;
+}
+
+/**
+ * @internal Enqueue several objects on the HTS ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of objects.
+ * @param esize
+ * The size of ring element, in bytes. It must be a multiple of 4.
+ * This must be the same value used while creating the ring. Otherwise
+ * the results are undefined.
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param free_space
+ * returns the amount of space after the enqueue operation has finished
+ * @return
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_hts_enqueue_elem(struct rte_ring *r, const void *obj_table,
+ uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+ uint32_t *free_space)
+{
+ uint32_t free, head;
+
+ n = __rte_ring_hts_move_prod_head(r, n, behavior, &head, &free);
+
+ if (n != 0) {
+ __rte_ring_enqueue_elems(r, head, obj_table, esize, n);
+ __rte_ring_hts_update_tail(&r->hts_prod, head, n, 1);
+ }
+
+ if (free_space != NULL)
+ *free_space = free - n;
+ return n;
+}
+
+/**
+ * @internal Dequeue several objects from the HTS ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of objects.
+ * @param esize
+ * The size of a ring element, in bytes. It must be a multiple of 4.
+ * This must be the same value used while creating the ring. Otherwise
+ * the results are undefined.
+ * @param n
+ * The number of objects to pull from the ring.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param available
+ * Returns the number of remaining ring entries after the dequeue has finished
+ * @return
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_hts_dequeue_elem(struct rte_ring *r, void *obj_table,
+ uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+ uint32_t *available)
+{
+ uint32_t entries, head;
+
+ n = __rte_ring_hts_move_cons_head(r, n, behavior, &head, &entries);
+
+ if (n != 0) {
+ __rte_ring_dequeue_elems(r, head, obj_table, esize, n);
+ __rte_ring_hts_update_tail(&r->hts_cons, head, n, 0);
+ }
+
+ if (available != NULL)
+ *available = entries - n;
+ return n;
+}
+
+#endif /* _RTE_RING_HTS_ELEM_PVT_H_ */
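For context, a minimal usage sketch (illustrative only; the ring name, sizes
and values below are hypothetical): applications never call the
__rte_ring_do_hts_* helpers above directly. They request HTS mode at ring
creation time, and the public enqueue/dequeue wrappers dispatch to these
helpers:

#include <string.h>
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_ring.h>

static int
hts_ring_example(void)
{
    struct rte_ring *r;
    void *objs[32];
    unsigned int n, free_space, avail;

    /* request HTS sync for both producer and consumer */
    r = rte_ring_create("hts_demo", 1024, rte_socket_id(),
            RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
    if (r == NULL)
        return -1;

    memset(objs, 0, sizeof(objs));

    /* resolves to __rte_ring_do_hts_enqueue_elem() internally */
    n = rte_ring_enqueue_burst(r, objs, RTE_DIM(objs), &free_space);

    /* resolves to __rte_ring_do_hts_dequeue_elem() internally */
    n = rte_ring_dequeue_burst(r, objs, n, &avail);

    rte_ring_free(r);
    return (int)n;
}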
extern "C" {
#endif
-#include <rte_ring_peek_c11_mem.h>
-
-/**
- * @internal This function moves prod head value.
- */
-static __rte_always_inline unsigned int
-__rte_ring_do_enqueue_start(struct rte_ring *r, uint32_t n,
- enum rte_ring_queue_behavior behavior, uint32_t *free_space)
-{
- uint32_t free, head, next;
-
- switch (r->prod.sync_type) {
- case RTE_RING_SYNC_ST:
- n = __rte_ring_move_prod_head(r, RTE_RING_SYNC_ST, n,
- behavior, &head, &next, &free);
- break;
- case RTE_RING_SYNC_MT_HTS:
- n = __rte_ring_hts_move_prod_head(r, n, behavior,
- &head, &free);
- break;
- case RTE_RING_SYNC_MT:
- case RTE_RING_SYNC_MT_RTS:
- default:
- /* unsupported mode, shouldn't be here */
- RTE_ASSERT(0);
- n = 0;
- free = 0;
- }
-
- if (free_space != NULL)
- *free_space = free - n;
- return n;
-}
+#include <rte_ring_peek_elem_pvt.h>
/**
* Start to enqueue several objects on the ring.
rte_ring_enqueue_elem_finish(r, obj_table, sizeof(uintptr_t), n);
}
-/**
- * @internal This function moves cons head value and copies up to *n*
- * objects from the ring to the user provided obj_table.
- */
-static __rte_always_inline unsigned int
-__rte_ring_do_dequeue_start(struct rte_ring *r, void *obj_table,
- uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
- uint32_t *available)
-{
- uint32_t avail, head, next;
-
- switch (r->cons.sync_type) {
- case RTE_RING_SYNC_ST:
- n = __rte_ring_move_cons_head(r, RTE_RING_SYNC_ST, n,
- behavior, &head, &next, &avail);
- break;
- case RTE_RING_SYNC_MT_HTS:
- n = __rte_ring_hts_move_cons_head(r, n, behavior,
- &head, &avail);
- break;
- case RTE_RING_SYNC_MT:
- case RTE_RING_SYNC_MT_RTS:
- default:
- /* unsupported mode, shouldn't be here */
- RTE_ASSERT(0);
- n = 0;
- avail = 0;
- }
-
- if (n != 0)
- __rte_ring_dequeue_elems(r, head, obj_table, esize, n);
-
- if (available != NULL)
- *available = avail - n;
- return n;
-}
-
/**
* Start to dequeue several objects from the ring.
* Note that user has to call appropriate dequeue_finish()
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (c) 2010-2020 Intel Corporation
- * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
- * All rights reserved.
- * Derived from FreeBSD's bufring.h
- * Used as BSD-3 Licensed with permission from Kip Macy.
- */
-
-#ifndef _RTE_RING_PEEK_C11_MEM_H_
-#define _RTE_RING_PEEK_C11_MEM_H_
-
-/**
- * @file rte_ring_peek_c11_mem.h
- * It is not recommended to include this file directly,
- * include <rte_ring.h> instead.
- * Contains internal helper functions for rte_ring peek API.
- * For more information please refer to <rte_ring_peek.h>.
- */
-
-/**
- * @internal get current tail value.
- * This function should be used only for single thread producer/consumer.
- * Check that user didn't request to move tail above the head.
- * In that situation:
- * - return zero, that will cause abort any pending changes and
- * return head to its previous position.
- * - throw an assert in debug mode.
- */
-static __rte_always_inline uint32_t
-__rte_ring_st_get_tail(struct rte_ring_headtail *ht, uint32_t *tail,
- uint32_t num)
-{
- uint32_t h, n, t;
-
- h = ht->head;
- t = ht->tail;
- n = h - t;
-
- RTE_ASSERT(n >= num);
- num = (n >= num) ? num : 0;
-
- *tail = t;
- return num;
-}
-
-/**
- * @internal set new values for head and tail.
- * This function should be used only for single thread producer/consumer.
- * Should be used only in conjunction with __rte_ring_st_get_tail.
- */
-static __rte_always_inline void
-__rte_ring_st_set_head_tail(struct rte_ring_headtail *ht, uint32_t tail,
- uint32_t num, uint32_t enqueue)
-{
- uint32_t pos;
-
- RTE_SET_USED(enqueue);
-
- pos = tail + num;
- ht->head = pos;
- __atomic_store_n(&ht->tail, pos, __ATOMIC_RELEASE);
-}
-
-/**
- * @internal get current tail value.
- * This function should be used only for producer/consumer in MT_HTS mode.
- * Check that user didn't request to move tail above the head.
- * In that situation:
- * - return zero, that will cause abort any pending changes and
- * return head to its previous position.
- * - throw an assert in debug mode.
- */
-static __rte_always_inline uint32_t
-__rte_ring_hts_get_tail(struct rte_ring_hts_headtail *ht, uint32_t *tail,
- uint32_t num)
-{
- uint32_t n;
- union __rte_ring_hts_pos p;
-
- p.raw = __atomic_load_n(&ht->ht.raw, __ATOMIC_RELAXED);
- n = p.pos.head - p.pos.tail;
-
- RTE_ASSERT(n >= num);
- num = (n >= num) ? num : 0;
-
- *tail = p.pos.tail;
- return num;
-}
-
-/**
- * @internal set new values for head and tail as one atomic 64 bit operation.
- * This function should be used only for producer/consumer in MT_HTS mode.
- * Should be used only in conjunction with __rte_ring_hts_get_tail.
- */
-static __rte_always_inline void
-__rte_ring_hts_set_head_tail(struct rte_ring_hts_headtail *ht, uint32_t tail,
- uint32_t num, uint32_t enqueue)
-{
- union __rte_ring_hts_pos p;
-
- RTE_SET_USED(enqueue);
-
- p.pos.head = tail + num;
- p.pos.tail = p.pos.head;
-
- __atomic_store_n(&ht->ht.raw, p.raw, __ATOMIC_RELEASE);
-}
-
-#endif /* _RTE_RING_PEEK_C11_MEM_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2010-2020 Intel Corporation
+ * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ * Derived from FreeBSD's bufring.h
+ * Used as BSD-3 Licensed with permission from Kip Macy.
+ */
+
+#ifndef _RTE_RING_PEEK_ELEM_PVT_H_
+#define _RTE_RING_PEEK_ELEM_PVT_H_
+
+/**
+ * @file rte_ring_peek_elem_pvt.h
+ * It is not recommended to include this file directly,
+ * include <rte_ring.h> instead.
+ * Contains internal helper functions for rte_ring peek API.
+ * For more information please refer to <rte_ring_peek.h>.
+ */
+
+/**
+ * @internal get current tail value.
+ * This function should be used only for a single-threaded producer/consumer.
+ * Check that the user didn't request to move the tail above the head.
+ * In that situation:
+ * - return zero, which will abort any pending changes and
+ * return the head to its previous position.
+ * - trigger an assert in debug mode.
+ */
+static __rte_always_inline uint32_t
+__rte_ring_st_get_tail(struct rte_ring_headtail *ht, uint32_t *tail,
+ uint32_t num)
+{
+ uint32_t h, n, t;
+
+ h = ht->head;
+ t = ht->tail;
+ n = h - t;
+
+ RTE_ASSERT(n >= num);
+ num = (n >= num) ? num : 0;
+
+ *tail = t;
+ return num;
+}
+
+/**
+ * @internal set new values for head and tail.
+ * This function should be used only for a single-threaded producer/consumer.
+ * Should be used only in conjunction with __rte_ring_st_get_tail.
+ */
+static __rte_always_inline void
+__rte_ring_st_set_head_tail(struct rte_ring_headtail *ht, uint32_t tail,
+ uint32_t num, uint32_t enqueue)
+{
+ uint32_t pos;
+
+ RTE_SET_USED(enqueue);
+
+ pos = tail + num;
+ ht->head = pos;
+ __atomic_store_n(&ht->tail, pos, __ATOMIC_RELEASE);
+}
+
+/**
+ * @internal get current tail value.
+ * This function should be used only for producer/consumer in MT_HTS mode.
+ * Check that the user didn't request to move the tail above the head.
+ * In that situation:
+ * - return zero, which will abort any pending changes and
+ * return the head to its previous position.
+ * - trigger an assert in debug mode.
+ */
+static __rte_always_inline uint32_t
+__rte_ring_hts_get_tail(struct rte_ring_hts_headtail *ht, uint32_t *tail,
+ uint32_t num)
+{
+ uint32_t n;
+ union __rte_ring_hts_pos p;
+
+ p.raw = __atomic_load_n(&ht->ht.raw, __ATOMIC_RELAXED);
+ n = p.pos.head - p.pos.tail;
+
+ RTE_ASSERT(n >= num);
+ num = (n >= num) ? num : 0;
+
+ *tail = p.pos.tail;
+ return num;
+}
+
+/**
+ * @internal set new values for head and tail as one atomic 64-bit operation.
+ * This function should be used only for producer/consumer in MT_HTS mode.
+ * Should be used only in conjunction with __rte_ring_hts_get_tail.
+ */
+static __rte_always_inline void
+__rte_ring_hts_set_head_tail(struct rte_ring_hts_headtail *ht, uint32_t tail,
+ uint32_t num, uint32_t enqueue)
+{
+ union __rte_ring_hts_pos p;
+
+ RTE_SET_USED(enqueue);
+
+ p.pos.head = tail + num;
+ p.pos.tail = p.pos.head;
+
+ __atomic_store_n(&ht->ht.raw, p.raw, __ATOMIC_RELEASE);
+}
+
+/**
+ * @internal This function moves the prod head value.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_enqueue_start(struct rte_ring *r, uint32_t n,
+ enum rte_ring_queue_behavior behavior, uint32_t *free_space)
+{
+ uint32_t free, head, next;
+
+ switch (r->prod.sync_type) {
+ case RTE_RING_SYNC_ST:
+ n = __rte_ring_move_prod_head(r, RTE_RING_SYNC_ST, n,
+ behavior, &head, &next, &free);
+ break;
+ case RTE_RING_SYNC_MT_HTS:
+ n = __rte_ring_hts_move_prod_head(r, n, behavior,
+ &head, &free);
+ break;
+ case RTE_RING_SYNC_MT:
+ case RTE_RING_SYNC_MT_RTS:
+ default:
+ /* unsupported mode, shouldn't be here */
+ RTE_ASSERT(0);
+ n = 0;
+ free = 0;
+ }
+
+ if (free_space != NULL)
+ *free_space = free - n;
+ return n;
+}
+
+/**
+ * @internal This function moves the cons head value and copies up to *n*
+ * objects from the ring to the user-provided obj_table.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_dequeue_start(struct rte_ring *r, void *obj_table,
+ uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+ uint32_t *available)
+{
+ uint32_t avail, head, next;
+
+ switch (r->cons.sync_type) {
+ case RTE_RING_SYNC_ST:
+ n = __rte_ring_move_cons_head(r, RTE_RING_SYNC_ST, n,
+ behavior, &head, &next, &avail);
+ break;
+ case RTE_RING_SYNC_MT_HTS:
+ n = __rte_ring_hts_move_cons_head(r, n, behavior,
+ &head, &avail);
+ break;
+ case RTE_RING_SYNC_MT:
+ case RTE_RING_SYNC_MT_RTS:
+ default:
+ /* unsupported mode, shouldn't be here */
+ RTE_ASSERT(0);
+ n = 0;
+ avail = 0;
+ }
+
+ if (n != 0)
+ __rte_ring_dequeue_elems(r, head, obj_table, esize, n);
+
+ if (available != NULL)
+ *available = avail - n;
+ return n;
+}
+
+#endif /* _RTE_RING_PEEK_ELEM_PVT_H_ */
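To make the start/finish split implemented by these helpers concrete, here is
a hedged sketch of the corresponding public peek API flow from
rte_ring_peek.h (the ring is assumed to be created in ST or MT_HTS sync mode,
as this API requires; the function below is illustrative):

#include <rte_ring_peek.h>

static void
peek_enqueue_example(struct rte_ring *r, void * const *objs, unsigned int num)
{
    unsigned int n, free_space;

    /* reserve space: moves prod.head via __rte_ring_do_enqueue_start() */
    n = rte_ring_enqueue_burst_start(r, num, &free_space);
    if (n != 0) {
        /* objs[0..n-1] must already point at the objects to enqueue;
         * the copy happens here, then the new tail is published via
         * __rte_ring_st_set_head_tail() or __rte_ring_hts_set_head_tail()
         */
        rte_ring_enqueue_finish(r, objs, n);
    }
}

The dequeue side mirrors this with rte_ring_dequeue_burst_start() and
rte_ring_dequeue_finish(): there the copy happens at start time and only the
tail update is deferred to finish.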
extern "C" {
#endif
-#include <rte_ring_peek_c11_mem.h>
+#include <rte_ring_peek_elem_pvt.h>
/**
* Ring zero-copy information structure.
extern "C" {
#endif
-#include <rte_ring_rts_c11_mem.h>
-
-/**
- * @internal Enqueue several objects on the RTS ring.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of objects.
- * @param esize
- * The size of ring element, in bytes. It must be a multiple of 4.
- * This must be the same value used while creating the ring. Otherwise
- * the results are undefined.
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
- * @param free_space
- * returns the amount of space after the enqueue operation has finished
- * @return
- * Actual number of objects enqueued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_do_rts_enqueue_elem(struct rte_ring *r, const void *obj_table,
- uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
- uint32_t *free_space)
-{
- uint32_t free, head;
-
- n = __rte_ring_rts_move_prod_head(r, n, behavior, &head, &free);
-
- if (n != 0) {
- __rte_ring_enqueue_elems(r, head, obj_table, esize, n);
- __rte_ring_rts_update_tail(&r->rts_prod);
- }
-
- if (free_space != NULL)
- *free_space = free - n;
- return n;
-}
-
-/**
- * @internal Dequeue several objects from the RTS ring.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of objects.
- * @param esize
- * The size of ring element, in bytes. It must be a multiple of 4.
- * This must be the same value used while creating the ring. Otherwise
- * the results are undefined.
- * @param n
- * The number of objects to pull from the ring.
- * @param behavior
- * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
- * @param available
- * returns the number of remaining ring entries after the dequeue has finished
- * @return
- * - Actual number of objects dequeued.
- * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
- */
-static __rte_always_inline unsigned int
-__rte_ring_do_rts_dequeue_elem(struct rte_ring *r, void *obj_table,
- uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
- uint32_t *available)
-{
- uint32_t entries, head;
-
- n = __rte_ring_rts_move_cons_head(r, n, behavior, &head, &entries);
-
- if (n != 0) {
- __rte_ring_dequeue_elems(r, head, obj_table, esize, n);
- __rte_ring_rts_update_tail(&r->rts_cons);
- }
-
- if (available != NULL)
- *available = entries - n;
- return n;
-}
+#include <rte_ring_rts_elem_pvt.h>
/**
* Enqueue several objects on the RTS ring (multi-producers safe).
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (c) 2010-2020 Intel Corporation
- * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
- * All rights reserved.
- * Derived from FreeBSD's bufring.h
- * Used as BSD-3 Licensed with permission from Kip Macy.
- */
-
-#ifndef _RTE_RING_RTS_C11_MEM_H_
-#define _RTE_RING_RTS_C11_MEM_H_
-
-/**
- * @file rte_ring_rts_c11_mem.h
- * It is not recommended to include this file directly,
- * include <rte_ring.h> instead.
- * Contains internal helper functions for Relaxed Tail Sync (RTS) ring mode.
- * For more information please refer to <rte_ring_rts.h>.
- */
-
-/**
- * @internal This function updates tail values.
- */
-static __rte_always_inline void
-__rte_ring_rts_update_tail(struct rte_ring_rts_headtail *ht)
-{
- union __rte_ring_rts_poscnt h, ot, nt;
-
- /*
- * If there are other enqueues/dequeues in progress that
- * might preceded us, then don't update tail with new value.
- */
-
- ot.raw = __atomic_load_n(&ht->tail.raw, __ATOMIC_ACQUIRE);
-
- do {
- /* on 32-bit systems we have to do atomic read here */
- h.raw = __atomic_load_n(&ht->head.raw, __ATOMIC_RELAXED);
-
- nt.raw = ot.raw;
- if (++nt.val.cnt == h.val.cnt)
- nt.val.pos = h.val.pos;
-
- } while (__atomic_compare_exchange_n(&ht->tail.raw, &ot.raw, nt.raw,
- 0, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE) == 0);
-}
-
-/**
- * @internal This function waits till head/tail distance wouldn't
- * exceed pre-defined max value.
- */
-static __rte_always_inline void
-__rte_ring_rts_head_wait(const struct rte_ring_rts_headtail *ht,
- union __rte_ring_rts_poscnt *h)
-{
- uint32_t max;
-
- max = ht->htd_max;
-
- while (h->val.pos - ht->tail.val.pos > max) {
- rte_pause();
- h->raw = __atomic_load_n(&ht->head.raw, __ATOMIC_ACQUIRE);
- }
-}
-
-/**
- * @internal This function updates the producer head for enqueue.
- */
-static __rte_always_inline uint32_t
-__rte_ring_rts_move_prod_head(struct rte_ring *r, uint32_t num,
- enum rte_ring_queue_behavior behavior, uint32_t *old_head,
- uint32_t *free_entries)
-{
- uint32_t n;
- union __rte_ring_rts_poscnt nh, oh;
-
- const uint32_t capacity = r->capacity;
-
- oh.raw = __atomic_load_n(&r->rts_prod.head.raw, __ATOMIC_ACQUIRE);
-
- do {
- /* Reset n to the initial burst count */
- n = num;
-
- /*
- * wait for prod head/tail distance,
- * make sure that we read prod head *before*
- * reading cons tail.
- */
- __rte_ring_rts_head_wait(&r->rts_prod, &oh);
-
- /*
- * The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * *old_head > cons_tail). So 'free_entries' is always between 0
- * and capacity (which is < size).
- */
- *free_entries = capacity + r->cons.tail - oh.val.pos;
-
- /* check that we have enough room in ring */
- if (unlikely(n > *free_entries))
- n = (behavior == RTE_RING_QUEUE_FIXED) ?
- 0 : *free_entries;
-
- if (n == 0)
- break;
-
- nh.val.pos = oh.val.pos + n;
- nh.val.cnt = oh.val.cnt + 1;
-
- /*
- * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
- * - OOO reads of cons tail value
- * - OOO copy of elems to the ring
- */
- } while (__atomic_compare_exchange_n(&r->rts_prod.head.raw,
- &oh.raw, nh.raw,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);
-
- *old_head = oh.val.pos;
- return n;
-}
-
-/**
- * @internal This function updates the consumer head for dequeue
- */
-static __rte_always_inline unsigned int
-__rte_ring_rts_move_cons_head(struct rte_ring *r, uint32_t num,
- enum rte_ring_queue_behavior behavior, uint32_t *old_head,
- uint32_t *entries)
-{
- uint32_t n;
- union __rte_ring_rts_poscnt nh, oh;
-
- oh.raw = __atomic_load_n(&r->rts_cons.head.raw, __ATOMIC_ACQUIRE);
-
- /* move cons.head atomically */
- do {
- /* Restore n as it may change every loop */
- n = num;
-
- /*
- * wait for cons head/tail distance,
- * make sure that we read cons head *before*
- * reading prod tail.
- */
- __rte_ring_rts_head_wait(&r->rts_cons, &oh);
-
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1.
- */
- *entries = r->prod.tail - oh.val.pos;
-
- /* Set the actual entries for dequeue */
- if (n > *entries)
- n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
-
- if (unlikely(n == 0))
- break;
-
- nh.val.pos = oh.val.pos + n;
- nh.val.cnt = oh.val.cnt + 1;
-
- /*
- * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
- * - OOO reads of prod tail value
- * - OOO copy of elems from the ring
- */
- } while (__atomic_compare_exchange_n(&r->rts_cons.head.raw,
- &oh.raw, nh.raw,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);
-
- *old_head = oh.val.pos;
- return n;
-}
-
-#endif /* _RTE_RING_RTS_C11_MEM_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2010-2020 Intel Corporation
+ * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ * Derived from FreeBSD's bufring.h
+ * Used as BSD-3 Licensed with permission from Kip Macy.
+ */
+
+#ifndef _RTE_RING_RTS_ELEM_PVT_H_
+#define _RTE_RING_RTS_ELEM_PVT_H_
+
+/**
+ * @file rte_ring_rts_elem_pvt.h
+ * It is not recommended to include this file directly,
+ * include <rte_ring.h> instead.
+ * Contains internal helper functions for Relaxed Tail Sync (RTS) ring mode.
+ * For more information please refer to <rte_ring_rts.h>.
+ */
+
+/**
+ * @internal This function updates tail values.
+ */
+static __rte_always_inline void
+__rte_ring_rts_update_tail(struct rte_ring_rts_headtail *ht)
+{
+ union __rte_ring_rts_poscnt h, ot, nt;
+
+ /*
+ * If there are other enqueues/dequeues in progress that
+ * might have preceded us, then don't update the tail with the new value.
+ */
+
+ ot.raw = __atomic_load_n(&ht->tail.raw, __ATOMIC_ACQUIRE);
+
+ do {
+ /* on 32-bit systems we have to do atomic read here */
+ h.raw = __atomic_load_n(&ht->head.raw, __ATOMIC_RELAXED);
+
+ nt.raw = ot.raw;
+ if (++nt.val.cnt == h.val.cnt)
+ nt.val.pos = h.val.pos;
+
+ } while (__atomic_compare_exchange_n(&ht->tail.raw, &ot.raw, nt.raw,
+ 0, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE) == 0);
+}
+
+/**
+ * @internal This function waits until the head/tail distance
+ * does not exceed the pre-defined max value.
+ */
+static __rte_always_inline void
+__rte_ring_rts_head_wait(const struct rte_ring_rts_headtail *ht,
+ union __rte_ring_rts_poscnt *h)
+{
+ uint32_t max;
+
+ max = ht->htd_max;
+
+ while (h->val.pos - ht->tail.val.pos > max) {
+ rte_pause();
+ h->raw = __atomic_load_n(&ht->head.raw, __ATOMIC_ACQUIRE);
+ }
+}
+
+/**
+ * @internal This function updates the producer head for enqueue.
+ */
+static __rte_always_inline uint32_t
+__rte_ring_rts_move_prod_head(struct rte_ring *r, uint32_t num,
+ enum rte_ring_queue_behavior behavior, uint32_t *old_head,
+ uint32_t *free_entries)
+{
+ uint32_t n;
+ union __rte_ring_rts_poscnt nh, oh;
+
+ const uint32_t capacity = r->capacity;
+
+ oh.raw = __atomic_load_n(&r->rts_prod.head.raw, __ATOMIC_ACQUIRE);
+
+ do {
+ /* Reset n to the initial burst count */
+ n = num;
+
+ /*
+ * wait for the prod head/tail distance to fall within htd_max;
+ * this also makes sure that we read prod head *before*
+ * reading cons tail.
+ */
+ __rte_ring_rts_head_wait(&r->rts_prod, &oh);
+
+ /*
+ * The subtraction is done between two unsigned 32-bit values
+ * (the result is always modulo 32 bits even if we have
+ * *old_head > cons_tail). So 'free_entries' is always between 0
+ * and capacity (which is < size).
+ */
+ *free_entries = capacity + r->cons.tail - oh.val.pos;
+
+ /* check that we have enough room in ring */
+ if (unlikely(n > *free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : *free_entries;
+
+ if (n == 0)
+ break;
+
+ nh.val.pos = oh.val.pos + n;
+ nh.val.cnt = oh.val.cnt + 1;
+
+ /*
+ * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
+ * - OOO reads of cons tail value
+ * - OOO copy of elems to the ring
+ */
+ } while (__atomic_compare_exchange_n(&r->rts_prod.head.raw,
+ &oh.raw, nh.raw,
+ 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);
+
+ *old_head = oh.val.pos;
+ return n;
+}
+
+/**
+ * @internal This function updates the consumer head for dequeue.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_rts_move_cons_head(struct rte_ring *r, uint32_t num,
+ enum rte_ring_queue_behavior behavior, uint32_t *old_head,
+ uint32_t *entries)
+{
+ uint32_t n;
+ union __rte_ring_rts_poscnt nh, oh;
+
+ oh.raw = __atomic_load_n(&r->rts_cons.head.raw, __ATOMIC_ACQUIRE);
+
+ /* move cons.head atomically */
+ do {
+ /* Restore n as it may change every loop */
+ n = num;
+
+ /*
+ * wait for the cons head/tail distance to fall within htd_max;
+ * this also makes sure that we read cons head *before*
+ * reading prod tail.
+ */
+ __rte_ring_rts_head_wait(&r->rts_cons, &oh);
+
+ /* The subtraction is done between two unsigned 32-bit values
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1.
+ */
+ *entries = r->prod.tail - oh.val.pos;
+
+ /* Set the actual entries for dequeue */
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
+
+ if (unlikely(n == 0))
+ break;
+
+ nh.val.pos = oh.val.pos + n;
+ nh.val.cnt = oh.val.cnt + 1;
+
+ /*
+ * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
+ * - OOO reads of prod tail value
+ * - OOO copy of elems from the ring
+ */
+ } while (__atomic_compare_exchange_n(&r->rts_cons.head.raw,
+ &oh.raw, nh.raw,
+ 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);
+
+ *old_head = oh.val.pos;
+ return n;
+}
+
+/**
+ * @internal Enqueue several objects on the RTS ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of objects.
+ * @param esize
+ * The size of a ring element, in bytes. It must be a multiple of 4.
+ * This must be the same value used while creating the ring. Otherwise
+ * the results are undefined.
+ * @param n
+ * The number of objects to add in the ring from the obj_table.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to the ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
+ * @param free_space
+ * Returns the amount of free space in the ring after the
+ * enqueue operation has finished
+ * @return
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_rts_enqueue_elem(struct rte_ring *r, const void *obj_table,
+ uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+ uint32_t *free_space)
+{
+ uint32_t free, head;
+
+ n = __rte_ring_rts_move_prod_head(r, n, behavior, &head, &free);
+
+ if (n != 0) {
+ __rte_ring_enqueue_elems(r, head, obj_table, esize, n);
+ __rte_ring_rts_update_tail(&r->rts_prod);
+ }
+
+ if (free_space != NULL)
+ *free_space = free - n;
+ return n;
+}
+
+/**
+ * @internal Dequeue several objects from the RTS ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @param obj_table
+ * A pointer to a table of objects.
+ * @param esize
+ * The size of a ring element, in bytes. It must be a multiple of 4.
+ * This must be the same value used while creating the ring. Otherwise
+ * the results are undefined.
+ * @param n
+ * The number of objects to pull from the ring.
+ * @param behavior
+ * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param available
+ * Returns the number of remaining ring entries after the dequeue has finished
+ * @return
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
+ */
+static __rte_always_inline unsigned int
+__rte_ring_do_rts_dequeue_elem(struct rte_ring *r, void *obj_table,
+ uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
+ uint32_t *available)
+{
+ uint32_t entries, head;
+
+ n = __rte_ring_rts_move_cons_head(r, n, behavior, &head, &entries);
+
+ if (n != 0) {
+ __rte_ring_dequeue_elems(r, head, obj_table, esize, n);
+ __rte_ring_rts_update_tail(&r->rts_cons);
+ }
+
+ if (available != NULL)
+ *available = entries - n;
+ return n;
+}
+
+#endif /* _RTE_RING_RTS_ELEM_PVT_H_ */
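Again as a hedged illustration (the ring name and the htd_max value are
hypothetical): an RTS ring is requested through the creation flags, and the
head/tail distance that __rte_ring_rts_head_wait() enforces can be tuned
through the rte_ring_rts.h helpers:

#include <rte_lcore.h>
#include <rte_ring.h>
#include <rte_ring_elem.h> /* pulls in the RTS/HTS mode APIs */

static struct rte_ring *
rts_ring_example(void)
{
    struct rte_ring *r;

    /* RTS sync for both producer and consumer */
    r = rte_ring_create("rts_demo", 1024, rte_socket_id(),
            RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
    if (r == NULL)
        return NULL;

    /* bound the producer head/tail distance used by
     * __rte_ring_rts_head_wait(); 8 is an arbitrary example value
     */
    rte_ring_set_prod_htd_max(r, 8);
    return r;
}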