/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
};
-#ifdef RTE_LIBRTE_RING_DEBUG
-/**
- * A structure that stores the ring statistics (per-lcore).
- */
-struct rte_ring_debug_stats {
- uint64_t enq_success_bulk; /**< Successful enqueues number. */
- uint64_t enq_success_objs; /**< Objects successfully enqueued. */
- uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
- uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
- uint64_t enq_fail_bulk; /**< Failed enqueues number. */
- uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
- uint64_t deq_success_bulk; /**< Successful dequeues number. */
- uint64_t deq_success_objs; /**< Objects successfully dequeued. */
- uint64_t deq_fail_bulk; /**< Failed dequeues number. */
- uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
-} __rte_cache_aligned;
-#endif
-
#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_RING_MZ_PREFIX) + 1)
-#ifndef RTE_RING_PAUSE_REP_COUNT
-#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
- * if RTE_RING_PAUSE_REP not defined. */
+struct rte_memzone; /* forward declaration, so as not to require memzone.h */
+
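+/*
+ * The doubled alignment below keeps the producer and consumer structures in
+ * separate, non-adjacent cache lines on machines whose lines are narrower
+ * than 128 bytes, limiting false sharing between enqueue and dequeue paths.
+ */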
+#if RTE_CACHE_LINE_SIZE < 128
+#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#else
+#define PROD_ALIGN RTE_CACHE_LINE_SIZE
+#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif
-struct rte_memzone; /* forward declaration, so as not to require memzone.h */
+/* structure to hold a pair of head/tail values and other metadata */
+struct rte_ring_headtail {
+ volatile uint32_t head; /**< Prod/consumer head. */
+ volatile uint32_t tail; /**< Prod/consumer tail. */
+ uint32_t single; /**< True if single prod/cons */
+};
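The single flag above is what the rte_ring_enqueue/rte_ring_dequeue wrappers later in this file dispatch on; it is filled in at init time from the existing creation flags. A minimal illustrative sketch of selecting single-producer/single-consumer behaviour (ring name and size are made up):

#include <rte_lcore.h>
#include <rte_ring.h>

static struct rte_ring *
make_example_ring(void)
{
	/* RING_F_SP_ENQ / RING_F_SC_DEQ end up reflected in
	 * r->prod.single and r->cons.single respectively */
	return rte_ring_create("example_ring", 1024, rte_socket_id(),
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
}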
/**
* An RTE ring structure.
* next time the ABI changes
*/
char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the ring. */
- int flags; /**< Flags supplied at creation. */
+ int flags; /**< Flags supplied at creation. */
const struct rte_memzone *memzone;
/**< Memzone, if any, containing the rte_ring */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
+ uint32_t watermark; /**< Max items before EDQUOT in producer. */
/** Ring producer status. */
- struct prod {
- uint32_t watermark; /**< Maximum items before EDQUOT. */
- uint32_t sp_enqueue; /**< True, if single producer. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Producer head. */
- volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
/** Ring consumer status. */
- struct cons {
- uint32_t sc_dequeue; /**< True, if single consumer. */
- uint32_t size; /**< Size of the ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Consumer head. */
- volatile uint32_t tail; /**< Consumer tail. */
-#ifdef RTE_RING_SPLIT_PROD_CONS
- } cons __rte_cache_aligned;
-#else
- } cons;
-#endif
-
-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
-#endif
+ struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
* not volatile so need to be careful
#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
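In the burst (RTE_RING_QUEUE_VARIABLE) path below, exceeding the watermark is reported in-band by or'ing RTE_RING_QUOT_EXCEED into the returned count. A hedged sketch of how a caller could separate the flag from the count (the backpressure handling is hypothetical):

#include <rte_ring.h>

static unsigned int
enqueue_with_watermark_check(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int ret = rte_ring_enqueue_burst(r, objs, n);

	if (ret & RTE_RING_QUOT_EXCEED) {
		/* watermark passed: the low bits still hold the count */
		ret &= RTE_RING_SZ_MASK;
		/* ... apply backpressure here ... */
	}
	return ret;
}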
-/**
- * @internal When debug is enabled, store ring statistics.
- * @param r
- * A pointer to the ring.
- * @param name
- * The name of the statistics field to increment in the ring.
- * @param n
- * The number to add to the object-oriented statistics.
- */
-#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- if (__lcore_id < RTE_MAX_LCORE) { \
- r->stats[__lcore_id].name##_objs += n; \
- r->stats[__lcore_id].name##_bulk += 1; \
- } \
- } while(0)
-#else
-#define __RING_STAT_ADD(r, name, n) do {} while(0)
-#endif
-
/**
* Calculate the memory size needed for a ring
*
* Placed here since identical code is needed in both
* the single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
- const uint32_t size = r->prod.size; \
+ const uint32_t size = r->size; \
uint32_t idx = prod_head & mask; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
* single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
uint32_t idx = cons_head & mask; \
- const uint32_t size = r->cons.size; \
+ const uint32_t size = r->size; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
obj_table[i] = r->ring[idx]; \
uint32_t cons_tail, free_entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ unsigned int i;
+ uint32_t mask = r->mask;
int ret;
/* Avoid the unnecessary cmpset operation below, which is also
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
rte_smp_wmb();
/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }
/*
* If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->prod.tail != prod_head)) {
+ while (unlikely(r->prod.tail != prod_head))
rte_pause();
- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
r->prod.tail = prod_next;
return ret;
}
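With the stats macros gone, the fixed-count (bulk) enqueue above has exactly three outcomes: 0 on success, -EDQUOT when the objects were enqueued but the watermark is now exceeded, and -ENOBUFS when nothing was enqueued. A small illustrative caller:

#include <errno.h>
#include <rte_ring.h>

static int
send_batch(struct rte_ring *r, void **batch, unsigned int n)
{
	int rc = rte_ring_enqueue_bulk(r, batch, n);

	if (rc == 0 || rc == -EDQUOT)
		return 0;	/* enqueued; -EDQUOT only flags the watermark */
	return -1;		/* -ENOBUFS: not enough room, nothing enqueued */
}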
{
uint32_t prod_head, cons_tail;
uint32_t prod_next, free_entries;
- unsigned i;
- uint32_t mask = r->prod.mask;
+ unsigned int i;
+ uint32_t mask = r->mask;
int ret;
prod_head = r->prod.head;
/* check that we have enough room in ring */
if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOBUFS;
- }
else {
/* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
+ if (unlikely(free_entries == 0))
return 0;
- }
-
n = free_entries;
}
}
rte_smp_wmb();
/* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
(int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
+ else
ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }
r->prod.tail = prod_next;
return ret;
uint32_t cons_next, entries;
const unsigned max = n;
int success;
- unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
+ unsigned int i;
+ uint32_t mask = r->mask;
/* Avoid the unnecessary cmpset operation below, which is also
* potentially harmful when n equals 0. */
/* Set the actual entries for dequeue */
if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
* If there are other dequeues in progress that preceded us,
* we need to wait for them to complete
*/
- while (unlikely(r->cons.tail != cons_head)) {
+ while (unlikely(r->cons.tail != cons_head))
rte_pause();
- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
{
uint32_t cons_head, prod_tail;
uint32_t cons_next, entries;
- unsigned i;
- uint32_t mask = r->prod.mask;
+ unsigned int i;
+ uint32_t mask = r->mask;
cons_head = r->cons.head;
prod_tail = r->prod.tail;
entries = prod_tail - cons_head;
if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
+ if (behavior == RTE_RING_QUEUE_FIXED)
return -ENOENT;
- }
else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
+ if (unlikely(entries == 0))
return 0;
- }
-
n = entries;
}
}
DEQUEUE_PTRS();
rte_smp_rmb();
- __RING_STAT_ADD(r, deq_success, n);
r->cons.tail = cons_next;
return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
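The dequeue side mirrors this: the fixed-count variants return 0 or -ENOENT (nothing dequeued), while the burst variants return however many objects could be taken. A brief sketch combining the two (the helper name is illustrative):

#include <rte_ring.h>

/* take exactly n objects if possible, otherwise drain what is available */
static unsigned int
recv_up_to(struct rte_ring *r, void **objs, unsigned int n)
{
	if (rte_ring_dequeue_bulk(r, objs, n) == 0)
		return n;			/* all-or-nothing path succeeded */
	return rte_ring_dequeue_burst(r, objs, n);
}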
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
- if (r->prod.sp_enqueue)
+ if (r->prod.single)
return rte_ring_sp_enqueue_bulk(r, obj_table, n);
else
return rte_ring_mp_enqueue_bulk(r, obj_table, n);
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.sp_enqueue)
+ if (r->prod.single)
return rte_ring_sp_enqueue(r, obj);
else
return rte_ring_mp_enqueue(r, obj);
static inline int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
- if (r->cons.sc_dequeue)
+ if (r->cons.single)
return rte_ring_sc_dequeue_bulk(r, obj_table, n);
else
return rte_ring_mc_dequeue_bulk(r, obj_table, n);
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.sc_dequeue)
+ if (r->cons.single)
return rte_ring_sc_dequeue(r, obj_p);
else
return rte_ring_mc_dequeue(r, obj_p);
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
+ return ((cons_tail - prod_tail - 1) & r->mask) == 0;
}
/**
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (prod_tail - cons_tail) & r->prod.mask;
+ return (prod_tail - cons_tail) & r->mask;
}
/**
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (cons_tail - prod_tail - 1) & r->prod.mask;
+ return (cons_tail - prod_tail - 1) & r->mask;
}
/**
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
- return r->prod.size;
+ return r->size;
}
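Since one slot is always left empty to distinguish a full ring from an empty one, the helpers above are complementary: on a quiescent ring, the count plus the free count equals the size minus one. A tiny sanity-check sketch:

#include <assert.h>
#include <rte_ring.h>

static void
check_ring_accounting(const struct rte_ring *r)
{
	/* one slot stays unused, hence the -1 */
	assert(rte_ring_count(r) + rte_ring_free_count(r) ==
	       rte_ring_get_size(r) - 1);
}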
/**
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned n)
{
- if (r->prod.sp_enqueue)
+ if (r->prod.single)
return rte_ring_sp_enqueue_burst(r, obj_table, n);
else
return rte_ring_mp_enqueue_burst(r, obj_table, n);
static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
- if (r->cons.sc_dequeue)
+ if (r->cons.single)
return rte_ring_sc_dequeue_burst(r, obj_table, n);
else
return rte_ring_mc_dequeue_burst(r, obj_table, n);