4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Derived from FreeBSD's bufring.h
37 **************************************************************************
39 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
40 * All rights reserved.
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions are met:
45 * 1. Redistributions of source code must retain the above copyright notice,
46 * this list of conditions and the following disclaimer.
48 * 2. The name of Kip Macy nor the names of other
49 * contributors may be used to endorse or promote products derived from
50 * this software without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
53 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
56 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE.
64 ***************************************************************************/
73 * The Ring Manager is a fixed-size queue, implemented as a table of
74 * pointers. Head and tail pointers are modified atomically, allowing
75 * concurrent access. It has the following features:
77 * - FIFO (First In First Out)
78 * - Maximum size is fixed; the pointers are stored in a table.
79 * - Lockless implementation.
80 * - Multi- or single-consumer dequeue.
81 * - Multi- or single-producer enqueue.
85 * Note: the ring implementation is not preemptable. An lcore must not
86 * be interrupted by another task that uses the same ring.
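/*
 * Illustrative usage sketch: a minimal create/enqueue/dequeue round trip with
 * the functions declared later in this header. The helper name
 * example_ring_overview() and the ring name "ex_ring" are hypothetical.
 */
static inline void example_ring_overview(void)
{
	/* 1024 slots (1023 usable), default multi-producer/multi-consumer mode */
	struct rte_ring *r = rte_ring_create("ex_ring", 1024, SOCKET_ID_ANY, 0);
	int payload = 42;
	void *obj = NULL;

	if (r == NULL)
		return;		/* rte_errno (see <rte_errno.h>) gives the cause */

	if (rte_ring_enqueue(r, &payload) < 0)
		return;		/* only -ENOBUFS is possible here: no watermark is set */

	if (rte_ring_dequeue(r, &obj) == 0) {
		/* obj == &payload at this point */
	}
}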
96 #include <sys/queue.h>
98 #include <rte_common.h>
99 #include <rte_memory.h>
100 #include <rte_lcore.h>
101 #include <rte_atomic.h>
102 #include <rte_branch_prediction.h>
104 enum rte_ring_queue_behavior {
105 RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
106 RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from the ring */
109 #ifdef RTE_LIBRTE_RING_DEBUG
111 * A structure that stores the ring statistics (per-lcore).
113 struct rte_ring_debug_stats {
114 uint64_t enq_success_bulk; /**< Successful enqueues number. */
115 uint64_t enq_success_objs; /**< Objects successfully enqueued. */
116 uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
117 uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
118 uint64_t enq_fail_bulk; /**< Failed enqueues number. */
119 uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
120 uint64_t deq_success_bulk; /**< Successful dequeues number. */
121 uint64_t deq_success_objs; /**< Objects successfully dequeued. */
122 uint64_t deq_fail_bulk; /**< Failed dequeues number. */
123 uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
124 } __rte_cache_aligned;
127 #define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */
128 #define RTE_RING_MZ_PREFIX "RG_"
131 * An RTE ring structure.
133 * The producer and the consumer each have a head and a tail index. The particularity
134 * of these indexes is that they are not kept between 0 and size(ring)-1. These indexes
135 * are between 0 and 2^32-1, and we mask their value when we access the ring[]
136 * field. Thanks to this assumption, we can do subtractions between 2 index
137 * values in a modulo-32-bit base: that's why the overflow of the indexes is not a problem.
141 TAILQ_ENTRY(rte_ring) next; /**< Next in list. */
143 char name[RTE_RING_NAMESIZE]; /**< Name of the ring. */
144 int flags; /**< Flags supplied at creation. */
146 /** Ring producer status. */
148 uint32_t watermark; /**< Maximum items before EDQUOT. */
149 uint32_t sp_enqueue; /**< True, if single producer. */
150 uint32_t size; /**< Size of ring. */
151 uint32_t mask; /**< Mask (size-1) of ring. */
152 volatile uint32_t head; /**< Producer head. */
153 volatile uint32_t tail; /**< Producer tail. */
154 } prod __rte_cache_aligned;
156 /** Ring consumer status. */
158 uint32_t sc_dequeue; /**< True, if single consumer. */
159 uint32_t size; /**< Size of the ring. */
160 uint32_t mask; /**< Mask (size-1) of ring. */
161 volatile uint32_t head; /**< Consumer head. */
162 volatile uint32_t tail; /**< Consumer tail. */
163 #ifdef RTE_RING_SPLIT_PROD_CONS
164 } cons __rte_cache_aligned;
169 #ifdef RTE_LIBRTE_RING_DEBUG
170 struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
173 void * ring[0] __rte_cache_aligned; /**< Memory space of ring starts here.
174 * not volatile, so be careful
175 * about compiler re-ordering. */
178 #define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
179 #define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
180 #define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceeded for burst ops */
181 #define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
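/*
 * Illustrative sketch: the head/tail values are free-running 32-bit counters,
 * so a counter is mapped to a slot with the power-of-two mask, and the
 * distance between two counters stays correct across the 2^32 wrap because
 * the arithmetic is modulo 2^32. The helper name is hypothetical.
 */
static inline void *
example_slot_for_index(const struct rte_ring *r, uint32_t free_running_index)
{
	return r->ring[free_running_index & r->cons.mask];
}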
184 * @internal When debug is enabled, store ring statistics.
186 * A pointer to the ring.
188 * The name of the statistics field to increment in the ring.
190 * The number to add to the object-oriented statistics.
192 #ifdef RTE_LIBRTE_RING_DEBUG
193 #define __RING_STAT_ADD(r, name, n) do { \
194 unsigned __lcore_id = rte_lcore_id(); \
195 r->stats[__lcore_id].name##_objs += n; \
196 r->stats[__lcore_id].name##_bulk += 1; \
199 #define __RING_STAT_ADD(r, name, n) do {} while(0)
203 * Calculate the memory size needed for a ring
205 * This function returns the number of bytes needed for a ring, given
206 * the number of elements in it. This value is the sum of the size of
207 * the structure rte_ring and the size of the memory needed by the
208 * object pointers. The value is aligned to a cache line size.
211 * The number of elements in the ring (must be a power of 2).
213 * - The memory size needed for the ring on success.
214 * - -EINVAL if count is not a power of 2.
216 ssize_t rte_ring_get_memsize(unsigned count);
219 * Initialize a ring structure.
221 * Initialize a ring structure in the memory pointed to by *r*. The size of the
222 * memory area must be large enough to store the ring structure and the
223 * object table. It is advised to use rte_ring_get_memsize() to get the appropriate size.
226 * The ring size is set to *count*, which must be a power of two. Water
227 * marking is disabled by default. The real usable ring size is
228 * *count-1* instead of *count*, so that a full ring can be distinguished from an empty one.
231 * The ring is not added to the RTE_TAILQ_RING global list. Indeed, the
232 * memory given by the caller may not be shareable among DPDK processes.
236 * The pointer to the ring structure followed by the objects table.
238 * The name of the ring.
240 * The number of elements in the ring (must be a power of 2).
242 * An OR of the following:
243 * - RING_F_SP_ENQ: If this flag is set, the default behavior when
244 * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
245 * is "single-producer". Otherwise, it is "multi-producers".
246 * - RING_F_SC_DEQ: If this flag is set, the default behavior when
247 * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
248 * is "single-consumer". Otherwise, it is "multi-consumers".
250 * 0 on success, or a negative value on error.
252 int rte_ring_init(struct rte_ring *r, const char *name, unsigned count, unsigned flags);
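/*
 * Illustrative sketch of the flow described above: size the ring with
 * rte_ring_get_memsize() and initialize it in caller-provided memory.
 * malloc() stands in for whatever (preferably cache-line-aligned) allocator
 * the application uses; the helper and ring names are hypothetical.
 */
static inline struct rte_ring *
example_ring_init_private(unsigned count)
{
	ssize_t sz = rte_ring_get_memsize(count); /* -EINVAL if count is not a power of 2 */
	struct rte_ring *r;

	if (sz < 0)
		return NULL;

	r = malloc(sz);				/* needs <stdlib.h> */
	if (r == NULL)
		return NULL;

	if (rte_ring_init(r, "ex_private", count,
			  RING_F_SP_ENQ | RING_F_SC_DEQ) != 0) {
		free(r);
		return NULL;
	}
	return r;				/* not visible to other DPDK processes */
}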
256 * Create a new ring named *name* in memory.
258 * This function uses ``memzone_reserve()`` to allocate memory. Then it
259 * calls rte_ring_init() to initialize an empty ring.
261 * The new ring size is set to *count*, which must be a power of
262 * two. Water marking is disabled by default. The real usable ring size
263 * is *count-1* instead of *count*, so that a full ring can be distinguished from an empty one.
266 * The ring is added to the RTE_TAILQ_RING list.
269 * The name of the ring.
271 * The size of the ring (must be a power of 2).
273 * The *socket_id* argument is the socket identifier in case of
274 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
275 * constraint for the reserved zone.
277 * An OR of the following:
278 * - RING_F_SP_ENQ: If this flag is set, the default behavior when
279 * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
280 * is "single-producer". Otherwise, it is "multi-producers".
281 * - RING_F_SC_DEQ: If this flag is set, the default behavior when
282 * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
283 * is "single-consumer". Otherwise, it is "multi-consumers".
285 * On success, the pointer to the new allocated ring. NULL on error with
286 * rte_errno set appropriately. Possible errno values include:
287 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
288 * - E_RTE_SECONDARY - function was called from a secondary process instance
289 * - E_RTE_NO_TAILQ - no tailq list could be got for the ring list
290 * - EINVAL - count provided is not a power of 2
291 * - ENOSPC - the maximum number of memzones has already been allocated
292 * - EEXIST - a memzone with the same name already exists
293 * - ENOMEM - no appropriate memory area found in which to create memzone
295 struct rte_ring *rte_ring_create(const char *name, unsigned count,
296 int socket_id, unsigned flags);
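/*
 * Illustrative sketch: creating a ring with the single-producer and
 * single-consumer fast paths on a given NUMA socket. The helper and ring
 * names are hypothetical; on failure NULL is returned and rte_errno
 * (see <rte_errno.h>) is set as documented above.
 */
static inline struct rte_ring *
example_ring_create_spsc(int socket_id)
{
	return rte_ring_create("ex_spsc", 4096, socket_id,
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
}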
299 * Change the high water mark.
301 * If *count* is 0, water marking is disabled. Otherwise, it is set to the
302 * *count* value. The *count* value must be greater than 0 and less
303 * than the ring size.
305 * This function can be called at any time (not necessarily at initialization time).
309 * A pointer to the ring structure.
311 * The new water mark value.
313 * - 0: Success; water mark changed.
314 * - -EINVAL: Invalid water mark value.
316 int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
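/*
 * Illustrative sketch: enabling a high water mark at roughly 3/4 of the ring
 * size, so that enqueues start reporting -EDQUOT (or the RTE_RING_QUOT_EXCEED
 * bit for burst operations) while still accepting objects. The helper name is
 * hypothetical.
 */
static inline int
example_enable_watermark(struct rte_ring *r, unsigned ring_size)
{
	unsigned mark = (ring_size / 4) * 3;

	/* must be greater than 0 and less than the ring size, else -EINVAL */
	return rte_ring_set_water_mark(r, mark);
}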
319 * Dump the status of the ring to the console.
322 * A pointer to a file for output
324 * A pointer to the ring structure.
326 void rte_ring_dump(FILE *f, const struct rte_ring *r);
328 /* the actual enqueue of pointers on the ring.
329 * Placed here since identical code is needed in both
330 * single and multi producer enqueue functions */
331 #define ENQUEUE_PTRS() do { \
332 const uint32_t size = r->prod.size; \
333 uint32_t idx = prod_head & mask; \
334 if (likely(idx + n < size)) { \
335 for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
336 r->ring[idx] = obj_table[i]; \
337 r->ring[idx+1] = obj_table[i+1]; \
338 r->ring[idx+2] = obj_table[i+2]; \
339 r->ring[idx+3] = obj_table[i+3]; \
342 case 3: r->ring[idx++] = obj_table[i++]; \
343 case 2: r->ring[idx++] = obj_table[i++]; \
344 case 1: r->ring[idx++] = obj_table[i++]; \
347 for (i = 0; idx < size; i++, idx++)\
348 r->ring[idx] = obj_table[i]; \
349 for (idx = 0; i < n; i++, idx++) \
350 r->ring[idx] = obj_table[i]; \
354 /* the actual copy of pointers on the ring to obj_table.
355 * Placed here since identical code is needed in both
356 * single and multi consumer dequeue functions */
357 #define DEQUEUE_PTRS() do { \
358 uint32_t idx = cons_head & mask; \
359 const uint32_t size = r->cons.size; \
360 if (likely(idx + n < size)) { \
361 for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
362 obj_table[i] = r->ring[idx]; \
363 obj_table[i+1] = r->ring[idx+1]; \
364 obj_table[i+2] = r->ring[idx+2]; \
365 obj_table[i+3] = r->ring[idx+3]; \
368 case 3: obj_table[i++] = r->ring[idx++]; \
369 case 2: obj_table[i++] = r->ring[idx++]; \
370 case 1: obj_table[i++] = r->ring[idx++]; \
373 for (i = 0; idx < size; i++, idx++) \
374 obj_table[i] = r->ring[idx]; \
375 for (idx = 0; i < n; i++, idx++) \
376 obj_table[i] = r->ring[idx]; \
381 * @internal Enqueue several objects on the ring (multi-producers safe).
383 * This function uses a "compare and set" instruction to move the
384 * producer index atomically.
387 * A pointer to the ring structure.
389 * A pointer to a table of void * pointers (objects).
391 * The number of objects to add in the ring from the obj_table.
393 * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to the ring
394 * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
396 * Depends on the *behavior* value
397 * if behavior = RTE_RING_QUEUE_FIXED
398 * - 0: Success; objects enqueued.
399 * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
400 * high water mark is exceeded.
401 * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
402 * if behavior = RTE_RING_QUEUE_VARIABLE
403 * - n: Actual number of objects enqueued.
405 static inline int __attribute__((always_inline))
406 __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
407 unsigned n, enum rte_ring_queue_behavior behavior)
409 uint32_t prod_head, prod_next;
410 uint32_t cons_tail, free_entries;
411 const unsigned max = n;
414 uint32_t mask = r->prod.mask;
417 /* move prod.head atomically */
419 /* Reset n to the initial burst count */
422 prod_head = r->prod.head;
423 cons_tail = r->cons.tail;
424 /* The subtraction is done between two unsigned 32-bit values
425 * (the result is always modulo 32 bits even if we have
426 * prod_head > cons_tail). So 'free_entries' is always between 0
427 * and size(ring)-1. */
428 free_entries = (mask + cons_tail - prod_head);
430 /* check that we have enough room in ring */
431 if (unlikely(n > free_entries)) {
432 if (behavior == RTE_RING_QUEUE_FIXED) {
433 __RING_STAT_ADD(r, enq_fail, n);
437 /* No free entry available */
438 if (unlikely(free_entries == 0)) {
439 __RING_STAT_ADD(r, enq_fail, n);
447 prod_next = prod_head + n;
448 success = rte_atomic32_cmpset(&r->prod.head, prod_head,
450 } while (unlikely(success == 0));
452 /* write entries in ring */
454 rte_compiler_barrier();
456 /* if we exceed the watermark */
457 if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
458 ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
459 (int)(n | RTE_RING_QUOT_EXCEED);
460 __RING_STAT_ADD(r, enq_quota, n);
463 ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
464 __RING_STAT_ADD(r, enq_success, n);
468 * If there are other enqueues in progress that preceded us,
469 * we need to wait for them to complete
471 while (unlikely(r->prod.tail != prod_head))
474 r->prod.tail = prod_next;
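/*
 * Illustrative sketch of the multi-producer head reservation used above,
 * shown in isolation: each producer proposes head+n with a compare-and-set;
 * if another producer moved prod.head first, the CAS fails and the values are
 * re-read. The real function also checks free space (and may shrink n in
 * RTE_RING_QUEUE_VARIABLE mode) before the CAS. The helper name is
 * hypothetical.
 */
static inline uint32_t
example_reserve_prod_slots(struct rte_ring *r, uint32_t n)
{
	uint32_t head, next;
	int success;

	do {
		head = r->prod.head;
		next = head + n;
		/* non-zero only if prod.head was still equal to 'head' */
		success = rte_atomic32_cmpset(&r->prod.head, head, next);
	} while (success == 0);

	return head;	/* first reserved (unmasked) slot index */
}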
479 * @internal Enqueue several objects on a ring (NOT multi-producers safe).
482 * A pointer to the ring structure.
484 * A pointer to a table of void * pointers (objects).
486 * The number of objects to add in the ring from the obj_table.
488 * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to the ring
489 * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
491 * Depends on the *behavior* value
492 * if behavior = RTE_RING_QUEUE_FIXED
493 * - 0: Success; objects enqueued.
494 * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
495 * high water mark is exceeded.
496 * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
497 * if behavior = RTE_RING_QUEUE_VARIABLE
498 * - n: Actual number of objects enqueued.
500 static inline int __attribute__((always_inline))
501 __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
502 unsigned n, enum rte_ring_queue_behavior behavior)
504 uint32_t prod_head, cons_tail;
505 uint32_t prod_next, free_entries;
507 uint32_t mask = r->prod.mask;
510 prod_head = r->prod.head;
511 cons_tail = r->cons.tail;
512 /* The subtraction is done between two unsigned 32-bit values
513 * (the result is always modulo 32 bits even if we have
514 * prod_head > cons_tail). So 'free_entries' is always between 0
515 * and size(ring)-1. */
516 free_entries = mask + cons_tail - prod_head;
518 /* check that we have enough room in ring */
519 if (unlikely(n > free_entries)) {
520 if (behavior == RTE_RING_QUEUE_FIXED) {
521 __RING_STAT_ADD(r, enq_fail, n);
525 /* No free entry available */
526 if (unlikely(free_entries == 0)) {
527 __RING_STAT_ADD(r, enq_fail, n);
535 prod_next = prod_head + n;
536 r->prod.head = prod_next;
538 /* write entries in ring */
540 rte_compiler_barrier();
542 /* if we exceed the watermark */
543 if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
544 ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
545 (int)(n | RTE_RING_QUOT_EXCEED);
546 __RING_STAT_ADD(r, enq_quota, n);
549 ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
550 __RING_STAT_ADD(r, enq_success, n);
553 r->prod.tail = prod_next;
558 * @internal Dequeue several objects from a ring (multi-consumers safe). When
559 * more objects are requested than are available, only the available
560 * objects are dequeued.
562 * This function uses a "compare and set" instruction to move the
563 * consumer index atomically.
566 * A pointer to the ring structure.
568 * A pointer to a table of void * pointers (objects) that will be filled.
570 * The number of objects to dequeue from the ring to the obj_table.
572 * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from the ring
573 * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
575 * Depends on the *behavior* value
576 * if behavior = RTE_RING_QUEUE_FIXED
577 * - 0: Success; objects dequeued.
578 * - -ENOENT: Not enough entries in the ring to dequeue; no object is dequeued.
580 * if behavior = RTE_RING_QUEUE_VARIABLE
581 * - n: Actual number of objects dequeued.
584 static inline int __attribute__((always_inline))
585 __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
586 unsigned n, enum rte_ring_queue_behavior behavior)
588 uint32_t cons_head, prod_tail;
589 uint32_t cons_next, entries;
590 const unsigned max = n;
593 uint32_t mask = r->prod.mask;
595 /* move cons.head atomically */
597 /* Restore n as it may change every loop */
600 cons_head = r->cons.head;
601 prod_tail = r->prod.tail;
602 /* The subtraction is done between two unsigned 32-bit values
603 * (the result is always modulo 32 bits even if we have
604 * cons_head > prod_tail). So 'entries' is always between 0
605 * and size(ring)-1. */
606 entries = (prod_tail - cons_head);
608 /* Set the actual entries for dequeue */
610 if (behavior == RTE_RING_QUEUE_FIXED) {
611 __RING_STAT_ADD(r, deq_fail, n);
615 if (unlikely(entries == 0)){
616 __RING_STAT_ADD(r, deq_fail, n);
624 cons_next = cons_head + n;
625 success = rte_atomic32_cmpset(&r->cons.head, cons_head,
627 } while (unlikely(success == 0));
631 rte_compiler_barrier();
634 * If there are other dequeues in progress that preceded us,
635 * we need to wait for them to complete
637 while (unlikely(r->cons.tail != cons_head))
640 __RING_STAT_ADD(r, deq_success, n);
641 r->cons.tail = cons_next;
643 return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
647 * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
648 * When more objects are requested than are available, only the available
649 * objects are dequeued.
652 * A pointer to the ring structure.
654 * A pointer to a table of void * pointers (objects) that will be filled.
656 * The number of objects to dequeue from the ring to the obj_table.
658 * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from the ring
659 * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
661 * Depends on the *behavior* value
662 * if behavior = RTE_RING_QUEUE_FIXED
663 * - 0: Success; objects dequeued.
664 * - -ENOENT: Not enough entries in the ring to dequeue; no object is dequeued.
666 * if behavior = RTE_RING_QUEUE_VARIABLE
667 * - n: Actual number of objects dequeued.
669 static inline int __attribute__((always_inline))
670 __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
671 unsigned n, enum rte_ring_queue_behavior behavior)
673 uint32_t cons_head, prod_tail;
674 uint32_t cons_next, entries;
676 uint32_t mask = r->prod.mask;
678 cons_head = r->cons.head;
679 prod_tail = r->prod.tail;
680 /* The subtraction is done between two unsigned 32-bit values
681 * (the result is always modulo 32 bits even if we have
682 * cons_head > prod_tail). So 'entries' is always between 0
683 * and size(ring)-1. */
684 entries = prod_tail - cons_head;
687 if (behavior == RTE_RING_QUEUE_FIXED) {
688 __RING_STAT_ADD(r, deq_fail, n);
692 if (unlikely(entries == 0)){
693 __RING_STAT_ADD(r, deq_fail, n);
701 cons_next = cons_head + n;
702 r->cons.head = cons_next;
706 rte_compiler_barrier();
708 __RING_STAT_ADD(r, deq_success, n);
709 r->cons.tail = cons_next;
710 return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
714 * Enqueue several objects on the ring (multi-producers safe).
716 * This function uses a "compare and set" instruction to move the
717 * producer index atomically.
720 * A pointer to the ring structure.
722 * A pointer to a table of void * pointers (objects).
724 * The number of objects to add in the ring from the obj_table.
726 * - 0: Success; objects enqueued.
727 * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
728 * high water mark is exceeded.
729 * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
731 static inline int __attribute__((always_inline))
732 rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
735 return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
739 * Enqueue several objects on a ring (NOT multi-producers safe).
742 * A pointer to the ring structure.
744 * A pointer to a table of void * pointers (objects).
746 * The number of objects to add in the ring from the obj_table.
748 * - 0: Success; objects enqueued.
749 * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
750 * high water mark is exceeded.
751 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
753 static inline int __attribute__((always_inline))
754 rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
757 return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
761 * Enqueue several objects on a ring.
763 * This function calls the multi-producer or the single-producer
764 * version depending on the default behavior that was specified at
765 * ring creation time (see flags).
768 * A pointer to the ring structure.
770 * A pointer to a table of void * pointers (objects).
772 * The number of objects to add in the ring from the obj_table.
774 * - 0: Success; objects enqueued.
775 * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
776 * high water mark is exceeded.
777 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
779 static inline int __attribute__((always_inline))
780 rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
783 if (r->prod.sp_enqueue)
784 return rte_ring_sp_enqueue_bulk(r, obj_table, n);
786 return rte_ring_mp_enqueue_bulk(r, obj_table, n);
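/*
 * Illustrative sketch: handling the three possible results of a fixed-count
 * (bulk) enqueue. EDQUOT and ENOBUFS come from <errno.h>; the helper name is
 * hypothetical.
 */
static inline int
example_bulk_enqueue(struct rte_ring *r, void *objs[], unsigned n)
{
	int ret = rte_ring_enqueue_bulk(r, objs, n);

	if (ret == 0)
		return 0;	/* all n objects enqueued */
	if (ret == -EDQUOT)
		return 0;	/* enqueued, but above the watermark: apply backpressure */
	/* ret == -ENOBUFS: nothing was enqueued, the caller still owns the objects */
	return -1;
}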
790 * Enqueue one object on a ring (multi-producers safe).
792 * This function uses a "compare and set" instruction to move the
793 * producer index atomically.
796 * A pointer to the ring structure.
798 * A pointer to the object to be added.
800 * - 0: Success; objects enqueued.
801 * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
802 * high water mark is exceeded.
803 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
805 static inline int __attribute__((always_inline))
806 rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
808 return rte_ring_mp_enqueue_bulk(r, &obj, 1);
812 * Enqueue one object on a ring (NOT multi-producers safe).
815 * A pointer to the ring structure.
817 * A pointer to the object to be added.
819 * - 0: Success; objects enqueued.
820 * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
821 * high water mark is exceeded.
822 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
824 static inline int __attribute__((always_inline))
825 rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
827 return rte_ring_sp_enqueue_bulk(r, &obj, 1);
831 * Enqueue one object on a ring.
833 * This function calls the multi-producer or the single-producer
834 * version, depending on the default behaviour that was specified at
835 * ring creation time (see flags).
838 * A pointer to the ring structure.
840 * A pointer to the object to be added.
842 * - 0: Success; objects enqueued.
843 * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
844 * high water mark is exceeded.
845 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
847 static inline int __attribute__((always_inline))
848 rte_ring_enqueue(struct rte_ring *r, void *obj)
850 if (r->prod.sp_enqueue)
851 return rte_ring_sp_enqueue(r, obj);
853 return rte_ring_mp_enqueue(r, obj);
857 * Dequeue several objects from a ring (multi-consumers safe).
859 * This function uses a "compare and set" instruction to move the
860 * consumer index atomically.
863 * A pointer to the ring structure.
865 * A pointer to a table of void * pointers (objects) that will be filled.
867 * The number of objects to dequeue from the ring to the obj_table.
869 * - 0: Success; objects dequeued.
870 * - -ENOENT: Not enough entries in the ring to dequeue; no object is dequeued.
873 static inline int __attribute__((always_inline))
874 rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
876 return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
880 * Dequeue several objects from a ring (NOT multi-consumers safe).
883 * A pointer to the ring structure.
885 * A pointer to a table of void * pointers (objects) that will be filled.
887 * The number of objects to dequeue from the ring to the obj_table,
888 * must be strictly positive.
890 * - 0: Success; objects dequeued.
891 * - -ENOENT: Not enough entries in the ring to dequeue; no object is dequeued.
894 static inline int __attribute__((always_inline))
895 rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
897 return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
901 * Dequeue several objects from a ring.
903 * This function calls the multi-consumers or the single-consumer
904 * version, depending on the default behaviour that was specified at
905 * ring creation time (see flags).
908 * A pointer to the ring structure.
910 * A pointer to a table of void * pointers (objects) that will be filled.
912 * The number of objects to dequeue from the ring to the obj_table.
914 * - 0: Success; objects dequeued.
915 * - -ENOENT: Not enough entries in the ring to dequeue, no object is dequeued.
918 static inline int __attribute__((always_inline))
919 rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
921 if (r->cons.sc_dequeue)
922 return rte_ring_sc_dequeue_bulk(r, obj_table, n);
924 return rte_ring_mc_dequeue_bulk(r, obj_table, n);
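/*
 * Illustrative sketch: a fixed-count (bulk) dequeue is all-or-nothing, so
 * -ENOENT simply means "not enough entries yet"; the caller can retry later
 * or use the burst variant instead. The helper name is hypothetical.
 */
static inline int
example_bulk_dequeue(struct rte_ring *r, void *objs[], unsigned want)
{
	if (rte_ring_dequeue_bulk(r, objs, want) != 0)
		return 0;		/* -ENOENT: fewer than 'want' entries available */

	return (int)want;		/* objs[0..want-1] are now valid */
}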
928 * Dequeue one object from a ring (multi-consumers safe).
930 * This function uses a "compare and set" instruction to move the
931 * consumer index atomically.
934 * A pointer to the ring structure.
936 * A pointer to a void * pointer (object) that will be filled.
938 * - 0: Success; objects dequeued.
939 * - -ENOENT: Not enough entries in the ring to dequeue; no object is dequeued.
942 static inline int __attribute__((always_inline))
943 rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
945 return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
949 * Dequeue one object from a ring (NOT multi-consumers safe).
952 * A pointer to the ring structure.
954 * A pointer to a void * pointer (object) that will be filled.
956 * - 0: Success; objects dequeued.
957 * - -ENOENT: Not enough entries in the ring to dequeue, no object is dequeued.
960 static inline int __attribute__((always_inline))
961 rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
963 return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
967 * Dequeue one object from a ring.
969 * This function calls the multi-consumers or the single-consumer
970 * version depending on the default behaviour that was specified at
971 * ring creation time (see flags).
974 * A pointer to the ring structure.
976 * A pointer to a void * pointer (object) that will be filled.
978 * - 0: Success, objects dequeued.
979 * - -ENOENT: Not enough entries in the ring to dequeue, no object is dequeued.
982 static inline int __attribute__((always_inline))
983 rte_ring_dequeue(struct rte_ring *r, void **obj_p)
985 if (r->cons.sc_dequeue)
986 return rte_ring_sc_dequeue(r, obj_p);
988 return rte_ring_mc_dequeue(r, obj_p);
992 * Test if a ring is full.
995 * A pointer to the ring structure.
997 * - 1: The ring is full.
998 * - 0: The ring is not full.
1001 rte_ring_full(const struct rte_ring *r)
1003 uint32_t prod_tail = r->prod.tail;
1004 uint32_t cons_tail = r->cons.tail;
1005 return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
1009 * Test if a ring is empty.
1012 * A pointer to the ring structure.
1014 * - 1: The ring is empty.
1015 * - 0: The ring is not empty.
1018 rte_ring_empty(const struct rte_ring *r)
1020 uint32_t prod_tail = r->prod.tail;
1021 uint32_t cons_tail = r->cons.tail;
1022 return !!(cons_tail == prod_tail);
1026 * Return the number of entries in a ring.
1029 * A pointer to the ring structure.
1031 * The number of entries in the ring.
1033 static inline unsigned
1034 rte_ring_count(const struct rte_ring *r)
1036 uint32_t prod_tail = r->prod.tail;
1037 uint32_t cons_tail = r->cons.tail;
1038 return ((prod_tail - cons_tail) & r->prod.mask);
1042 * Return the number of free entries in a ring.
1045 * A pointer to the ring structure.
1047 * The number of free entries in the ring.
1049 static inline unsigned
1050 rte_ring_free_count(const struct rte_ring *r)
1052 uint32_t prod_tail = r->prod.tail;
1053 uint32_t cons_tail = r->cons.tail;
1054 return ((cons_tail - prod_tail - 1) & r->prod.mask);
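/*
 * Illustrative sketch: the helpers above give an instantaneous (and, under
 * concurrent use, approximate) view of the ring occupancy, e.g. for a load
 * metric. The helper name is hypothetical.
 */
static inline unsigned
example_ring_fill_percent(const struct rte_ring *r)
{
	unsigned used = rte_ring_count(r);
	unsigned capacity = used + rte_ring_free_count(r);	/* always size-1 */

	return capacity ? (unsigned)(((uint64_t)used * 100) / capacity) : 0;
}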
1058 * Dump the status of all rings on the console
1061 * A pointer to a file for output
1063 void rte_ring_list_dump(FILE *f);
1066 * Search for a ring by its name.
1069 * The name of the ring.
1071 * The pointer to the ring matching the name, or NULL if not found,
1072 * with rte_errno set appropriately. Possible rte_errno values include:
1073 * - ENOENT - required entry not available to return.
1075 struct rte_ring *rte_ring_lookup(const char *name);
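/*
 * Illustrative sketch: a secondary process (or another module of the
 * application) attaching by name to a ring created elsewhere. The helper name
 * is hypothetical.
 */
static inline struct rte_ring *
example_attach_ring(const char *name)
{
	struct rte_ring *r = rte_ring_lookup(name);

	if (r == NULL) {
		/* not found: rte_errno (see <rte_errno.h>) is set to ENOENT */
		return NULL;
	}
	return r;
}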
1078 * Enqueue several objects on the ring (multi-producers safe).
1080 * This function uses a "compare and set" instruction to move the
1081 * producer index atomically.
1084 * A pointer to the ring structure.
1086 * A pointer to a table of void * pointers (objects).
1088 * The number of objects to add in the ring from the obj_table.
1090 * - n: Actual number of objects enqueued.
1092 static inline int __attribute__((always_inline))
1093 rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
1096 return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
1100 * Enqueue several objects on a ring (NOT multi-producers safe).
1103 * A pointer to the ring structure.
1105 * A pointer to a table of void * pointers (objects).
1107 * The number of objects to add in the ring from the obj_table.
1109 * - n: Actual number of objects enqueued.
1111 static inline int __attribute__((always_inline))
1112 rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
1115 return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
1119 * Enqueue several objects on a ring.
1121 * This function calls the multi-producer or the single-producer
1122 * version depending on the default behavior that was specified at
1123 * ring creation time (see flags).
1126 * A pointer to the ring structure.
1128 * A pointer to a table of void * pointers (objects).
1130 * The number of objects to add in the ring from the obj_table.
1132 * - n: Actual number of objects enqueued.
1134 static inline int __attribute__((always_inline))
1135 rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
1138 if (r->prod.sp_enqueue)
1139 return rte_ring_sp_enqueue_burst(r, obj_table, n);
1141 return rte_ring_mp_enqueue_burst(r, obj_table, n);
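/*
 * Illustrative sketch: a burst enqueue may accept only part of the table; the
 * caller keeps ownership of the remainder and must recycle it. When a
 * watermark is configured, the RTE_RING_QUOT_EXCEED bit may be ORed into the
 * return value and has to be masked off. The helper name is hypothetical.
 */
static inline unsigned
example_burst_enqueue(struct rte_ring *r, void *objs[], unsigned count)
{
	unsigned sent = (unsigned)rte_ring_enqueue_burst(r, objs, count);

	sent &= ~(unsigned)RTE_RING_QUOT_EXCEED;

	return count - sent;	/* objects the caller still owns */
}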
1145 * Dequeue several objects from a ring (multi-consumers safe). When more
1146 * objects are requested than are available, only the available objects are dequeued.
1149 * This function uses a "compare and set" instruction to move the
1150 * consumer index atomically.
1153 * A pointer to the ring structure.
1155 * A pointer to a table of void * pointers (objects) that will be filled.
1157 * The number of objects to dequeue from the ring to the obj_table.
1159 * - n: Actual number of objects dequeued, 0 if ring is empty
1161 static inline int __attribute__((always_inline))
1162 rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
1164 return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
1168 * Dequeue several objects from a ring (NOT multi-consumers safe). When more
1169 * objects are requested than are available, only the available
1170 * objects are dequeued.
1173 * A pointer to the ring structure.
1175 * A pointer to a table of void * pointers (objects) that will be filled.
1177 * The number of objects to dequeue from the ring to the obj_table.
1179 * - n: Actual number of objects dequeued, 0 if ring is empty
1181 static inline int __attribute__((always_inline))
1182 rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
1184 return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
1188 * Dequeue multiple objects from a ring up to a maximum number.
1190 * This function calls the multi-consumers or the single-consumer
1191 * version, depending on the default behaviour that was specified at
1192 * ring creation time (see flags).
1195 * A pointer to the ring structure.
1197 * A pointer to a table of void * pointers (objects) that will be filled.
1199 * The number of objects to dequeue from the ring to the obj_table.
1201 * - Number of objects dequeued, or a negative error code on error
1203 static inline int __attribute__((always_inline))
1204 rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
1206 if (r->cons.sc_dequeue)
1207 return rte_ring_sc_dequeue_burst(r, obj_table, n);
1209 return rte_ring_mc_dequeue_burst(r, obj_table, n);
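/*
 * Illustrative sketch: a consumer loop draining up to 32 objects per call;
 * the burst variant never fails, it just reports how many objects it actually
 * got. The helper name and the process() callback are hypothetical.
 */
static inline void
example_drain_ring(struct rte_ring *r, void (*process)(void *obj))
{
	void *objs[32];
	unsigned nb, i;

	do {
		nb = (unsigned)rte_ring_dequeue_burst(r, objs, 32);
		for (i = 0; i < nb; i++)
			process(objs[i]);
	} while (nb == 32);	/* stop once less than a full burst was available */
}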
1216 #endif /* _RTE_RING_H_ */