/*-
 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Derived from FreeBSD's bufring.h
 *
 **************************************************************************
 *
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. The name of Kip Macy nor the names of other
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ***************************************************************************/
#ifndef _RTE_RING_H_
#define _RTE_RING_H_

/**
 * @file
 * RTE Ring
 *
 * The Ring Manager is a fixed-size queue, implemented as a table of
 * pointers. Head and tail pointers are modified atomically, allowing
 * concurrent access to it. It has the following features:
 *
 * - FIFO (First In First Out)
 * - Maximum size is fixed; the pointers are stored in a table.
 * - Lockless implementation.
 * - Multi- or single-consumer dequeue.
 * - Multi- or single-producer enqueue.
 *
 * Note: the ring implementation is not preemptable. An lcore must not
 * be interrupted by another task that uses the same ring.
 */
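/*
 * Illustrative usage sketch (not part of the original header); the ring name,
 * size and object below are placeholders:
 *
 *	struct rte_ring *r = rte_ring_create("APP_RING", 1024, SOCKET_ID_ANY,
 *			RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	void *obj = some_object;
 *	if (rte_ring_enqueue(r, obj) != 0)
 *		;	// -ENOBUFS: ring full; -EDQUOT: enqueued but above watermark
 *	if (rte_ring_dequeue(r, &obj) == 0)
 *		;	// obj now holds the dequeued pointer
 */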
#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <stdint.h>
#include <sys/queue.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
#define RTE_TAILQ_RING_NAME "RTE_RING"

enum rte_ring_queue_behavior {
	RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
	RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from ring */
};

#define RTE_RING_MZ_PREFIX "RG_"
/** The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
			   sizeof(RTE_RING_MZ_PREFIX) + 1)

#ifndef RTE_RING_PAUSE_REP_COUNT
#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
				    *   if RTE_RING_PAUSE_REP_COUNT is not defined. */
#endif
struct rte_memzone; /* forward declaration, so as not to require memzone.h */

#if RTE_CACHE_LINE_SIZE < 128
#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
#else
#define PROD_ALIGN RTE_CACHE_LINE_SIZE
#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif

/* structure to hold a pair of head/tail values and other metadata */
struct rte_ring_headtail {
	volatile uint32_t head;  /**< Prod/consumer head. */
	volatile uint32_t tail;  /**< Prod/consumer tail. */
	uint32_t single;         /**< True if single prod/cons */
};
/**
 * An RTE ring structure.
 *
 * The producer and the consumer have a head and a tail index. The
 * particularity of these indexes is that they are not between 0 and
 * size(ring). These indexes are between 0 and 2^32, and we mask their value
 * when we access the ring[] field. Thanks to this assumption, we can do
 * subtractions between 2 index values in a modulo-32bit base: that's why the
 * overflow of the indexes is not a problem.
 */
struct rte_ring {
	/*
	 * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
	 * compatibility requirements, it could be changed to RTE_RING_NAMESIZE
	 * next time the ABI changes
	 */
	char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the ring. */
	int flags;                       /**< Flags supplied at creation. */
	const struct rte_memzone *memzone;
			/**< Memzone, if any, containing the rte_ring */
	uint32_t size;                   /**< Size of ring. */
	uint32_t mask;                   /**< Mask (size-1) of ring. */
	uint32_t watermark;              /**< Max items before EDQUOT in producer. */

	/** Ring producer status. */
	struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);

	/** Ring consumer status. */
	struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);

	void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
					   * not volatile so need to be careful
					   * about compiler re-ordering */
};
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_QUOT_EXCEED (1 << 31)  /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK  (unsigned)(0x0fffffff) /**< Ring size mask */
/**
 * Calculate the memory size needed for a ring
 *
 * This function returns the number of bytes needed for a ring, given
 * the number of elements in it. This value is the sum of the size of
 * the structure rte_ring and the size of the memory needed by the
 * object pointers. The value is aligned to a cache line size.
 *
 * @param count
 *   The number of elements in the ring (must be a power of 2).
 * @return
 *   - The memory size needed for the ring on success.
 *   - -EINVAL if count is not a power of 2.
 */
ssize_t rte_ring_get_memsize(unsigned count);
/**
 * Initialize a ring structure.
 *
 * Initialize a ring structure in memory pointed by "r". The size of the
 * memory area must be large enough to store the ring structure and the
 * object table. It is advised to use rte_ring_get_memsize() to get the
 * appropriate size.
 *
 * The ring size is set to *count*, which must be a power of two. Water
 * marking is disabled by default. The real usable ring size is
 * *count-1* instead of *count* to differentiate a free ring from an
 * empty ring.
 *
 * The ring is not added in RTE_TAILQ_RING global list. Indeed, the
 * memory given by the caller may not be shareable among dpdk
 * processes.
 *
 * @param r
 *   The pointer to the ring structure followed by the objects table.
 * @param name
 *   The name of the ring.
 * @param count
 *   The number of elements in the ring (must be a power of 2).
 * @param flags
 *   An OR of the following:
 *    - RING_F_SP_ENQ: If this flag is set, the default behavior when
 *      using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *      is "single-producer". Otherwise, it is "multi-producers".
 *    - RING_F_SC_DEQ: If this flag is set, the default behavior when
 *      using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *      is "single-consumer". Otherwise, it is "multi-consumers".
 * @return
 *   0 on success, or a negative value on error.
 */
int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
	unsigned flags);
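/*
 * Illustrative sketch (not part of the original header) of sizing and
 * initializing a ring in caller-provided memory. rte_zmalloc() is assumed
 * here only for cache-aligned allocation (it lives in rte_malloc.h, which is
 * not included by this header); the names are placeholders:
 *
 *	ssize_t sz = rte_ring_get_memsize(1024);
 *	struct rte_ring *r = (sz < 0) ? NULL :
 *			rte_zmalloc("ring", sz, RTE_CACHE_LINE_SIZE);
 *	if (r != NULL && rte_ring_init(r, "PRIV_RING", 1024, 0) != 0)
 *		;	// handle error
 */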
/**
 * Create a new ring named *name* in memory.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. Then it
 * calls rte_ring_init() to initialize an empty ring.
 *
 * The new ring size is set to *count*, which must be a power of
 * two. Water marking is disabled by default. The real usable ring size
 * is *count-1* instead of *count* to differentiate a free ring from an
 * empty ring.
 *
 * The ring is added in RTE_TAILQ_RING list.
 *
 * @param name
 *   The name of the ring.
 * @param count
 *   The size of the ring (must be a power of 2).
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   An OR of the following:
 *    - RING_F_SP_ENQ: If this flag is set, the default behavior when
 *      using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *      is "single-producer". Otherwise, it is "multi-producers".
 *    - RING_F_SC_DEQ: If this flag is set, the default behavior when
 *      using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *      is "single-consumer". Otherwise, it is "multi-consumers".
 * @return
 *   On success, the pointer to the new allocated ring. NULL on error with
 *   rte_errno set appropriately. Possible errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - count provided is not a power of 2
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_ring *rte_ring_create(const char *name, unsigned count,
				 int socket_id, unsigned flags);
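/*
 * Illustrative sketch (not part of the original header); "MBUF_RING" is a
 * placeholder name:
 *
 *	struct rte_ring *r = rte_ring_create("MBUF_RING", 4096, rte_socket_id(),
 *			RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	if (r == NULL)
 *		;	// inspect rte_errno (e.g. EEXIST, ENOMEM) to see why
 */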
/**
 * De-allocate all memory used by the ring.
 *
 * @param r
 *   Ring to free
 */
void rte_ring_free(struct rte_ring *r);
/**
 * Change the high water mark.
 *
 * If *count* is 0, water marking is disabled. Otherwise, it is set to the
 * *count* value. The *count* value must be greater than 0 and less
 * than the ring size.
 *
 * This function can be called at any time (not necessarily at
 * initialization).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param count
 *   The new water mark value.
 * @return
 *   - 0: Success; water mark changed.
 *   - -EINVAL: Invalid water mark value.
 */
int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
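/*
 * Illustrative sketch (not part of the original header): with a watermark at
 * 3/4 of the ring size, enqueues above the mark still succeed but report
 * -EDQUOT (or set RTE_RING_QUOT_EXCEED for burst calls):
 *
 *	rte_ring_set_water_mark(r, (rte_ring_get_size(r) / 4) * 3);
 */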
/**
 * Dump the status of the ring to a file.
 *
 * @param f
 *   A pointer to a file for output
 * @param r
 *   A pointer to the ring structure.
 */
void rte_ring_dump(FILE *f, const struct rte_ring *r);
/* the actual enqueue of pointers on the ring.
 * Placed here since identical code needed in both
 * single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
	const uint32_t size = r->size; \
	uint32_t idx = prod_head & mask; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
			r->ring[idx] = obj_table[i]; \
			r->ring[idx+1] = obj_table[i+1]; \
			r->ring[idx+2] = obj_table[i+2]; \
			r->ring[idx+3] = obj_table[i+3]; \
		} \
		switch (n & 0x3) { \
		case 3: r->ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 2: r->ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 1: r->ring[idx++] = obj_table[i++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++)\
			r->ring[idx] = obj_table[i]; \
		for (idx = 0; i < n; i++, idx++) \
			r->ring[idx] = obj_table[i]; \
	} \
} while (0)
/* the actual copy of pointers on the ring to obj_table.
 * Placed here since identical code needed in both
 * single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
	uint32_t idx = cons_head & mask; \
	const uint32_t size = r->size; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) {\
			obj_table[i] = r->ring[idx]; \
			obj_table[i+1] = r->ring[idx+1]; \
			obj_table[i+2] = r->ring[idx+2]; \
			obj_table[i+3] = r->ring[idx+3]; \
		} \
		switch (n & 0x3) { \
		case 3: obj_table[i++] = r->ring[idx++]; /* fallthrough */ \
		case 2: obj_table[i++] = r->ring[idx++]; /* fallthrough */ \
		case 1: obj_table[i++] = r->ring[idx++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			obj_table[i] = r->ring[idx]; \
		for (idx = 0; i < n; i++, idx++) \
			obj_table[i] = r->ring[idx]; \
	} \
} while (0)
/**
 * @internal Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items to the ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
 * @return
 *   Depends on the behavior value
 *   if behavior = RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
 *   if behavior = RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t prod_head, prod_next;
	uint32_t cons_tail, free_entries;
	const unsigned max = n;
	int success;
	unsigned i, rep = 0;
	uint32_t mask = r->mask;
	int ret;

	/* Avoid the unnecessary cmpset operation below, which is also
	 * potentially harmful when n equals 0. */
	if (n == 0)
		return 0;

	/* move prod.head atomically */
	do {
		/* Reset n to the initial burst count */
		n = max;

		prod_head = r->prod.head;
		cons_tail = r->cons.tail;
		/* The subtraction is done between two unsigned 32bits value
		 * (the result is always modulo 32 bits even if we have
		 * prod_head > cons_tail). So 'free_entries' is always between 0
		 * and size(ring)-1. */
		free_entries = (mask + cons_tail - prod_head);

		/* check that we have enough room in ring */
		if (unlikely(n > free_entries)) {
			if (behavior == RTE_RING_QUEUE_FIXED)
				return -ENOBUFS;
			else {
				/* No free entry available */
				if (unlikely(free_entries == 0))
					return 0;
				n = free_entries;
			}
		}

		prod_next = prod_head + n;
		success = rte_atomic32_cmpset(&r->prod.head, prod_head,
					      prod_next);
	} while (unlikely(success == 0));

	/* write entries in ring */
	ENQUEUE_PTRS();
	rte_smp_wmb();

	/* if we exceed the watermark */
	if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
				(int)(n | RTE_RING_QUOT_EXCEED);
	else
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;

	/*
	 * If there are other enqueues in progress that preceded us,
	 * we need to wait for them to complete
	 */
	while (unlikely(r->prod.tail != prod_head)) {
		rte_pause();

		/* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
		 * for other thread finish. It gives pre-empted thread a chance
		 * to proceed and finish with ring dequeue operation. */
		if (RTE_RING_PAUSE_REP_COUNT &&
		    ++rep == RTE_RING_PAUSE_REP_COUNT) {
			rep = 0;
			sched_yield();
		}
	}
	r->prod.tail = prod_next;
	return ret;
}
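/*
 * Worked example (added for clarity, not in the original header) of the
 * modulo-2^32 arithmetic used above. For a ring of size 8 (mask = 7), with
 * prod_head = 0x00000001 (the producer index has wrapped past 2^32) and
 * cons_tail = 0xFFFFFFFC, the ring holds prod_head - cons_tail = 5 entries,
 * and free_entries = (mask + cons_tail - prod_head) mod 2^32 = 2, which
 * matches the 7 usable slots (size - 1) minus the 5 in use.
 */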
/**
 * @internal Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items to the ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
 * @return
 *   Depends on the behavior value
 *   if behavior = RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
 *   if behavior = RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t prod_head, cons_tail;
	uint32_t prod_next, free_entries;
	unsigned i;
	uint32_t mask = r->mask;
	int ret;

	prod_head = r->prod.head;
	cons_tail = r->cons.tail;
	/* The subtraction is done between two unsigned 32bits value
	 * (the result is always modulo 32 bits even if we have
	 * prod_head > cons_tail). So 'free_entries' is always between 0
	 * and size(ring)-1. */
	free_entries = mask + cons_tail - prod_head;

	/* check that we have enough room in ring */
	if (unlikely(n > free_entries)) {
		if (behavior == RTE_RING_QUEUE_FIXED)
			return -ENOBUFS;
		else {
			/* No free entry available */
			if (unlikely(free_entries == 0))
				return 0;
			n = free_entries;
		}
	}

	prod_next = prod_head + n;
	r->prod.head = prod_next;

	/* write entries in ring */
	ENQUEUE_PTRS();
	rte_smp_wmb();

	/* if we exceed the watermark */
	if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
			(int)(n | RTE_RING_QUOT_EXCEED);
	else
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;

	r->prod.tail = prod_next;
	return ret;
}
/**
 * @internal Dequeue several objects from a ring (multi-consumers safe). When
 * the requested number of objects exceeds the available objects, only the
 * actual number of objects is dequeued.
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from the ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
 * @return
 *   Depends on the behavior value
 *   if behavior = RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 *   if behavior = RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects dequeued.
 */
static inline int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t cons_head, prod_tail;
	uint32_t cons_next, entries;
	const unsigned max = n;
	int success;
	unsigned i, rep = 0;
	uint32_t mask = r->mask;

	/* Avoid the unnecessary cmpset operation below, which is also
	 * potentially harmful when n equals 0. */
	if (n == 0)
		return 0;

	/* move cons.head atomically */
	do {
		/* Restore n as it may change every loop */
		n = max;

		cons_head = r->cons.head;
		prod_tail = r->prod.tail;
		/* The subtraction is done between two unsigned 32bits value
		 * (the result is always modulo 32 bits even if we have
		 * cons_head > prod_tail). So 'entries' is always between 0
		 * and size(ring)-1. */
		entries = (prod_tail - cons_head);

		/* Set the actual entries for dequeue */
		if (n > entries) {
			if (behavior == RTE_RING_QUEUE_FIXED)
				return -ENOENT;
			else {
				if (unlikely(entries == 0))
					return 0;
				n = entries;
			}
		}

		cons_next = cons_head + n;
		success = rte_atomic32_cmpset(&r->cons.head, cons_head, cons_next);
	} while (unlikely(success == 0));

	/* copy in table */
	DEQUEUE_PTRS();
	rte_smp_rmb();

	/*
	 * If there are other dequeues in progress that preceded us,
	 * we need to wait for them to complete
	 */
	while (unlikely(r->cons.tail != cons_head)) {
		rte_pause();

		/* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
		 * for other thread finish. It gives pre-empted thread a chance
		 * to proceed and finish with ring dequeue operation. */
		if (RTE_RING_PAUSE_REP_COUNT &&
		    ++rep == RTE_RING_PAUSE_REP_COUNT) {
			rep = 0;
			sched_yield();
		}
	}
	r->cons.tail = cons_next;
	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
/**
 * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
 * When the requested number of objects exceeds the available objects, only
 * the actual number of objects is dequeued.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from the ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
 * @return
 *   Depends on the behavior value
 *   if behavior = RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 *   if behavior = RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects dequeued.
 */
static inline int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t cons_head, prod_tail;
	uint32_t cons_next, entries;
	unsigned i;
	uint32_t mask = r->mask;

	cons_head = r->cons.head;
	prod_tail = r->prod.tail;
	/* The subtraction is done between two unsigned 32bits value
	 * (the result is always modulo 32 bits even if we have
	 * cons_head > prod_tail). So 'entries' is always between 0
	 * and size(ring)-1. */
	entries = prod_tail - cons_head;

	if (n > entries) {
		if (behavior == RTE_RING_QUEUE_FIXED)
			return -ENOENT;
		else {
			if (unlikely(entries == 0))
				return 0;
			n = entries;
		}
	}

	cons_next = cons_head + n;
	r->cons.head = cons_next;

	/* copy in table */
	DEQUEUE_PTRS();
	rte_smp_rmb();

	r->cons.tail = cons_next;
	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
/**
 * Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned n)
{
	return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned n)
{
	return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		      unsigned n)
{
	if (r->prod.single)
		return rte_ring_sp_enqueue_bulk(r, obj_table, n);
	else
		return rte_ring_mp_enqueue_bulk(r, obj_table, n);
}
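/*
 * Illustrative sketch (not part of the original header) of a fixed-size bulk
 * enqueue; "burst" and its length are placeholders:
 *
 *	void *burst[32];
 *	int rc = rte_ring_enqueue_bulk(r, burst, 32);
 *	if (rc == -ENOBUFS)
 *		;	// nothing was enqueued; retry later or drop
 *	else if (rc == -EDQUOT)
 *		;	// all 32 were enqueued, but the watermark is now exceeded
 */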
/**
 * Enqueue one object on a ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_mp_enqueue_bulk(r, &obj, 1);
}
/**
 * Enqueue one object on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_sp_enqueue_bulk(r, &obj, 1);
}
/**
 * Enqueue one object on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
	if (r->prod.single)
		return rte_ring_sp_enqueue(r, obj);
	else
		return rte_ring_mp_enqueue(r, obj);
}
/**
 * Dequeue several objects from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Dequeue several objects from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table,
 *   must be strictly positive.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Dequeue several objects from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	if (r->cons.single)
		return rte_ring_sc_dequeue_bulk(r, obj_table, n);
	else
		return rte_ring_mc_dequeue_bulk(r, obj_table, n);
}
/**
 * Dequeue one object from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
}
/**
 * Dequeue one object from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
}
/**
 * Dequeue one object from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
	if (r->cons.single)
		return rte_ring_sc_dequeue(r, obj_p);
	else
		return rte_ring_mc_dequeue(r, obj_p);
}
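/*
 * Illustrative sketch (not part of the original header) of a single-object
 * dequeue; the consumer simply pauses and retries while the ring is empty:
 *
 *	void *obj;
 *	while (rte_ring_dequeue(r, &obj) == -ENOENT)
 *		rte_pause();
 *	// process obj
 */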
/**
 * Test if a ring is full.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is full.
 *   - 0: The ring is not full.
 */
static inline int
rte_ring_full(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return ((cons_tail - prod_tail - 1) & r->mask) == 0;
}
/**
 * Test if a ring is empty.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is empty.
 *   - 0: The ring is not empty.
 */
static inline int
rte_ring_empty(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return !!(cons_tail == prod_tail);
}
/**
 * Return the number of entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of entries in the ring.
 */
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return (prod_tail - cons_tail) & r->mask;
}
/**
 * Return the number of free entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of free entries in the ring.
 */
static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return (cons_tail - prod_tail - 1) & r->mask;
}
/**
 * Return the size of the ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of elements which can be stored in the ring.
 */
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
	return r->size;
}
/**
 * Dump the status of all rings on the console
 *
 * @param f
 *   A pointer to a file for output
 */
void rte_ring_list_dump(FILE *f);
/**
 * Search a ring from its name
 *
 * @param name
 *   The name of the ring.
 * @return
 *   The pointer to the ring matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - ENOENT - required entry not available to return.
 */
struct rte_ring *rte_ring_lookup(const char *name);
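/*
 * Illustrative sketch (not part of the original header): attaching to a ring
 * created elsewhere, by name; "APP_RING" is a placeholder:
 *
 *	struct rte_ring *r = rte_ring_lookup("APP_RING");
 *	if (r == NULL)
 *		;	// rte_errno == ENOENT: no ring with that name exists
 */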
/**
 * Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned n)
{
	return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned n)
{
	return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		       unsigned n)
{
	if (r->prod.single)
		return rte_ring_sp_enqueue_burst(r, obj_table, n);
	else
		return rte_ring_mp_enqueue_burst(r, obj_table, n);
}
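/*
 * Illustrative sketch (not part of the original header): when a watermark is
 * set, the burst enqueue flags quota excess in its return value (see the
 * internal enqueue above), so mask the flag off before using the count:
 *
 *	unsigned sent = rte_ring_enqueue_burst(r, burst, 32);
 *	if (sent & RTE_RING_QUOT_EXCEED)
 *		sent &= ~RTE_RING_QUOT_EXCEED;	// enqueued, but above watermark
 */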
/**
 * Dequeue several objects from a ring (multi-consumers safe). When the
 * requested number of objects exceeds the available objects, only the actual
 * number of objects is dequeued.
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - n: Actual number of objects dequeued, 0 if ring is empty
 */
static inline unsigned __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Dequeue several objects from a ring (NOT multi-consumers safe). When the
 * requested number of objects exceeds the available objects, only the actual
 * number of objects is dequeued.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - n: Actual number of objects dequeued, 0 if ring is empty
 */
static inline unsigned __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Dequeue multiple objects from a ring up to a maximum number.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - Number of objects dequeued
 */
static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	if (r->cons.single)
		return rte_ring_sc_dequeue_burst(r, obj_table, n);
	else
		return rte_ring_mc_dequeue_burst(r, obj_table, n);
}
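/*
 * Illustrative sketch (not part of the original header) of a typical consumer
 * loop built on the burst API; "process_obj" is a placeholder:
 *
 *	void *objs[64];
 *	unsigned i, nb = rte_ring_dequeue_burst(r, objs, 64);
 *	for (i = 0; i < nb; i++)
 *		process_obj(objs[i]);
 */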
#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_H_ */