/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * Derived from FreeBSD's bufring.h
 *
 **************************************************************************
 *
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. The name of Kip Macy nor the names of other
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ***************************************************************************/
/**
 * @file
 * RTE Ring
 *
 * The Ring Manager is a fixed-size queue, implemented as a table of
 * pointers. Head and tail pointers are modified atomically, allowing
 * concurrent access to it. It has the following features:
 *
 * - FIFO (First In First Out)
 * - Maximum size is fixed; the pointers are stored in a table.
 * - Lockless implementation.
 * - Multi- or single-consumer dequeue.
 * - Multi- or single-producer enqueue.
 * - Bulk dequeue.
 * - Bulk enqueue.
 *
 * Note: the ring implementation is not preemptible. An lcore must not
 * be interrupted by another task that uses the same ring.
 */
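/*
 * Example usage (an illustrative sketch, not part of this header): a
 * single-producer/single-consumer ring passing one pointer through.
 * Error handling is abbreviated; any pointer-sized token can be stored.
 *
 *	struct rte_ring *r;
 *	void *in = (void *)(uintptr_t)0x42;
 *	void *out;
 *
 *	r = rte_ring_create("example", 1024, SOCKET_ID_ANY,
 *			    RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	if (r == NULL)
 *		(creation failed; rte_errno holds the cause)
 *	if (rte_ring_sp_enqueue(r, in) < 0)
 *		(-ENOBUFS: ring full, nothing enqueued)
 *	if (rte_ring_sc_dequeue(r, &out) == 0)
 *		(out now equals in)
 */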
#ifndef _RTE_RING_H_
#define _RTE_RING_H_

#include <stdint.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
enum rte_ring_queue_behavior {
	RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
	RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from ring */
};
#ifdef RTE_LIBRTE_RING_DEBUG
/**
 * A structure that stores the ring statistics (per-lcore).
 */
struct rte_ring_debug_stats {
	uint64_t enq_success_bulk; /**< Successful enqueues number. */
	uint64_t enq_success_objs; /**< Objects successfully enqueued. */
	uint64_t enq_quota_bulk;   /**< Successful enqueues above watermark. */
	uint64_t enq_quota_objs;   /**< Objects enqueued above watermark. */
	uint64_t enq_fail_bulk;    /**< Failed enqueues number. */
	uint64_t enq_fail_objs;    /**< Objects that failed to be enqueued. */
	uint64_t deq_success_bulk; /**< Successful dequeues number. */
	uint64_t deq_success_objs; /**< Objects successfully dequeued. */
	uint64_t deq_fail_bulk;    /**< Failed dequeues number. */
	uint64_t deq_fail_objs;    /**< Objects that failed to be dequeued. */
} __rte_cache_aligned;
#endif
#define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */
/**
 * An RTE ring structure.
 *
 * The producer and the consumer have a head and a tail index. The
 * particularity of these indexes is that they are not between 0 and
 * size(ring). These indexes are between 0 and 2^32, and we mask their
 * value when we access the ring[] field. Thanks to this assumption, we
 * can do subtractions between two index values in a modulo-32bit base:
 * that's why the overflow of the indexes is not a problem.
 */
struct rte_ring {
	TAILQ_ENTRY(rte_ring) next;   /**< Next in list. */

	char name[RTE_RING_NAMESIZE]; /**< Name of the ring. */
	int flags;                    /**< Flags supplied at creation. */

	/** Ring producer status. */
	struct prod {
		uint32_t watermark;      /**< Maximum items before EDQUOT. */
		uint32_t sp_enqueue;     /**< True, if single producer. */
		uint32_t size;           /**< Size of ring. */
		uint32_t mask;           /**< Mask (size-1) of ring. */
		volatile uint32_t head;  /**< Producer head. */
		volatile uint32_t tail;  /**< Producer tail. */
	} prod __rte_cache_aligned;

	/** Ring consumer status. */
	struct cons {
		uint32_t sc_dequeue;     /**< True, if single consumer. */
		uint32_t size;           /**< Size of the ring. */
		uint32_t mask;           /**< Mask (size-1) of ring. */
		volatile uint32_t head;  /**< Consumer head. */
		volatile uint32_t tail;  /**< Consumer tail. */
#ifdef RTE_RING_SPLIT_PROD_CONS
	} cons __rte_cache_aligned;
#else
	} cons;
#endif

#ifdef RTE_LIBRTE_RING_DEBUG
	struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
#endif

	void * ring[0] __rte_cache_aligned; /**< Memory space of ring starts here.
	                                     * Not volatile, so be careful about
	                                     * compiler re-ordering. */
};
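/*
 * Worked example of the modulo-2^32 index arithmetic above (assumed
 * values): with a ring of size 8 (mask = 7), suppose prod.head has
 * wrapped to 2 while cons.tail is still at 0xFFFFFFFE. Then
 * free_entries = mask + cons_tail - prod_head
 *              = 7 + 0xFFFFFFFE - 2 = 3 (mod 2^32),
 * which is correct despite the wraparound: the unsigned subtraction
 * stays valid because the two indexes are never more than size(ring)
 * apart.
 */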
/* dummy assembly operation to prevent compiler re-ordering of instructions */
#define COMPILER_BARRIER() do { asm volatile("" ::: "memory"); } while(0)
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_QUOT_EXCEED (1 << 31)  /**< Quota exceeded for burst ops */
#define RTE_RING_SZ_MASK  (unsigned)(0x0fffffff) /**< Ring size mask */
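/*
 * Example (illustrative sketch): decoding a burst enqueue return value
 * when a watermark is in use. On quota excess the enqueued count is
 * OR'ed with RTE_RING_QUOT_EXCEED, so mask it off to recover the count.
 *
 *	int ret = rte_ring_mp_enqueue_burst(r, table, n);
 *	if (ret & RTE_RING_QUOT_EXCEED)
 *		(watermark exceeded; objects were still enqueued)
 *	unsigned enqueued = ret & RTE_RING_SZ_MASK;
 */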
/**
 * @internal When debug is enabled, store ring statistics.
 * @param r
 *   A pointer to the ring.
 * @param name
 *   The name of the statistics field to increment in the ring.
 * @param n
 *   The number of objects to add to the statistics.
 */
#ifdef RTE_LIBRTE_RING_DEBUG
#define __RING_STAT_ADD(r, name, n) do {		\
		unsigned __lcore_id = rte_lcore_id();	\
		r->stats[__lcore_id].name##_objs += n;	\
		r->stats[__lcore_id].name##_bulk += 1;	\
	} while(0)
#else
#define __RING_STAT_ADD(r, name, n) do {} while(0)
#endif
/**
 * Create a new ring named *name* in memory.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. Its size is
 * set to *count*, which must be a power of two. Water marking is
 * disabled by default.
 * Note that the real usable ring size is *count-1* instead of
 * *count* to differentiate a full ring from an empty ring.
 *
 * @param name
 *   The name of the ring.
 * @param count
 *   The size of the ring (must be a power of 2).
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   An OR of the following:
 *   - RING_F_SP_ENQ: If this flag is set, the default behavior when
 *     using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *     is "single-producer". Otherwise, it is "multi-producer".
 *   - RING_F_SC_DEQ: If this flag is set, the default behavior when
 *     using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *     is "single-consumer". Otherwise, it is "multi-consumer".
 * @return
 *   On success, the pointer to the newly allocated ring. NULL on error with
 *   rte_errno set appropriately. Possible rte_errno values include:
 *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *   - E_RTE_SECONDARY - function was called from a secondary process instance
 *   - E_RTE_NO_TAILQ - no tailq list could be obtained for the ring list
 *   - EINVAL - count provided is not a power of 2
 *   - ENOSPC - the maximum number of memzones has already been allocated
 *   - EEXIST - a memzone with the same name already exists
 *   - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_ring *rte_ring_create(const char *name, unsigned count,
				 int socket_id, unsigned flags);
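/*
 * Sizing note (illustrative sketch, assumed values): one slot is
 * sacrificed to tell a full ring from an empty one, so a ring created
 * with count = 1024 stores at most 1023 objects:
 *
 *	struct rte_ring *r = rte_ring_create("sized", 1024, SOCKET_ID_ANY, 0);
 *	unsigned capacity = rte_ring_free_count(r);	(1023 while empty)
 */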
/**
 * Change the high water mark.
 *
 * If *count* is 0, water marking is disabled. Otherwise, it is set to the
 * *count* value. The *count* value must be greater than 0 and less
 * than the ring size.
 *
 * This function can be called at any time (not necessarily at
 * initialization).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param count
 *   The new water mark value.
 * @return
 *   - 0: Success; water mark changed.
 *   - -EINVAL: Invalid water mark value.
 */
int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
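/*
 * Example (illustrative sketch): using the watermark as a backpressure
 * signal; the threshold 768 is an assumed value for a 1024-slot ring.
 * -EDQUOT means the enqueue itself succeeded but occupancy crossed the
 * watermark.
 *
 *	rte_ring_set_water_mark(r, 768);
 *	int ret = rte_ring_enqueue(r, obj);
 *	if (ret == -EDQUOT)
 *		(object enqueued, ring is filling up: apply backpressure)
 *	else if (ret == -ENOBUFS)
 *		(ring full, object NOT enqueued)
 */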
/**
 * Dump the status of the ring to the console.
 *
 * @param r
 *   A pointer to the ring structure.
 */
void rte_ring_dump(const struct rte_ring *r);
/* The actual enqueue of pointers on the ring.
 * Placed here since identical code needed in both
 * single and multi producer enqueue functions. */
#define ENQUEUE_PTRS() do { \
	const uint32_t size = r->prod.size; \
	uint32_t idx = prod_head & mask; \
	if (likely(idx + n < size)) { \
		/* no wrap: copy 4 pointers per iteration */ \
		for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
			r->ring[idx] = obj_table[i]; \
			r->ring[idx+1] = obj_table[i+1]; \
			r->ring[idx+2] = obj_table[i+2]; \
			r->ring[idx+3] = obj_table[i+3]; \
		} \
		/* remainder: intentional fallthrough */ \
		switch (n & 0x3) { \
			case 3: r->ring[idx++] = obj_table[i++]; \
			case 2: r->ring[idx++] = obj_table[i++]; \
			case 1: r->ring[idx++] = obj_table[i++]; \
		} \
	} else { \
		/* wrap: copy up to the end, then from the start */ \
		for (i = 0; idx < size; i++, idx++) \
			r->ring[idx] = obj_table[i]; \
		for (idx = 0; i < n; i++, idx++) \
			r->ring[idx] = obj_table[i]; \
	} \
} while(0)
/* The actual copy of pointers on the ring to obj_table.
 * Placed here since identical code needed in both
 * single and multi consumer dequeue functions. */
#define DEQUEUE_PTRS() do { \
	uint32_t idx = cons_head & mask; \
	const uint32_t size = r->cons.size; \
	if (likely(idx + n < size)) { \
		/* no wrap: copy 4 pointers per iteration */ \
		for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) { \
			obj_table[i] = r->ring[idx]; \
			obj_table[i+1] = r->ring[idx+1]; \
			obj_table[i+2] = r->ring[idx+2]; \
			obj_table[i+3] = r->ring[idx+3]; \
		} \
		/* remainder: intentional fallthrough */ \
		switch (n & 0x3) { \
			case 3: obj_table[i++] = r->ring[idx++]; \
			case 2: obj_table[i++] = r->ring[idx++]; \
			case 1: obj_table[i++] = r->ring[idx++]; \
		} \
	} else { \
		/* wrap: copy up to the end, then from the start */ \
		for (i = 0; idx < size; i++, idx++) \
			obj_table[i] = r->ring[idx]; \
		for (idx = 0; i < n; i++, idx++) \
			obj_table[i] = r->ring[idx]; \
	} \
} while (0)
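/*
 * Worked example of the wrap path above (assumed values): with size = 8,
 * idx = 6 and n = 4, the test idx + n < size fails, so the first loop
 * copies slots 6 and 7 and the second restarts at slot 0 for the two
 * remaining objects. In the common non-wrapping case the unrolled
 * 4-per-iteration loop runs, and the switch (with intentional
 * fallthrough) copies the n modulo 4 leftover pointers.
 */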
/**
 * @internal Enqueue several objects on the ring (multi-producer safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to the ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
 * @return
 *   Depends on the *behavior* value:
 *   if behavior = RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 *   if behavior = RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t prod_head, prod_next;
	uint32_t cons_tail, free_entries;
	const unsigned max = n;
	int success;
	unsigned i;
	uint32_t mask = r->prod.mask;
	int ret;

	/* move prod.head atomically */
	do {
		/* Reset n to the initial burst count */
		n = max;

		prod_head = r->prod.head;
		cons_tail = r->cons.tail;
		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * prod_head > cons_tail). So 'free_entries' is always between 0
		 * and size(ring)-1. */
		free_entries = (mask + cons_tail - prod_head);

		/* check that we have enough room in ring */
		if (unlikely(n > free_entries)) {
			if (behavior == RTE_RING_QUEUE_FIXED) {
				__RING_STAT_ADD(r, enq_fail, n);
				return -ENOBUFS;
			}
			else {
				/* No free entry available */
				if (unlikely(free_entries == 0)) {
					__RING_STAT_ADD(r, enq_fail, n);
					return 0;
				}

				n = free_entries;
			}
		}

		prod_next = prod_head + n;
		success = rte_atomic32_cmpset(&r->prod.head, prod_head,
					      prod_next);
	} while (unlikely(success == 0));

	/* write entries in ring */
	ENQUEUE_PTRS();
	COMPILER_BARRIER();

	/* if we exceed the watermark */
	if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
			(int)(n | RTE_RING_QUOT_EXCEED);
		__RING_STAT_ADD(r, enq_quota, n);
	}
	else {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
		__RING_STAT_ADD(r, enq_success, n);
	}

	/*
	 * If there are other enqueues in progress that preceded us,
	 * we need to wait for them to complete.
	 */
	while (unlikely(r->prod.tail != prod_head))
		rte_pause();

	r->prod.tail = prod_next;
	return ret;
}
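/*
 * Illustration of the multi-producer protocol above (an assumed schedule,
 * not part of the API): lcores A and B both read prod.head = 10 and each
 * try to reserve 2 slots. A's cmpset moves head to 12 and wins; B's
 * cmpset fails because head is no longer 10, so B retries and reserves
 * [12, 14). Both copy their slots concurrently, but B spins until A has
 * published prod.tail = 12 before setting tail = 14, so consumers never
 * observe slots that are still being written.
 */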
/**
 * @internal Enqueue several objects on a ring (NOT multi-producer safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items to the ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
 * @return
 *   Depends on the *behavior* value:
 *   if behavior = RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 *   if behavior = RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t prod_head, cons_tail;
	uint32_t prod_next, free_entries;
	unsigned i;
	uint32_t mask = r->prod.mask;
	int ret;

	prod_head = r->prod.head;
	cons_tail = r->cons.tail;
	/* The subtraction is done between two unsigned 32-bit values
	 * (the result is always modulo 32 bits even if we have
	 * prod_head > cons_tail). So 'free_entries' is always between 0
	 * and size(ring)-1. */
	free_entries = mask + cons_tail - prod_head;

	/* check that we have enough room in ring */
	if (unlikely(n > free_entries)) {
		if (behavior == RTE_RING_QUEUE_FIXED) {
			__RING_STAT_ADD(r, enq_fail, n);
			return -ENOBUFS;
		}
		else {
			/* No free entry available */
			if (unlikely(free_entries == 0)) {
				__RING_STAT_ADD(r, enq_fail, n);
				return 0;
			}

			n = free_entries;
		}
	}

	prod_next = prod_head + n;
	r->prod.head = prod_next;

	/* write entries in ring */
	ENQUEUE_PTRS();
	COMPILER_BARRIER();

	/* if we exceed the watermark */
	if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
			(int)(n | RTE_RING_QUOT_EXCEED);
		__RING_STAT_ADD(r, enq_quota, n);
	}
	else {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
		__RING_STAT_ADD(r, enq_success, n);
	}

	r->prod.tail = prod_next;
	return ret;
}
/**
 * @internal Dequeue several objects from a ring (multi-consumer safe). When
 * the requested number of objects exceeds the number available and
 * *behavior* is RTE_RING_QUEUE_VARIABLE, only the available objects are
 * dequeued.
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from the ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
 * @return
 *   Depends on the *behavior* value:
 *   if behavior = RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 *   if behavior = RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects dequeued.
 */
static inline int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t cons_head, prod_tail;
	uint32_t cons_next, entries;
	const unsigned max = n;
	int success;
	unsigned i;
	uint32_t mask = r->prod.mask;

	/* move cons.head atomically */
	do {
		/* Restore n as it may change every loop */
		n = max;

		cons_head = r->cons.head;
		prod_tail = r->prod.tail;
		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * cons_head > prod_tail). So 'entries' is always between 0
		 * and size(ring)-1. */
		entries = (prod_tail - cons_head);

		/* Set the actual entries for dequeue */
		if (n > entries) {
			if (behavior == RTE_RING_QUEUE_FIXED) {
				__RING_STAT_ADD(r, deq_fail, n);
				return -ENOENT;
			}
			else {
				if (unlikely(entries == 0)) {
					__RING_STAT_ADD(r, deq_fail, n);
					return 0;
				}

				n = entries;
			}
		}

		cons_next = cons_head + n;
		success = rte_atomic32_cmpset(&r->cons.head, cons_head,
					      cons_next);
	} while (unlikely(success == 0));

	/* copy in table */
	DEQUEUE_PTRS();
	COMPILER_BARRIER();

	/*
	 * If there are other dequeues in progress that preceded us,
	 * we need to wait for them to complete.
	 */
	while (unlikely(r->cons.tail != cons_head))
		rte_pause();

	__RING_STAT_ADD(r, deq_success, n);
	r->cons.tail = cons_next;

	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
/**
 * @internal Dequeue several objects from a ring (NOT multi-consumer safe).
 * When the requested number of objects exceeds the number available and
 * *behavior* is RTE_RING_QUEUE_VARIABLE, only the available objects are
 * dequeued.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from the ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
 * @return
 *   Depends on the *behavior* value:
 *   if behavior = RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 *   if behavior = RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects dequeued.
 */
static inline int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t cons_head, prod_tail;
	uint32_t cons_next, entries;
	unsigned i;
	uint32_t mask = r->prod.mask;

	cons_head = r->cons.head;
	prod_tail = r->prod.tail;
	/* The subtraction is done between two unsigned 32-bit values
	 * (the result is always modulo 32 bits even if we have
	 * cons_head > prod_tail). So 'entries' is always between 0
	 * and size(ring)-1. */
	entries = prod_tail - cons_head;

	if (n > entries) {
		if (behavior == RTE_RING_QUEUE_FIXED) {
			__RING_STAT_ADD(r, deq_fail, n);
			return -ENOENT;
		}
		else {
			if (unlikely(entries == 0)) {
				__RING_STAT_ADD(r, deq_fail, n);
				return 0;
			}

			n = entries;
		}
	}

	cons_next = cons_head + n;
	r->cons.head = cons_next;

	/* copy in table */
	DEQUEUE_PTRS();
	COMPILER_BARRIER();

	__RING_STAT_ADD(r, deq_success, n);
	r->cons.tail = cons_next;
	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
/**
 * Enqueue several objects on the ring (multi-producer safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned n)
{
	return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
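/*
 * Example (illustrative sketch): bulk enqueue is all-or-nothing, so the
 * table of 4 hypothetical objects below is either fully enqueued or not
 * at all.
 *
 *	void *objs[4] = { o0, o1, o2, o3 };	(o0..o3 are hypothetical)
 *	int ret = rte_ring_mp_enqueue_bulk(r, objs, 4);
 *	if (ret == 0)
 *		(all 4 enqueued)
 *	else if (ret == -EDQUOT)
 *		(all 4 enqueued, watermark exceeded)
 *	else
 *		(ret == -ENOBUFS: no room, nothing enqueued)
 */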
/**
 * Enqueue several objects on a ring (NOT multi-producer safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned n)
{
	return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		      unsigned n)
{
	if (r->prod.sp_enqueue)
		return rte_ring_sp_enqueue_bulk(r, obj_table, n);
	else
		return rte_ring_mp_enqueue_bulk(r, obj_table, n);
}
/**
 * Enqueue one object on a ring (multi-producer safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_mp_enqueue_bulk(r, &obj, 1);
}
/**
 * Enqueue one object on a ring (NOT multi-producer safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_sp_enqueue_bulk(r, &obj, 1);
}
/**
 * Enqueue one object on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
	if (r->prod.sp_enqueue)
		return rte_ring_sp_enqueue(r, obj);
	else
		return rte_ring_mp_enqueue(r, obj);
}
/**
 * Dequeue several objects from a ring (multi-consumer safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
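/*
 * Example (illustrative sketch): bulk dequeue either fills the whole
 * table or takes nothing.
 *
 *	void *objs[8];
 *	if (rte_ring_mc_dequeue_bulk(r, objs, 8) == 0)
 *		(exactly 8 objects are now in objs[])
 *	else
 *		(-ENOENT: fewer than 8 were available, none were taken)
 */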
/**
 * Dequeue several objects from a ring (NOT multi-consumer safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table;
 *   must be strictly positive.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Dequeue several objects from a ring.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	if (r->cons.sc_dequeue)
		return rte_ring_sc_dequeue_bulk(r, obj_table, n);
	else
		return rte_ring_mc_dequeue_bulk(r, obj_table, n);
}
/**
 * Dequeue one object from a ring (multi-consumer safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
}
/**
 * Dequeue one object from a ring (NOT multi-consumer safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
}
/**
 * Dequeue one object from a ring.
 *
 * This function calls the multi-consumer or the single-consumer
 * version depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
	if (r->cons.sc_dequeue)
		return rte_ring_sc_dequeue(r, obj_p);
	else
		return rte_ring_mc_dequeue(r, obj_p);
}
/**
 * Test if a ring is full.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is full.
 *   - 0: The ring is not full.
 */
static inline int
rte_ring_full(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
}
/**
 * Test if a ring is empty.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is empty.
 *   - 0: The ring is not empty.
 */
static inline int
rte_ring_empty(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return !!(cons_tail == prod_tail);
}
/**
 * Return the number of entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of entries in the ring.
 */
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return ((prod_tail - cons_tail) & r->prod.mask);
}
/**
 * Return the number of free entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of free entries in the ring.
 */
static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return ((cons_tail - prod_tail - 1) & r->prod.mask);
}
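/*
 * Worked example of the count arithmetic above (assumed values): with
 * mask = 1023, prod_tail = 5 and cons_tail = 1020 (the producer index
 * has wrapped), rte_ring_count() gives (5 - 1020) & 1023 = 9 and
 * rte_ring_free_count() gives (1020 - 5 - 1) & 1023 = 1014. Their sum
 * is 1023 = size - 1: the one slot sacrificed to tell full from empty.
 */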
/**
 * Dump the status of all rings on the console.
 */
void rte_ring_list_dump(void);
/**
 * Search for a ring by its name.
 *
 * @param name
 *   The name of the ring.
 * @return
 *   The pointer to the ring matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *   - ENOENT - required entry not available to return.
 */
struct rte_ring *rte_ring_lookup(const char *name);
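/*
 * Example (illustrative sketch): attaching to a ring created elsewhere,
 * for instance from another process sharing the same memzones.
 *
 *	struct rte_ring *r = rte_ring_lookup("example");
 *	if (r == NULL)
 *		(no such ring: rte_errno is set to ENOENT)
 */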
/**
 * Enqueue several objects on the ring (multi-producer safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned n)
{
	return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Enqueue several objects on a ring (NOT multi-producer safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned n)
{
	return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		       unsigned n)
{
	if (r->prod.sp_enqueue)
		return rte_ring_sp_enqueue_burst(r, obj_table, n);
	else
		return rte_ring_mp_enqueue_burst(r, obj_table, n);
}
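/*
 * Example (illustrative sketch): a burst enqueue may place fewer than
 * nb_pkts objects; here the remainder is simply counted as dropped, a
 * common pattern in packet paths. pkts and nb_pkts are hypothetical.
 * RTE_RING_SZ_MASK strips the quota bit in case a watermark is set.
 *
 *	int ret = rte_ring_enqueue_burst(r, (void **)pkts, nb_pkts);
 *	unsigned sent = ret & RTE_RING_SZ_MASK;
 *	unsigned dropped = nb_pkts - sent;
 */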
/**
 * Dequeue several objects from a ring (multi-consumer safe). When the
 * requested number of objects exceeds the number available, only the
 * available objects are dequeued.
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - n: Actual number of objects dequeued, 0 if ring is empty.
 */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Dequeue several objects from a ring (NOT multi-consumer safe). When the
 * requested number of objects exceeds the number available, only the
 * available objects are dequeued.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - n: Actual number of objects dequeued, 0 if ring is empty.
 */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Dequeue multiple objects from a ring up to a maximum number.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The maximum number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - n: Actual number of objects dequeued, 0 if ring is empty.
 */
static inline int __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	if (r->cons.sc_dequeue)
		return rte_ring_sc_dequeue_burst(r, obj_table, n);
	else
		return rte_ring_mc_dequeue_burst(r, obj_table, n);
}
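/*
 * Example (illustrative sketch): draining a ring in bursts of up to 32;
 * process() is a hypothetical per-object handler.
 *
 *	void *objs[32];
 *	int i, nb;
 *	while ((nb = rte_ring_dequeue_burst(r, objs, 32)) > 0) {
 *		for (i = 0; i < nb; i++)
 *			process(objs[i]);
 *	}
 */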
#endif /* _RTE_RING_H_ */