/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Derived from FreeBSD's bufring.h
 *
 **************************************************************************
 *
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. The name of Kip Macy nor the names of other
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ***************************************************************************/
#ifndef _RTE_RING_H_
#define _RTE_RING_H_

/**
 * @file
 * RTE Ring
 *
 * The Ring Manager is a fixed-size queue, implemented as a table of
 * pointers. Head and tail pointers are modified atomically, allowing
 * concurrent access to it. It has the following features:
 *
 * - FIFO (First In First Out)
 * - Maximum size is fixed; the pointers are stored in a table.
 * - Lockless implementation.
 * - Multi- or single-consumer dequeue.
 * - Multi- or single-producer enqueue.
 * - Bulk dequeue.
 * - Bulk enqueue.
 *
 * Note: the ring implementation is not preemptable. An lcore must not
 * be interrupted by another task that uses the same ring.
 * (A short usage sketch follows this comment.)
 */
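/*
 * Minimal usage sketch (editorial addition, not part of the original DPDK
 * documentation). The ring name "MY_RING" is hypothetical, and error
 * handling is reduced to early returns.
 *
 *	#include <rte_ring.h>
 *
 *	static void
 *	ring_example(void)
 *	{
 *		int value = 42;
 *		void *obj = NULL;
 *		struct rte_ring *r;
 *
 *		// A ring of 1024 slots stores at most 1023 objects.
 *		r = rte_ring_create("MY_RING", 1024, SOCKET_ID_ANY, 0);
 *		if (r == NULL)
 *			return;
 *
 *		if (rte_ring_enqueue(r, &value) == 0 &&
 *		    rte_ring_dequeue(r, &obj) == 0) {
 *			// obj now points at value
 *		}
 *	}
 */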
#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <stdint.h>
#include <sys/queue.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
enum rte_ring_queue_behavior {
	RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
	RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from a ring */
};
#ifdef RTE_LIBRTE_RING_DEBUG
/**
 * A structure that stores the ring statistics (per-lcore).
 */
struct rte_ring_debug_stats {
	uint64_t enq_success_bulk; /**< Number of successful enqueues. */
	uint64_t enq_success_objs; /**< Objects successfully enqueued. */
	uint64_t enq_quota_bulk;   /**< Successful enqueues above watermark. */
	uint64_t enq_quota_objs;   /**< Objects enqueued above watermark. */
	uint64_t enq_fail_bulk;    /**< Number of failed enqueues. */
	uint64_t enq_fail_objs;    /**< Objects that failed to be enqueued. */
	uint64_t deq_success_bulk; /**< Number of successful dequeues. */
	uint64_t deq_success_objs; /**< Objects successfully dequeued. */
	uint64_t deq_fail_bulk;    /**< Number of failed dequeues. */
	uint64_t deq_fail_objs;    /**< Objects that failed to be dequeued. */
} __rte_cache_aligned;
#endif
#define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */
#define RTE_RING_MZ_PREFIX "RG_"
/**
 * An RTE ring structure.
 *
 * The producer and the consumer each have a head and a tail index. The
 * particularity of these indexes is that they are not between 0 and
 * size(ring). They are between 0 and 2^32, and we mask their value when we
 * access the ring[] field. Thanks to this assumption, we can do subtractions
 * between two index values in a modulo-32bit base: that's why the overflow of
 * the indexes is not a problem.
 */
struct rte_ring {
	TAILQ_ENTRY(rte_ring) next;   /**< Next in list. */

	char name[RTE_RING_NAMESIZE]; /**< Name of the ring. */
	int flags;                    /**< Flags supplied at creation. */

	/** Ring producer status. */
	struct prod {
		uint32_t watermark;     /**< Maximum items before EDQUOT. */
		uint32_t sp_enqueue;    /**< True, if single producer. */
		uint32_t size;          /**< Size of ring. */
		uint32_t mask;          /**< Mask (size-1) of ring. */
		volatile uint32_t head; /**< Producer head. */
		volatile uint32_t tail; /**< Producer tail. */
	} prod __rte_cache_aligned;

	/** Ring consumer status. */
	struct cons {
		uint32_t sc_dequeue;    /**< True, if single consumer. */
		uint32_t size;          /**< Size of the ring. */
		uint32_t mask;          /**< Mask (size-1) of ring. */
		volatile uint32_t head; /**< Consumer head. */
		volatile uint32_t tail; /**< Consumer tail. */
#ifdef RTE_RING_SPLIT_PROD_CONS
	} cons __rte_cache_aligned;
#else
	} cons;
#endif

#ifdef RTE_LIBRTE_RING_DEBUG
	struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
#endif

	void *ring[0] __rte_cache_aligned; /**< Memory space of ring starts here.
	                                    * Not volatile, so be careful about
	                                    * compiler re-ordering. */
};
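/*
 * Illustration of the modulo-2^32 index arithmetic described above (an
 * editorial sketch, not part of the original header). With a ring of size 8
 * (mask = 7), the indexes only ever grow and eventually wrap past 2^32, yet
 * unsigned subtraction still yields the correct distance between them:
 *
 *	uint32_t cons_tail = UINT32_C(0xfffffffe);
 *	uint32_t prod_head = UINT32_C(0x00000002); // wrapped, 4 slots ahead
 *	uint32_t mask = 7;
 *	// Unsigned arithmetic is itself modulo 2^32, so:
 *	uint32_t used = prod_head - cons_tail;                 // == 4
 *	uint32_t free_entries = mask + cons_tail - prod_head;  // == 3
 */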
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceeded for burst ops. */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask. */
/**
 * @internal When debug is enabled, store ring statistics.
 * @param r
 *   A pointer to the ring.
 * @param name
 *   The name of the statistics field to increment in the ring.
 * @param n
 *   The number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_RING_DEBUG
#define __RING_STAT_ADD(r, name, n) do {               \
		unsigned __lcore_id = rte_lcore_id();  \
		r->stats[__lcore_id].name##_objs += n; \
		r->stats[__lcore_id].name##_bulk += 1; \
	} while(0)
#else
#define __RING_STAT_ADD(r, name, n) do {} while(0)
#endif
/**
 * Create a new ring named *name* in memory.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. Its size is
 * set to *count*, which must be a power of two. Water marking is
 * disabled by default. Note that the real usable ring size is *count-1*
 * instead of *count*.
 *
 * @param name
 *   The name of the ring.
 * @param count
 *   The size of the ring (must be a power of 2).
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   An OR of the following:
 *   - RING_F_SP_ENQ: If this flag is set, the default behavior when
 *     using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *     is "single-producer". Otherwise, it is "multi-producers".
 *   - RING_F_SC_DEQ: If this flag is set, the default behavior when
 *     using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *     is "single-consumer". Otherwise, it is "multi-consumers".
 * @return
 *   On success, the pointer to the new allocated ring. NULL on error with
 *   rte_errno set appropriately. Possible errno values include:
 *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *   - E_RTE_SECONDARY - function was called from a secondary process instance
 *   - E_RTE_NO_TAILQ - no tailq list could be got for the ring list
 *   - EINVAL - count provided is not a power of 2
 *   - ENOSPC - the maximum number of memzones has already been allocated
 *   - EEXIST - a memzone with the same name already exists
 *   - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_ring *rte_ring_create(const char *name, unsigned count,
				 int socket_id, unsigned flags);
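/*
 * Example (editorial sketch): a single-producer/single-consumer ring bound
 * to NUMA socket 0. The ring name and socket are hypothetical; printing
 * rte_errno additionally requires <rte_errno.h>.
 *
 *	struct rte_ring *r = rte_ring_create("SPSC_RING", 4096, 0,
 *					     RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	if (r == NULL)
 *		printf("cannot create ring: %d\n", rte_errno);
 */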
/**
 * Change the high water mark.
 *
 * If *count* is 0, water marking is disabled. Otherwise, it is set to the
 * *count* value. The *count* value must be greater than 0 and less
 * than the ring size.
 *
 * This function can be called at any time (not necessarily at
 * initialization).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param count
 *   The new water mark value.
 * @return
 *   - 0: Success; water mark changed.
 *   - -EINVAL: Invalid water mark value.
 */
int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
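/*
 * Example (editorial sketch): with a water mark set, enqueues past the
 * threshold still succeed but report -EDQUOT, which a producer can treat as
 * back-pressure. The threshold of 3072 on a 4096-slot ring is hypothetical.
 *
 *	rte_ring_set_water_mark(r, 3072);
 *	if (rte_ring_enqueue(r, obj) == -EDQUOT) {
 *		// the object was enqueued, but the ring is filling up
 *	}
 */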
/**
 * Dump the status of the ring to the console.
 *
 * @param r
 *   A pointer to the ring structure.
 */
void rte_ring_dump(const struct rte_ring *r);
/* The actual enqueue of pointers on the ring.
 * Placed here since identical code is needed in both
 * single and multi producer enqueue functions. */
#define ENQUEUE_PTRS() do { \
	const uint32_t size = r->prod.size; \
	uint32_t idx = prod_head & mask; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
			r->ring[idx] = obj_table[i]; \
			r->ring[idx+1] = obj_table[i+1]; \
			r->ring[idx+2] = obj_table[i+2]; \
			r->ring[idx+3] = obj_table[i+3]; \
		} \
		/* fall-through is intentional: copy the 0-3 remaining objects */ \
		switch (n & 0x3) { \
		case 3: r->ring[idx++] = obj_table[i++]; \
		case 2: r->ring[idx++] = obj_table[i++]; \
		case 1: r->ring[idx++] = obj_table[i++]; \
		} \
	} else { \
		/* the copy wraps past the end of the ring */ \
		for (i = 0; idx < size; i++, idx++) \
			r->ring[idx] = obj_table[i]; \
		for (idx = 0; i < n; i++, idx++) \
			r->ring[idx] = obj_table[i]; \
	} \
} while(0)
/* The actual copy of pointers from the ring to obj_table.
 * Placed here since identical code is needed in both
 * single and multi consumer dequeue functions. */
#define DEQUEUE_PTRS() do { \
	uint32_t idx = cons_head & mask; \
	const uint32_t size = r->cons.size; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) { \
			obj_table[i] = r->ring[idx]; \
			obj_table[i+1] = r->ring[idx+1]; \
			obj_table[i+2] = r->ring[idx+2]; \
			obj_table[i+3] = r->ring[idx+3]; \
		} \
		/* fall-through is intentional: copy the 0-3 remaining objects */ \
		switch (n & 0x3) { \
		case 3: obj_table[i++] = r->ring[idx++]; \
		case 2: obj_table[i++] = r->ring[idx++]; \
		case 1: obj_table[i++] = r->ring[idx++]; \
		} \
	} else { \
		/* the copy wraps past the end of the ring */ \
		for (i = 0; idx < size; i++, idx++) \
			obj_table[i] = r->ring[idx]; \
		for (idx = 0; i < n; i++, idx++) \
			obj_table[i] = r->ring[idx]; \
	} \
} while(0)
/**
 * @internal Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items to the ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
 * @return
 *   Depends on the behavior value:
 *   if behavior == RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 *   if behavior == RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t prod_head, prod_next;
	uint32_t cons_tail, free_entries;
	const unsigned max = n;
	int success;
	unsigned i;
	uint32_t mask = r->prod.mask;
	int ret;

	/* move prod.head atomically */
	do {
		/* Reset n to the initial burst count */
		n = max;

		prod_head = r->prod.head;
		cons_tail = r->cons.tail;
		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * prod_head > cons_tail). So 'free_entries' is always between 0
		 * and size(ring)-1. */
		free_entries = (mask + cons_tail - prod_head);

		/* check that we have enough room in ring */
		if (unlikely(n > free_entries)) {
			if (behavior == RTE_RING_QUEUE_FIXED) {
				__RING_STAT_ADD(r, enq_fail, n);
				return -ENOBUFS;
			}

			/* No free entry available */
			if (unlikely(free_entries == 0)) {
				__RING_STAT_ADD(r, enq_fail, n);
				return 0;
			}

			/* variable behavior: enqueue only what fits */
			n = free_entries;
		}

		prod_next = prod_head + n;
		success = rte_atomic32_cmpset(&r->prod.head, prod_head,
					      prod_next);
	} while (unlikely(success == 0));

	/* write entries in ring */
	ENQUEUE_PTRS();
	rte_compiler_barrier();

	/* if we exceed the watermark */
	if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
				(int)(n | RTE_RING_QUOT_EXCEED);
		__RING_STAT_ADD(r, enq_quota, n);
	}
	else {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
		__RING_STAT_ADD(r, enq_success, n);
	}

	/*
	 * If there are other enqueues in progress that preceded us,
	 * we need to wait for them to complete
	 */
	while (unlikely(r->prod.tail != prod_head))
		rte_pause();

	r->prod.tail = prod_next;
	return ret;
}
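/*
 * Editorial note: rte_atomic32_cmpset(dst, exp, src) is a compare-and-swap
 * that returns non-zero on success. A rough functional sketch (not the DPDK
 * implementation) in terms of a GCC builtin:
 *
 *	static inline int
 *	cmpset_sketch(volatile uint32_t *dst, uint32_t exp, uint32_t src)
 *	{
 *		// atomically: if (*dst == exp) { *dst = src; return 1; } return 0;
 *		return __sync_bool_compare_and_swap(dst, exp, src);
 *	}
 *
 * If another producer moved prod.head between the read and the cmpset, the
 * cmpset fails and the do/while loop above recomputes everything and retries.
 */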
/**
 * @internal Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items to the ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
 * @return
 *   Depends on the behavior value:
 *   if behavior == RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 *   if behavior == RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t prod_head, cons_tail;
	uint32_t prod_next, free_entries;
	unsigned i;
	uint32_t mask = r->prod.mask;
	int ret;

	prod_head = r->prod.head;
	cons_tail = r->cons.tail;
	/* The subtraction is done between two unsigned 32-bit values
	 * (the result is always modulo 32 bits even if we have
	 * prod_head > cons_tail). So 'free_entries' is always between 0
	 * and size(ring)-1. */
	free_entries = mask + cons_tail - prod_head;

	/* check that we have enough room in ring */
	if (unlikely(n > free_entries)) {
		if (behavior == RTE_RING_QUEUE_FIXED) {
			__RING_STAT_ADD(r, enq_fail, n);
			return -ENOBUFS;
		}

		/* No free entry available */
		if (unlikely(free_entries == 0)) {
			__RING_STAT_ADD(r, enq_fail, n);
			return 0;
		}

		/* variable behavior: enqueue only what fits */
		n = free_entries;
	}

	prod_next = prod_head + n;
	r->prod.head = prod_next;

	/* write entries in ring */
	ENQUEUE_PTRS();
	rte_compiler_barrier();

	/* if we exceed the watermark */
	if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
				(int)(n | RTE_RING_QUOT_EXCEED);
		__RING_STAT_ADD(r, enq_quota, n);
	}
	else {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
		__RING_STAT_ADD(r, enq_success, n);
	}

	r->prod.tail = prod_next;
	return ret;
}
/**
 * @internal Dequeue several objects from a ring (multi-consumers safe). When
 * the requested objects are more than the available objects, only dequeue the
 * actual number of objects.
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from the ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
 * @return
 *   Depends on the behavior value:
 *   if behavior == RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 *   if behavior == RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects dequeued.
 */
static inline int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t cons_head, prod_tail;
	uint32_t cons_next, entries;
	const unsigned max = n;
	int success;
	unsigned i;
	uint32_t mask = r->prod.mask; /* prod.mask == cons.mask */

	/* move cons.head atomically */
	do {
		/* Restore n as it may change every loop */
		n = max;

		cons_head = r->cons.head;
		prod_tail = r->prod.tail;
		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * cons_head > prod_tail). So 'entries' is always between 0
		 * and size(ring)-1. */
		entries = (prod_tail - cons_head);

		/* Set the actual entries for dequeue */
		if (n > entries) {
			if (behavior == RTE_RING_QUEUE_FIXED) {
				__RING_STAT_ADD(r, deq_fail, n);
				return -ENOENT;
			}

			if (unlikely(entries == 0)) {
				__RING_STAT_ADD(r, deq_fail, n);
				return 0;
			}

			/* variable behavior: dequeue only what is available */
			n = entries;
		}

		cons_next = cons_head + n;
		success = rte_atomic32_cmpset(&r->cons.head, cons_head,
					      cons_next);
	} while (unlikely(success == 0));

	/* copy in table */
	DEQUEUE_PTRS();
	rte_compiler_barrier();

	/*
	 * If there are other dequeues in progress that preceded us,
	 * we need to wait for them to complete
	 */
	while (unlikely(r->cons.tail != cons_head))
		rte_pause();

	__RING_STAT_ADD(r, deq_success, n);
	r->cons.tail = cons_next;

	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
/**
 * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
 * When the requested objects are more than the available objects, only dequeue
 * the actual number of objects.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from the ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
 * @return
 *   Depends on the behavior value:
 *   if behavior == RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 *   if behavior == RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects dequeued.
 */
static inline int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t cons_head, prod_tail;
	uint32_t cons_next, entries;
	unsigned i;
	uint32_t mask = r->prod.mask; /* prod.mask == cons.mask */

	cons_head = r->cons.head;
	prod_tail = r->prod.tail;
	/* The subtraction is done between two unsigned 32-bit values
	 * (the result is always modulo 32 bits even if we have
	 * cons_head > prod_tail). So 'entries' is always between 0
	 * and size(ring)-1. */
	entries = prod_tail - cons_head;

	if (n > entries) {
		if (behavior == RTE_RING_QUEUE_FIXED) {
			__RING_STAT_ADD(r, deq_fail, n);
			return -ENOENT;
		}

		if (unlikely(entries == 0)) {
			__RING_STAT_ADD(r, deq_fail, n);
			return 0;
		}

		/* variable behavior: dequeue only what is available */
		n = entries;
	}

	cons_next = cons_head + n;
	r->cons.head = cons_next;

	/* copy in table */
	DEQUEUE_PTRS();
	rte_compiler_barrier();

	__RING_STAT_ADD(r, deq_success, n);
	r->cons.tail = cons_next;
	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
/**
 * Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned n)
{
	return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned n)
{
	return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		      unsigned n)
{
	if (r->prod.sp_enqueue)
		return rte_ring_sp_enqueue_bulk(r, obj_table, n);
	else
		return rte_ring_mp_enqueue_bulk(r, obj_table, n);
}
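/*
 * Example (editorial sketch): bulk enqueue is all-or-nothing; a batch either
 * fully enters the ring or nothing is enqueued. The batch size of 32 is
 * hypothetical.
 *
 *	void *batch[32];
 *	// ... fill batch[0..31] ...
 *	if (rte_ring_enqueue_bulk(r, batch, 32) == -ENOBUFS) {
 *		// fewer than 32 free slots: nothing was enqueued
 *	}
 */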
/**
 * Enqueue one object on a ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_mp_enqueue_bulk(r, &obj, 1);
}
/**
 * Enqueue one object on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_sp_enqueue_bulk(r, &obj, 1);
}
/**
 * Enqueue one object on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
	if (r->prod.sp_enqueue)
		return rte_ring_sp_enqueue(r, obj);
	else
		return rte_ring_mp_enqueue(r, obj);
}
/**
 * Dequeue several objects from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Dequeue several objects from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table;
 *   must be strictly positive.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
 * Dequeue several objects from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	if (r->cons.sc_dequeue)
		return rte_ring_sc_dequeue_bulk(r, obj_table, n);
	else
		return rte_ring_mc_dequeue_bulk(r, obj_table, n);
}
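/*
 * Example (editorial sketch): bulk dequeue is likewise all-or-nothing. The
 * batch size of 32 is hypothetical.
 *
 *	void *batch[32];
 *	if (rte_ring_dequeue_bulk(r, batch, 32) == 0) {
 *		// exactly 32 objects were placed in batch[]
 *	}
 */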
/**
 * Dequeue one object from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
}
/**
 * Dequeue one object from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
}
/**
 * Dequeue one object from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
	if (r->cons.sc_dequeue)
		return rte_ring_sc_dequeue(r, obj_p);
	else
		return rte_ring_mc_dequeue(r, obj_p);
}
/**
 * Test if a ring is full.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is full.
 *   - 0: The ring is not full.
 */
static inline int
rte_ring_full(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
}
/**
 * Test if a ring is empty.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is empty.
 *   - 0: The ring is not empty.
 */
static inline int
rte_ring_empty(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return !!(cons_tail == prod_tail);
}
/**
 * Return the number of entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of entries in the ring.
 */
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return ((prod_tail - cons_tail) & r->prod.mask);
}
/**
 * Return the number of free entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of free entries in the ring.
 */
static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return ((cons_tail - prod_tail - 1) & r->prod.mask);
}
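/*
 * Editorial note: since one slot always stays empty to tell "full" apart
 * from "empty", for a ring created with size N the two counters satisfy:
 *
 *	rte_ring_count(r) + rte_ring_free_count(r) == N - 1
 *
 * e.g. a freshly created 1024-slot ring reports 0 used and 1023 free.
 */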
/**
 * Dump the status of all rings on the console.
 */
void rte_ring_list_dump(void);
/**
 * Search a ring from its name.
 *
 * @param name
 *   The name of the ring.
 * @return
 *   The pointer to the ring matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *   - ENOENT - required entry not available to return.
 */
struct rte_ring *rte_ring_lookup(const char *name);
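/*
 * Example (editorial sketch): retrieving a ring created elsewhere, for
 * instance by another process sharing the same memory. "MY_RING" is the
 * hypothetical name used at creation time.
 *
 *	struct rte_ring *r = rte_ring_lookup("MY_RING");
 *	if (r == NULL) {
 *		// not found: rte_errno is set to ENOENT
 *	}
 */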
/**
 * Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned n)
{
	return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned n)
{
	return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline int __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		       unsigned n)
{
	if (r->prod.sp_enqueue)
		return rte_ring_sp_enqueue_burst(r, obj_table, n);
	else
		return rte_ring_mp_enqueue_burst(r, obj_table, n);
}
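/*
 * Example (editorial sketch): a burst enqueue is best-effort and returns the
 * number of objects actually enqueued, so a caller can retry the remainder.
 * This assumes no water mark is set; with one, the return value may carry
 * the RTE_RING_QUOT_EXCEED bit, which must be cleared before using it as a
 * count. The batch size of 32 is hypothetical.
 *
 *	void *batch[32];
 *	unsigned sent = 0;
 *	// ... fill batch[0..31] ...
 *	while (sent < 32)
 *		sent += rte_ring_enqueue_burst(r, batch + sent, 32 - sent);
 */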
/**
 * Dequeue several objects from a ring (multi-consumers safe). When the
 * requested objects are more than the available objects, only dequeue the
 * actual number of objects.
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - n: Actual number of objects dequeued, 0 if the ring is empty.
 */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Dequeue several objects from a ring (NOT multi-consumers safe). When the
 * requested objects are more than the available objects, only dequeue the
 * actual number of objects.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - n: Actual number of objects dequeued, 0 if the ring is empty.
 */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
 * Dequeue multiple objects from a ring up to a maximum number.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - Number of objects dequeued, or a negative error code on error.
 */
static inline int __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	if (r->cons.sc_dequeue)
		return rte_ring_sc_dequeue_burst(r, obj_table, n);
	else
		return rte_ring_mc_dequeue_burst(r, obj_table, n);
}
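/*
 * Example (editorial sketch): draining a ring with burst dequeues. The
 * batch size of 32 is hypothetical.
 *
 *	void *batch[32];
 *	int nb;
 *	while ((nb = rte_ring_dequeue_burst(r, batch, 32)) > 0) {
 *		// process batch[0..nb-1]
 *	}
 */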
#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_H_ */