/*-
 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Derived from FreeBSD's bufring.h
 *
 **************************************************************************
 *
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. The name of Kip Macy nor the names of other
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ***************************************************************************/

#ifndef _RTE_RING_H_
#define _RTE_RING_H_

/**
 * @file
 * RTE Ring
 *
 * The Ring Manager is a fixed-size queue, implemented as a table of
 * pointers. Head and tail pointers are modified atomically, allowing
 * concurrent access to it. It has the following features:
 *
 * - FIFO (First In First Out)
 * - Maximum size is fixed; the pointers are stored in a table.
 * - Lockless implementation.
 * - Multi- or single-consumer dequeue.
 * - Multi- or single-producer enqueue.
 *
 * Note: the ring implementation is not preemptible. An lcore must not
 * be interrupted by another task that uses the same ring.
 */
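
/*
 * A minimal usage sketch (illustrative only; the ring pointer r and the
 * object pointer obj are assumed to be set up elsewhere):
 *
 *      producer lcore:   rte_ring_enqueue(r, obj);
 *      consumer lcore:   rte_ring_dequeue(r, &obj);
 *
 * Both calls resolve to the multi- or single-producer/consumer variants
 * according to the flags given at ring creation time.
 */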

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <errno.h>
#include <sys/queue.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>

enum rte_ring_queue_behavior {
        RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
        RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from a ring */
};

#ifdef RTE_LIBRTE_RING_DEBUG
/**
 * A structure that stores the ring statistics (per-lcore).
 */
struct rte_ring_debug_stats {
        uint64_t enq_success_bulk; /**< Successful enqueues number. */
        uint64_t enq_success_objs; /**< Objects successfully enqueued. */
        uint64_t enq_quota_bulk;   /**< Successful enqueues above watermark. */
        uint64_t enq_quota_objs;   /**< Objects enqueued above watermark. */
        uint64_t enq_fail_bulk;    /**< Failed enqueues number. */
        uint64_t enq_fail_objs;    /**< Objects that failed to be enqueued. */
        uint64_t deq_success_bulk; /**< Successful dequeues number. */
        uint64_t deq_success_objs; /**< Objects successfully dequeued. */
        uint64_t deq_fail_bulk;    /**< Failed dequeues number. */
        uint64_t deq_fail_objs;    /**< Objects that failed to be dequeued. */
} __rte_cache_aligned;
#endif

#define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */

/**
 * An RTE ring structure.
 *
 * The producer and the consumer have a head and a tail index. The
 * particularity of these indexes is that they are not between 0 and
 * size(ring). These indexes are between 0 and 2^32, and we mask their
 * value when we access the ring[] field. Thanks to this assumption, we
 * can do subtractions between two index values in a modulo-32bit base:
 * that's why the overflow of the indexes is not a problem.
 */
struct rte_ring {
        TAILQ_ENTRY(rte_ring) next;   /**< Next in list. */

        char name[RTE_RING_NAMESIZE]; /**< Name of the ring. */
        int flags;                    /**< Flags supplied at creation. */

        /** Ring producer status. */
        struct prod {
                uint32_t watermark;     /**< Maximum items before EDQUOT. */
                uint32_t sp_enqueue;    /**< True, if single producer. */
                uint32_t size;          /**< Size of ring. */
                uint32_t mask;          /**< Mask (size-1) of ring. */
                volatile uint32_t head; /**< Producer head. */
                volatile uint32_t tail; /**< Producer tail. */
        } prod __rte_cache_aligned;

        /** Ring consumer status. */
        struct cons {
                uint32_t sc_dequeue;    /**< True, if single consumer. */
                uint32_t size;          /**< Size of the ring. */
                uint32_t mask;          /**< Mask (size-1) of ring. */
                volatile uint32_t head; /**< Consumer head. */
                volatile uint32_t tail; /**< Consumer tail. */
        } cons __rte_cache_aligned;

#ifdef RTE_LIBRTE_RING_DEBUG
        struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
#endif

        void * volatile ring[0] __rte_cache_aligned; /**< Memory space of ring starts here. */
};
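
/*
 * Worked example of the index arithmetic above (illustrative; assume a
 * ring of size 8, so mask = 7): if prod.head has wrapped around to 2
 * while cons.tail is still 0xFFFFFFFE, the producer computes
 *
 *      free_entries = mask + cons_tail - prod_head
 *                   = 7 + 0xFFFFFFFE - 2
 *                   = 3   (modulo 2^32)
 *
 * which is the correct number of free slots; this is why index overflow
 * is not a problem.
 */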

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_QUOT_EXCEED (1 << 31)          /**< Quota exceeded for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */

/**
 * @internal When debug is enabled, store ring statistics.
 * @param r
 *   A pointer to the ring.
 * @param name
 *   The name of the statistics field to increment in the ring.
 * @param n
 *   The number of objects to add to the statistics.
 */
#ifdef RTE_LIBRTE_RING_DEBUG
#define __RING_STAT_ADD(r, name, n) do {                \
                unsigned __lcore_id = rte_lcore_id();   \
                r->stats[__lcore_id].name##_objs += n;  \
                r->stats[__lcore_id].name##_bulk += 1;  \
        } while(0)
#else
#define __RING_STAT_ADD(r, name, n) do {} while(0)
#endif

/**
 * Create a new ring named *name* in memory.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. Its size is
 * set to *count*, which must be a power of two. Water marking is
 * disabled by default.
 * Note that the real usable ring size is *count-1* instead of *count*.
 *
 * @param name
 *   The name of the ring.
 * @param count
 *   The size of the ring (must be a power of 2).
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   An OR of the following:
 *   - RING_F_SP_ENQ: If this flag is set, the default behavior when
 *     using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *     is "single-producer". Otherwise, it is "multi-producers".
 *   - RING_F_SC_DEQ: If this flag is set, the default behavior when
 *     using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *     is "single-consumer". Otherwise, it is "multi-consumers".
 * @return
 *   On success, the pointer to the new allocated ring. NULL on error with
 *   rte_errno set appropriately. Possible errno values include:
 *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *   - E_RTE_SECONDARY - function was called from a secondary process instance
 *   - E_RTE_NO_TAILQ - no tailq list could be got for the ring list
 *   - EINVAL - count provided is not a power of 2
 *   - ENOSPC - the maximum number of memzones has already been allocated
 *   - EEXIST - a memzone with the same name already exists
 *   - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_ring *rte_ring_create(const char *name, unsigned count,
                                 int socket_id, unsigned flags);
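
/*
 * Example of creating a ring (an illustrative sketch; the name
 * "test_ring" and the size 1024 are arbitrary):
 *
 *      struct rte_ring *r = rte_ring_create("test_ring", 1024,
 *              SOCKET_ID_ANY, RING_F_SP_ENQ | RING_F_SC_DEQ);
 *      if (r == NULL)
 *              ... creation failed, inspect rte_errno ...
 *
 * Since one slot is always kept free, this ring holds at most 1023
 * object pointers at a time.
 */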

/**
 * Change the high water mark.
 *
 * If *count* is 0, water marking is disabled. Otherwise, it is set to the
 * *count* value. The *count* value must be greater than 0 and less
 * than the ring size.
 *
 * This function can be called at any time (not necessarily at
 * initialization).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param count
 *   The new water mark value.
 * @return
 *   - 0: Success; water mark changed.
 *   - -EINVAL: Invalid water mark value.
 */
int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
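
/*
 * Example of using the water mark (illustrative): enqueues that push the
 * ring above the mark still succeed, but report -EDQUOT so the producer
 * can apply backpressure:
 *
 *      rte_ring_set_water_mark(r, 800);
 *      if (rte_ring_enqueue(r, obj) == -EDQUOT)
 *              ... obj was enqueued, but the ring is above the mark ...
 */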

/**
 * Dump the status of the ring to the console.
 *
 * @param r
 *   A pointer to the ring structure.
 */
void rte_ring_dump(const struct rte_ring *r);

/**
 * @internal Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items to the ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
 * @return
 *   Depends on the behavior value:
 *   if behavior == RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 *   if behavior == RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects enqueued.
 */
static inline int
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
                         unsigned n, enum rte_ring_queue_behavior behavior)
{
        uint32_t prod_head, prod_next;
        uint32_t cons_tail, free_entries;
        const unsigned max = n;
        int success;
        unsigned i;
        uint32_t mask = r->prod.mask;
        int ret;

        /* move prod.head atomically */
        do {
                /* Reset n to the initial burst count */
                n = max;

                prod_head = r->prod.head;
                cons_tail = r->cons.tail;
                /* The subtraction is done between two unsigned 32-bit values
                 * (the result is always modulo 32 bits even if we have
                 * prod_head > cons_tail). So 'free_entries' is always between 0
                 * and size(ring)-1. */
                free_entries = (mask + cons_tail - prod_head);

                /* check that we have enough room in ring */
                if (unlikely(n > free_entries)) {
                        if (behavior == RTE_RING_QUEUE_FIXED) {
                                __RING_STAT_ADD(r, enq_fail, n);
                                return -ENOBUFS;
                        }

                        /* No free entry available */
                        if (unlikely(free_entries == 0)) {
                                __RING_STAT_ADD(r, enq_fail, n);
                                return 0;
                        }

                        /* enqueue only what fits */
                        n = free_entries;
                }

                prod_next = prod_head + n;
                success = rte_atomic32_cmpset(&r->prod.head, prod_head,
                                              prod_next);
        } while (unlikely(success == 0));

        /* write entries in ring */
        for (i = 0; likely(i < n); i++)
                r->ring[(prod_head + i) & mask] = obj_table[i];
        rte_wmb();

        /* if we exceed the watermark */
        if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
                ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
                        (int)(n | RTE_RING_QUOT_EXCEED);
                __RING_STAT_ADD(r, enq_quota, n);
        }
        else {
                ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
                __RING_STAT_ADD(r, enq_success, n);
        }

        /*
         * If there are other enqueues in progress that preceded us,
         * we need to wait for them to complete
         */
        while (unlikely(r->prod.tail != prod_head))
                rte_pause();

        r->prod.tail = prod_next;
        return ret;
}

/**
 * @internal Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items to the ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
 * @return
 *   Depends on the behavior value:
 *   if behavior == RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 *   if behavior == RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects enqueued.
 */
static inline int
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
                         unsigned n, enum rte_ring_queue_behavior behavior)
{
        uint32_t prod_head, cons_tail;
        uint32_t prod_next, free_entries;
        unsigned i;
        uint32_t mask = r->prod.mask;
        int ret;

        prod_head = r->prod.head;
        cons_tail = r->cons.tail;
        /* The subtraction is done between two unsigned 32-bit values
         * (the result is always modulo 32 bits even if we have
         * prod_head > cons_tail). So 'free_entries' is always between 0
         * and size(ring)-1. */
        free_entries = mask + cons_tail - prod_head;

        /* check that we have enough room in ring */
        if (unlikely(n > free_entries)) {
                if (behavior == RTE_RING_QUEUE_FIXED) {
                        __RING_STAT_ADD(r, enq_fail, n);
                        return -ENOBUFS;
                }

                /* No free entry available */
                if (unlikely(free_entries == 0)) {
                        __RING_STAT_ADD(r, enq_fail, n);
                        return 0;
                }

                /* enqueue only what fits */
                n = free_entries;
        }

        prod_next = prod_head + n;
        r->prod.head = prod_next;

        /* write entries in ring */
        for (i = 0; likely(i < n); i++)
                r->ring[(prod_head + i) & mask] = obj_table[i];
        rte_wmb();

        /* if we exceed the watermark */
        if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
                ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
                        (int)(n | RTE_RING_QUOT_EXCEED);
                __RING_STAT_ADD(r, enq_quota, n);
        }
        else {
                ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
                __RING_STAT_ADD(r, enq_success, n);
        }

        r->prod.tail = prod_next;
        return ret;
}

/**
 * @internal Dequeue several objects from a ring (multi-consumers safe). When
 * the number of requested objects exceeds the number of available objects,
 * only the available objects are dequeued.
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from the ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
 * @return
 *   Depends on the behavior value:
 *   if behavior == RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 *   if behavior == RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects dequeued.
 */
static inline int
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
                         unsigned n, enum rte_ring_queue_behavior behavior)
{
        uint32_t cons_head, prod_tail;
        uint32_t cons_next, entries;
        const unsigned max = n;
        int success;
        unsigned i;
        uint32_t mask = r->prod.mask;

        /* move cons.head atomically */
        do {
                /* Restore n as it may change every loop */
                n = max;

                cons_head = r->cons.head;
                prod_tail = r->prod.tail;
                /* The subtraction is done between two unsigned 32-bit values
                 * (the result is always modulo 32 bits even if we have
                 * cons_head > prod_tail). So 'entries' is always between 0
                 * and size(ring)-1. */
                entries = (prod_tail - cons_head);

                /* Set the actual entries for dequeue */
                if (unlikely(n > entries)) {
                        if (behavior == RTE_RING_QUEUE_FIXED) {
                                __RING_STAT_ADD(r, deq_fail, n);
                                return -ENOENT;
                        }

                        if (unlikely(entries == 0)) {
                                __RING_STAT_ADD(r, deq_fail, n);
                                return 0;
                        }

                        /* dequeue only what is available */
                        n = entries;
                }

                cons_next = cons_head + n;
                success = rte_atomic32_cmpset(&r->cons.head, cons_head,
                                              cons_next);
        } while (unlikely(success == 0));

        /* copy the objects out of the ring */
        rte_rmb();
        for (i = 0; likely(i < n); i++) {
                obj_table[i] = r->ring[(cons_head + i) & mask];
        }

        /*
         * If there are other dequeues in progress that preceded us,
         * we need to wait for them to complete
         */
        while (unlikely(r->cons.tail != cons_head))
                rte_pause();

        __RING_STAT_ADD(r, deq_success, n);
        r->cons.tail = cons_next;

        return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}

/**
 * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
 * When the number of requested objects exceeds the number of available
 * objects, only the available objects are dequeued.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from the ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
 * @return
 *   Depends on the behavior value:
 *   if behavior == RTE_RING_QUEUE_FIXED
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 *   if behavior == RTE_RING_QUEUE_VARIABLE
 *   - n: Actual number of objects dequeued.
 */
static inline int
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
                         unsigned n, enum rte_ring_queue_behavior behavior)
{
        uint32_t cons_head, prod_tail;
        uint32_t cons_next, entries;
        unsigned i;
        uint32_t mask = r->prod.mask;

        cons_head = r->cons.head;
        prod_tail = r->prod.tail;
        /* The subtraction is done between two unsigned 32-bit values
         * (the result is always modulo 32 bits even if we have
         * cons_head > prod_tail). So 'entries' is always between 0
         * and size(ring)-1. */
        entries = prod_tail - cons_head;

        if (unlikely(n > entries)) {
                if (behavior == RTE_RING_QUEUE_FIXED) {
                        __RING_STAT_ADD(r, deq_fail, n);
                        return -ENOENT;
                }

                if (unlikely(entries == 0)) {
                        __RING_STAT_ADD(r, deq_fail, n);
                        return 0;
                }

                /* dequeue only what is available */
                n = entries;
        }

        cons_next = cons_head + n;
        r->cons.head = cons_next;

        /* copy the objects out of the ring */
        rte_rmb();
        for (i = 0; likely(i < n); i++) {
                obj_table[i] = r->ring[(cons_head + i) & mask];
        }

        __RING_STAT_ADD(r, deq_success, n);
        r->cons.tail = cons_next;

        return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}

/**
 * Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
                         unsigned n)
{
        return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

/**
 * Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
                         unsigned n)
{
        return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
                      unsigned n)
{
        if (r->prod.sp_enqueue)
                return rte_ring_sp_enqueue_bulk(r, obj_table, n);
        else
                return rte_ring_mp_enqueue_bulk(r, obj_table, n);
}
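
/*
 * Example of handling the bulk enqueue return codes (illustrative;
 * "pkts" is an assumed table of 8 object pointers):
 *
 *      int ret = rte_ring_enqueue_bulk(r, pkts, 8);
 *      if (ret == -ENOBUFS)
 *              ... nothing was enqueued; retry or drop the batch ...
 *      else if (ret == -EDQUOT)
 *              ... all 8 were enqueued, but the water mark is exceeded ...
 *
 * Bulk enqueue is all-or-nothing: on -ENOBUFS the ring is left untouched.
 */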

/**
 * Enqueue one object on a ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
        return rte_ring_mp_enqueue_bulk(r, &obj, 1);
}

/**
 * Enqueue one object on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
        return rte_ring_sp_enqueue_bulk(r, &obj, 1);
}

/**
 * Enqueue one object on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @return
 *   - 0: Success; object enqueued.
 *   - -EDQUOT: Quota exceeded. The object has been enqueued, but the
 *     high water mark is exceeded.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static inline int
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
        if (r->prod.sp_enqueue)
                return rte_ring_sp_enqueue(r, obj);
        else
                return rte_ring_mp_enqueue(r, obj);
}

/**
 * Dequeue several objects from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
        return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

/**
 * Dequeue several objects from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table;
 *   must be strictly positive.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
        return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

/**
 * Dequeue several objects from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
        if (r->cons.sc_dequeue)
                return rte_ring_sc_dequeue_bulk(r, obj_table, n);
        else
                return rte_ring_mc_dequeue_bulk(r, obj_table, n);
}
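
/*
 * Example of a bulk dequeue (illustrative; "objs" is an assumed table):
 *
 *      void *objs[8];
 *      if (rte_ring_dequeue_bulk(r, objs, 8) == 0)
 *              ... exactly 8 objects were copied into objs ...
 *
 * Like bulk enqueue, this is all-or-nothing: on -ENOENT the ring is left
 * untouched and objs is not filled.
 */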

/**
 * Dequeue one object from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
        return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
}

/**
 * Dequeue one object from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
        return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
}

/**
 * Dequeue one object from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to a void * pointer (object) that will be filled.
 * @return
 *   - 0: Success; object dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static inline int
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
        if (r->cons.sc_dequeue)
                return rte_ring_sc_dequeue(r, obj_p);
        else
                return rte_ring_mc_dequeue(r, obj_p);
}
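
/*
 * Example of a single-object round trip (illustrative; "m" is any
 * pointer-sized object):
 *
 *      void *obj;
 *      int ret = rte_ring_enqueue(r, m);
 *      if (ret == -ENOBUFS)
 *              ... ring full, m was not enqueued ...
 *      if (rte_ring_dequeue(r, &obj) == 0)
 *              ... obj now holds the oldest pointer in the ring ...
 */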

/**
 * Test if a ring is full.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is full.
 *   - 0: The ring is not full.
 */
static inline int
rte_ring_full(const struct rte_ring *r)
{
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
        return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
}

/**
 * Test if a ring is empty.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is empty.
 *   - 0: The ring is not empty.
 */
static inline int
rte_ring_empty(const struct rte_ring *r)
{
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
        return !!(cons_tail == prod_tail);
}

/**
 * Return the number of entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of entries in the ring.
 */
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
        return ((prod_tail - cons_tail) & r->prod.mask);
}

/**
 * Return the number of free entries in a ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The number of free entries in the ring.
 */
static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
        return ((cons_tail - prod_tail - 1) & r->prod.mask);
}
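
/*
 * Worked example of the occupancy arithmetic (illustrative; assume a ring
 * created with count = 8, so mask = 7): because one slot is always kept
 * free, any consistent snapshot satisfies
 *
 *      rte_ring_count(r) + rte_ring_free_count(r) == 7   (count - 1)
 *
 * e.g. with 3 entries in the ring, rte_ring_free_count() returns 4.
 */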

/**
 * Dump the status of all rings on the console.
 */
void rte_ring_list_dump(void);

/**
 * Search a ring by its name.
 *
 * @param name
 *   The name of the ring.
 * @return
 *   The pointer to the ring matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *   - ENOENT - required entry not available to return.
 */
struct rte_ring *rte_ring_lookup(const char *name);
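
/*
 * Example (illustrative): retrieving, by name, a ring created elsewhere
 * in the application:
 *
 *      struct rte_ring *r = rte_ring_lookup("test_ring");
 *      if (r == NULL)
 *              ... no such ring; rte_errno is set to ENOENT ...
 */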

/**
 * Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline int
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
                          unsigned n)
{
        return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

/**
 * Enqueue several objects on a ring (NOT multi-producers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline int
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
                          unsigned n)
{
        return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static inline int
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
                       unsigned n)
{
        if (r->prod.sp_enqueue)
                return rte_ring_sp_enqueue_burst(r, obj_table, n);
        else
                return rte_ring_mp_enqueue_burst(r, obj_table, n);
}
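
/*
 * Example of a burst enqueue with partial success (illustrative; assumes
 * no water mark is set, so the return value is a plain count):
 *
 *      unsigned sent = rte_ring_enqueue_burst(r, pkts, nb_pkts);
 *      if (sent < nb_pkts)
 *              ... only the first sent pointers fit; handle the rest ...
 */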

/**
 * Dequeue several objects from a ring (multi-consumers safe). When the
 * number of requested objects exceeds the number of available objects,
 * only the available objects are dequeued.
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - n: Actual number of objects dequeued, 0 if ring is empty.
 */
static inline int
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
        return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

/**
 * Dequeue several objects from a ring (NOT multi-consumers safe). When the
 * number of requested objects exceeds the number of available objects,
 * only the available objects are dequeued.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - n: Actual number of objects dequeued, 0 if ring is empty.
 */
static inline int
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
        return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

/**
 * Dequeue multiple objects from a ring up to a maximum number.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects) that will be filled.
 * @param n
 *   The maximum number of objects to dequeue from the ring to the obj_table.
 * @return
 *   - Number of objects dequeued, or a negative error code on error.
 */
static inline int
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
        if (r->cons.sc_dequeue)
                return rte_ring_sc_dequeue_burst(r, obj_table, n);
        else
                return rte_ring_mc_dequeue_burst(r, obj_table, n);
}
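
/*
 * Example of draining a ring with bursts (illustrative):
 *
 *      void *objs[32];
 *      int nb;
 *      while ((nb = rte_ring_dequeue_burst(r, objs, 32)) > 0)
 *              ... process the nb pointers just dequeued ...
 *
 * The loop exits when a burst returns 0, i.e. the ring is empty.
 */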

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_H_ */