/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

/**
 * @file
 * RTE Ring with user defined element size
 */
26 #include <sys/queue.h>
28 #include <rte_common.h>
29 #include <rte_config.h>
30 #include <rte_memory.h>
31 #include <rte_lcore.h>
32 #include <rte_atomic.h>
33 #include <rte_branch_prediction.h>
34 #include <rte_memzone.h>
35 #include <rte_pause.h>
41 * @b EXPERIMENTAL: this API may change without prior notice
43 * Calculate the memory size needed for a ring with given element size
45 * This function returns the number of bytes needed for a ring, given
46 * the number of elements in it and the size of the element. This value
47 * is the sum of the size of the structure rte_ring and the size of the
48 * memory needed for storing the elements. The value is aligned to a cache
52 * The size of ring element, in bytes. It must be a multiple of 4.
54 * The number of elements in the ring (must be a power of 2).
56 * - The memory size needed for the ring on success.
57 * - -EINVAL - esize is not a multiple of 4 or count provided is not a
61 ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
65 * @b EXPERIMENTAL: this API may change without prior notice
67 * Create a new ring named *name* that stores elements with given size.
69 * This function uses ``memzone_reserve()`` to allocate memory. Then it
70 * calls rte_ring_init() to initialize an empty ring.
72 * The new ring size is set to *count*, which must be a power of
73 * two. Water marking is disabled by default. The real usable ring size
74 * is *count-1* instead of *count* to differentiate a free ring from an
77 * The ring is added in RTE_TAILQ_RING list.
80 * The name of the ring.
82 * The size of ring element, in bytes. It must be a multiple of 4.
84 * The number of elements in the ring (must be a power of 2).
86 * The *socket_id* argument is the socket identifier in case of
87 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
88 * constraint for the reserved zone.
90 * An OR of the following:
91 * - RING_F_SP_ENQ: If this flag is set, the default behavior when
92 * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
93 * is "single-producer". Otherwise, it is "multi-producers".
94 * - RING_F_SC_DEQ: If this flag is set, the default behavior when
95 * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
96 * is "single-consumer". Otherwise, it is "multi-consumers".
98 * On success, the pointer to the new allocated ring. NULL on error with
99 * rte_errno set appropriately. Possible errno values include:
100 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
101 * - E_RTE_SECONDARY - function was called from a secondary process instance
102 * - EINVAL - esize is not a multiple of 4 or count provided is not a
104 * - ENOSPC - the maximum number of memzones has already been allocated
105 * - EEXIST - a memzone with the same name already exists
106 * - ENOMEM - no appropriate memory area found in which to create memzone
109 struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
110 unsigned int count, int socket_id, unsigned int flags);
112 static __rte_always_inline void
113 enqueue_elems_32(struct rte_ring *r, const uint32_t size, uint32_t idx,
114 const void *obj_table, uint32_t n)
117 uint32_t *ring = (uint32_t *)&r[1];
118 const uint32_t *obj = (const uint32_t *)obj_table;
119 if (likely(idx + n < size)) {
120 for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
122 ring[idx + 1] = obj[i + 1];
123 ring[idx + 2] = obj[i + 2];
124 ring[idx + 3] = obj[i + 3];
125 ring[idx + 4] = obj[i + 4];
126 ring[idx + 5] = obj[i + 5];
127 ring[idx + 6] = obj[i + 6];
128 ring[idx + 7] = obj[i + 7];
132 ring[idx++] = obj[i++]; /* fallthrough */
134 ring[idx++] = obj[i++]; /* fallthrough */
136 ring[idx++] = obj[i++]; /* fallthrough */
138 ring[idx++] = obj[i++]; /* fallthrough */
140 ring[idx++] = obj[i++]; /* fallthrough */
142 ring[idx++] = obj[i++]; /* fallthrough */
144 ring[idx++] = obj[i++]; /* fallthrough */
147 for (i = 0; idx < size; i++, idx++)
149 /* Start at the beginning */
150 for (idx = 0; i < n; i++, idx++)
155 static __rte_always_inline void
156 enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
157 const void *obj_table, uint32_t n)
160 const uint32_t size = r->size;
161 uint32_t idx = prod_head & r->mask;
162 uint64_t *ring = (uint64_t *)&r[1];
163 const uint64_t *obj = (const uint64_t *)obj_table;
164 if (likely(idx + n < size)) {
165 for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
167 ring[idx + 1] = obj[i + 1];
168 ring[idx + 2] = obj[i + 2];
169 ring[idx + 3] = obj[i + 3];
173 ring[idx++] = obj[i++]; /* fallthrough */
175 ring[idx++] = obj[i++]; /* fallthrough */
177 ring[idx++] = obj[i++];
180 for (i = 0; idx < size; i++, idx++)
182 /* Start at the beginning */
183 for (idx = 0; i < n; i++, idx++)
188 static __rte_always_inline void
189 enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
190 const void *obj_table, uint32_t n)
193 const uint32_t size = r->size;
194 uint32_t idx = prod_head & r->mask;
195 rte_int128_t *ring = (rte_int128_t *)&r[1];
196 const rte_int128_t *obj = (const rte_int128_t *)obj_table;
197 if (likely(idx + n < size)) {
198 for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
199 memcpy((void *)(ring + idx),
200 (const void *)(obj + i), 32);
203 memcpy((void *)(ring + idx),
204 (const void *)(obj + i), 16);
207 for (i = 0; idx < size; i++, idx++)
208 memcpy((void *)(ring + idx),
209 (const void *)(obj + i), 16);
210 /* Start at the beginning */
211 for (idx = 0; i < n; i++, idx++)
212 memcpy((void *)(ring + idx),
213 (const void *)(obj + i), 16);
217 /* the actual enqueue of elements on the ring.
218 * Placed here since identical code needed in both
219 * single and multi producer enqueue functions.
221 static __rte_always_inline void
222 enqueue_elems(struct rte_ring *r, uint32_t prod_head, const void *obj_table,
223 uint32_t esize, uint32_t num)
225 /* 8B and 16B copies implemented individually to retain
226 * the current performance.
229 enqueue_elems_64(r, prod_head, obj_table, num);
230 else if (esize == 16)
231 enqueue_elems_128(r, prod_head, obj_table, num);
233 uint32_t idx, scale, nr_idx, nr_num, nr_size;
235 /* Normalize to uint32_t */
236 scale = esize / sizeof(uint32_t);
237 nr_num = num * scale;
238 idx = prod_head & r->mask;
239 nr_idx = idx * scale;
240 nr_size = r->size * scale;
241 enqueue_elems_32(r, nr_size, nr_idx, obj_table, nr_num);
245 static __rte_always_inline void
246 dequeue_elems_32(struct rte_ring *r, const uint32_t size, uint32_t idx,
247 void *obj_table, uint32_t n)
250 uint32_t *ring = (uint32_t *)&r[1];
251 uint32_t *obj = (uint32_t *)obj_table;
252 if (likely(idx + n < size)) {
253 for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
255 obj[i + 1] = ring[idx + 1];
256 obj[i + 2] = ring[idx + 2];
257 obj[i + 3] = ring[idx + 3];
258 obj[i + 4] = ring[idx + 4];
259 obj[i + 5] = ring[idx + 5];
260 obj[i + 6] = ring[idx + 6];
261 obj[i + 7] = ring[idx + 7];
265 obj[i++] = ring[idx++]; /* fallthrough */
267 obj[i++] = ring[idx++]; /* fallthrough */
269 obj[i++] = ring[idx++]; /* fallthrough */
271 obj[i++] = ring[idx++]; /* fallthrough */
273 obj[i++] = ring[idx++]; /* fallthrough */
275 obj[i++] = ring[idx++]; /* fallthrough */
277 obj[i++] = ring[idx++]; /* fallthrough */
280 for (i = 0; idx < size; i++, idx++)
282 /* Start at the beginning */
283 for (idx = 0; i < n; i++, idx++)
288 static __rte_always_inline void
289 dequeue_elems_64(struct rte_ring *r, uint32_t prod_head,
290 void *obj_table, uint32_t n)
293 const uint32_t size = r->size;
294 uint32_t idx = prod_head & r->mask;
295 uint64_t *ring = (uint64_t *)&r[1];
296 uint64_t *obj = (uint64_t *)obj_table;
297 if (likely(idx + n < size)) {
298 for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
300 obj[i + 1] = ring[idx + 1];
301 obj[i + 2] = ring[idx + 2];
302 obj[i + 3] = ring[idx + 3];
306 obj[i++] = ring[idx++]; /* fallthrough */
308 obj[i++] = ring[idx++]; /* fallthrough */
310 obj[i++] = ring[idx++]; /* fallthrough */
313 for (i = 0; idx < size; i++, idx++)
315 /* Start at the beginning */
316 for (idx = 0; i < n; i++, idx++)
321 static __rte_always_inline void
322 dequeue_elems_128(struct rte_ring *r, uint32_t prod_head,
323 void *obj_table, uint32_t n)
326 const uint32_t size = r->size;
327 uint32_t idx = prod_head & r->mask;
328 rte_int128_t *ring = (rte_int128_t *)&r[1];
329 rte_int128_t *obj = (rte_int128_t *)obj_table;
330 if (likely(idx + n < size)) {
331 for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
332 memcpy((void *)(obj + i), (void *)(ring + idx), 32);
335 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
338 for (i = 0; idx < size; i++, idx++)
339 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
340 /* Start at the beginning */
341 for (idx = 0; i < n; i++, idx++)
342 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
346 /* the actual dequeue of elements from the ring.
347 * Placed here since identical code needed in both
348 * single and multi producer enqueue functions.
350 static __rte_always_inline void
351 dequeue_elems(struct rte_ring *r, uint32_t cons_head, void *obj_table,
352 uint32_t esize, uint32_t num)
354 /* 8B and 16B copies implemented individually to retain
355 * the current performance.
358 dequeue_elems_64(r, cons_head, obj_table, num);
359 else if (esize == 16)
360 dequeue_elems_128(r, cons_head, obj_table, num);
362 uint32_t idx, scale, nr_idx, nr_num, nr_size;
364 /* Normalize to uint32_t */
365 scale = esize / sizeof(uint32_t);
366 nr_num = num * scale;
367 idx = cons_head & r->mask;
368 nr_idx = idx * scale;
369 nr_size = r->size * scale;
370 dequeue_elems_32(r, nr_size, nr_idx, obj_table, nr_num);
374 /* Between load and load. there might be cpu reorder in weak model
376 * There are 2 choices for the users
377 * 1.use rmb() memory barrier
378 * 2.use one-direction load_acquire/store_release barrier,defined by
379 * CONFIG_RTE_USE_C11_MEM_MODEL=y
380 * It depends on performance test results.
381 * By default, move common functions to rte_ring_generic.h
383 #ifdef RTE_USE_C11_MEM_MODEL
384 #include "rte_ring_c11_mem.h"
386 #include "rte_ring_generic.h"
390 * @internal Enqueue several objects on the ring
393 * A pointer to the ring structure.
395 * A pointer to a table of void * pointers (objects).
397 * The size of ring element, in bytes. It must be a multiple of 4.
398 * This must be the same value used while creating the ring. Otherwise
399 * the results are undefined.
401 * The number of objects to add in the ring from the obj_table.
403 * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
404 * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
406 * Indicates whether to use single producer or multi-producer head update
408 * returns the amount of space after the enqueue operation has finished
410 * Actual number of objects enqueued.
411 * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
413 static __rte_always_inline unsigned int
414 __rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
415 unsigned int esize, unsigned int n,
416 enum rte_ring_queue_behavior behavior, unsigned int is_sp,
417 unsigned int *free_space)
419 uint32_t prod_head, prod_next;
420 uint32_t free_entries;
422 n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
423 &prod_head, &prod_next, &free_entries);
427 enqueue_elems(r, prod_head, obj_table, esize, n);
429 update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
431 if (free_space != NULL)
432 *free_space = free_entries - n;
437 * @internal Dequeue several objects from the ring
440 * A pointer to the ring structure.
442 * A pointer to a table of void * pointers (objects).
444 * The size of ring element, in bytes. It must be a multiple of 4.
445 * This must be the same value used while creating the ring. Otherwise
446 * the results are undefined.
448 * The number of objects to pull from the ring.
450 * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
451 * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
453 * Indicates whether to use single consumer or multi-consumer head update
455 * returns the number of remaining ring entries after the dequeue has finished
457 * - Actual number of objects dequeued.
458 * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
460 static __rte_always_inline unsigned int
461 __rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
462 unsigned int esize, unsigned int n,
463 enum rte_ring_queue_behavior behavior, unsigned int is_sc,
464 unsigned int *available)
466 uint32_t cons_head, cons_next;
469 n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
470 &cons_head, &cons_next, &entries);
474 dequeue_elems(r, cons_head, obj_table, esize, n);
476 update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
479 if (available != NULL)
480 *available = entries - n;
485 * Enqueue several objects on the ring (multi-producers safe).
487 * This function uses a "compare and set" instruction to move the
488 * producer index atomically.
491 * A pointer to the ring structure.
493 * A pointer to a table of void * pointers (objects).
495 * The size of ring element, in bytes. It must be a multiple of 4.
496 * This must be the same value used while creating the ring. Otherwise
497 * the results are undefined.
499 * The number of objects to add in the ring from the obj_table.
501 * if non-NULL, returns the amount of space in the ring after the
502 * enqueue operation has finished.
504 * The number of objects enqueued, either 0 or n
506 static __rte_always_inline unsigned int
507 rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
508 unsigned int esize, unsigned int n, unsigned int *free_space)
510 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
511 RTE_RING_QUEUE_FIXED, __IS_MP, free_space);
515 * Enqueue several objects on a ring
517 * @warning This API is NOT multi-producers safe
520 * A pointer to the ring structure.
522 * A pointer to a table of void * pointers (objects).
524 * The size of ring element, in bytes. It must be a multiple of 4.
525 * This must be the same value used while creating the ring. Otherwise
526 * the results are undefined.
528 * The number of objects to add in the ring from the obj_table.
530 * if non-NULL, returns the amount of space in the ring after the
531 * enqueue operation has finished.
533 * The number of objects enqueued, either 0 or n
535 static __rte_always_inline unsigned int
536 rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
537 unsigned int esize, unsigned int n, unsigned int *free_space)
539 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
540 RTE_RING_QUEUE_FIXED, __IS_SP, free_space);
544 * Enqueue several objects on a ring.
546 * This function calls the multi-producer or the single-producer
547 * version depending on the default behavior that was specified at
548 * ring creation time (see flags).
551 * A pointer to the ring structure.
553 * A pointer to a table of void * pointers (objects).
555 * The size of ring element, in bytes. It must be a multiple of 4.
556 * This must be the same value used while creating the ring. Otherwise
557 * the results are undefined.
559 * The number of objects to add in the ring from the obj_table.
561 * if non-NULL, returns the amount of space in the ring after the
562 * enqueue operation has finished.
564 * The number of objects enqueued, either 0 or n
566 static __rte_always_inline unsigned int
567 rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
568 unsigned int esize, unsigned int n, unsigned int *free_space)
570 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
571 RTE_RING_QUEUE_FIXED, r->prod.single, free_space);
575 * Enqueue one object on a ring (multi-producers safe).
577 * This function uses a "compare and set" instruction to move the
578 * producer index atomically.
581 * A pointer to the ring structure.
583 * A pointer to the object to be added.
585 * The size of ring element, in bytes. It must be a multiple of 4.
586 * This must be the same value used while creating the ring. Otherwise
587 * the results are undefined.
589 * - 0: Success; objects enqueued.
590 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
592 static __rte_always_inline int
593 rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
595 return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
600 * Enqueue one object on a ring
602 * @warning This API is NOT multi-producers safe
605 * A pointer to the ring structure.
607 * A pointer to the object to be added.
609 * The size of ring element, in bytes. It must be a multiple of 4.
610 * This must be the same value used while creating the ring. Otherwise
611 * the results are undefined.
613 * - 0: Success; objects enqueued.
614 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
616 static __rte_always_inline int
617 rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
619 return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
624 * Enqueue one object on a ring.
626 * This function calls the multi-producer or the single-producer
627 * version, depending on the default behaviour that was specified at
628 * ring creation time (see flags).
631 * A pointer to the ring structure.
633 * A pointer to the object to be added.
635 * The size of ring element, in bytes. It must be a multiple of 4.
636 * This must be the same value used while creating the ring. Otherwise
637 * the results are undefined.
639 * - 0: Success; objects enqueued.
640 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
642 static __rte_always_inline int
643 rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
645 return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
650 * Dequeue several objects from a ring (multi-consumers safe).
652 * This function uses a "compare and set" instruction to move the
653 * consumer index atomically.
656 * A pointer to the ring structure.
658 * A pointer to a table of void * pointers (objects) that will be filled.
660 * The size of ring element, in bytes. It must be a multiple of 4.
661 * This must be the same value used while creating the ring. Otherwise
662 * the results are undefined.
664 * The number of objects to dequeue from the ring to the obj_table.
666 * If non-NULL, returns the number of remaining ring entries after the
667 * dequeue has finished.
669 * The number of objects dequeued, either 0 or n
671 static __rte_always_inline unsigned int
672 rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
673 unsigned int esize, unsigned int n, unsigned int *available)
675 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
676 RTE_RING_QUEUE_FIXED, __IS_MC, available);
680 * Dequeue several objects from a ring (NOT multi-consumers safe).
683 * A pointer to the ring structure.
685 * A pointer to a table of void * pointers (objects) that will be filled.
687 * The size of ring element, in bytes. It must be a multiple of 4.
688 * This must be the same value used while creating the ring. Otherwise
689 * the results are undefined.
691 * The number of objects to dequeue from the ring to the obj_table,
692 * must be strictly positive.
694 * If non-NULL, returns the number of remaining ring entries after the
695 * dequeue has finished.
697 * The number of objects dequeued, either 0 or n
699 static __rte_always_inline unsigned int
700 rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
701 unsigned int esize, unsigned int n, unsigned int *available)
703 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
704 RTE_RING_QUEUE_FIXED, __IS_SC, available);
708 * Dequeue several objects from a ring.
710 * This function calls the multi-consumers or the single-consumer
711 * version, depending on the default behaviour that was specified at
712 * ring creation time (see flags).
715 * A pointer to the ring structure.
717 * A pointer to a table of void * pointers (objects) that will be filled.
719 * The size of ring element, in bytes. It must be a multiple of 4.
720 * This must be the same value used while creating the ring. Otherwise
721 * the results are undefined.
723 * The number of objects to dequeue from the ring to the obj_table.
725 * If non-NULL, returns the number of remaining ring entries after the
726 * dequeue has finished.
728 * The number of objects dequeued, either 0 or n
730 static __rte_always_inline unsigned int
731 rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
732 unsigned int esize, unsigned int n, unsigned int *available)
734 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
735 RTE_RING_QUEUE_FIXED, r->cons.single, available);
739 * Dequeue one object from a ring (multi-consumers safe).
741 * This function uses a "compare and set" instruction to move the
742 * consumer index atomically.
745 * A pointer to the ring structure.
747 * A pointer to a void * pointer (object) that will be filled.
749 * The size of ring element, in bytes. It must be a multiple of 4.
750 * This must be the same value used while creating the ring. Otherwise
751 * the results are undefined.
753 * - 0: Success; objects dequeued.
754 * - -ENOENT: Not enough entries in the ring to dequeue; no object is
757 static __rte_always_inline int
758 rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
761 return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
766 * Dequeue one object from a ring (NOT multi-consumers safe).
769 * A pointer to the ring structure.
771 * A pointer to a void * pointer (object) that will be filled.
773 * The size of ring element, in bytes. It must be a multiple of 4.
774 * This must be the same value used while creating the ring. Otherwise
775 * the results are undefined.
777 * - 0: Success; objects dequeued.
778 * - -ENOENT: Not enough entries in the ring to dequeue, no object is
781 static __rte_always_inline int
782 rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
785 return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
790 * Dequeue one object from a ring.
792 * This function calls the multi-consumers or the single-consumer
793 * version depending on the default behaviour that was specified at
794 * ring creation time (see flags).
797 * A pointer to the ring structure.
799 * A pointer to a void * pointer (object) that will be filled.
801 * The size of ring element, in bytes. It must be a multiple of 4.
802 * This must be the same value used while creating the ring. Otherwise
803 * the results are undefined.
805 * - 0: Success, objects dequeued.
806 * - -ENOENT: Not enough entries in the ring to dequeue, no object is
809 static __rte_always_inline int
810 rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
812 return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
817 * Enqueue several objects on the ring (multi-producers safe).
819 * This function uses a "compare and set" instruction to move the
820 * producer index atomically.
823 * A pointer to the ring structure.
825 * A pointer to a table of void * pointers (objects).
827 * The size of ring element, in bytes. It must be a multiple of 4.
828 * This must be the same value used while creating the ring. Otherwise
829 * the results are undefined.
831 * The number of objects to add in the ring from the obj_table.
833 * if non-NULL, returns the amount of space in the ring after the
834 * enqueue operation has finished.
836 * - n: Actual number of objects enqueued.
838 static __rte_always_inline unsigned
839 rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
840 unsigned int esize, unsigned int n, unsigned int *free_space)
842 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
843 RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
847 * Enqueue several objects on a ring
849 * @warning This API is NOT multi-producers safe
852 * A pointer to the ring structure.
854 * A pointer to a table of void * pointers (objects).
856 * The size of ring element, in bytes. It must be a multiple of 4.
857 * This must be the same value used while creating the ring. Otherwise
858 * the results are undefined.
860 * The number of objects to add in the ring from the obj_table.
862 * if non-NULL, returns the amount of space in the ring after the
863 * enqueue operation has finished.
865 * - n: Actual number of objects enqueued.
867 static __rte_always_inline unsigned
868 rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
869 unsigned int esize, unsigned int n, unsigned int *free_space)
871 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
872 RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
876 * Enqueue several objects on a ring.
878 * This function calls the multi-producer or the single-producer
879 * version depending on the default behavior that was specified at
880 * ring creation time (see flags).
883 * A pointer to the ring structure.
885 * A pointer to a table of void * pointers (objects).
887 * The size of ring element, in bytes. It must be a multiple of 4.
888 * This must be the same value used while creating the ring. Otherwise
889 * the results are undefined.
891 * The number of objects to add in the ring from the obj_table.
893 * if non-NULL, returns the amount of space in the ring after the
894 * enqueue operation has finished.
896 * - n: Actual number of objects enqueued.
898 static __rte_always_inline unsigned
899 rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
900 unsigned int esize, unsigned int n, unsigned int *free_space)
902 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
903 RTE_RING_QUEUE_VARIABLE, r->prod.single, free_space);
907 * Dequeue several objects from a ring (multi-consumers safe). When the request
908 * objects are more than the available objects, only dequeue the actual number
911 * This function uses a "compare and set" instruction to move the
912 * consumer index atomically.
915 * A pointer to the ring structure.
917 * A pointer to a table of void * pointers (objects) that will be filled.
919 * The size of ring element, in bytes. It must be a multiple of 4.
920 * This must be the same value used while creating the ring. Otherwise
921 * the results are undefined.
923 * The number of objects to dequeue from the ring to the obj_table.
925 * If non-NULL, returns the number of remaining ring entries after the
926 * dequeue has finished.
928 * - n: Actual number of objects dequeued, 0 if ring is empty
930 static __rte_always_inline unsigned
931 rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
932 unsigned int esize, unsigned int n, unsigned int *available)
934 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
935 RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
939 * Dequeue several objects from a ring (NOT multi-consumers safe).When the
940 * request objects are more than the available objects, only dequeue the
941 * actual number of objects
944 * A pointer to the ring structure.
946 * A pointer to a table of void * pointers (objects) that will be filled.
948 * The size of ring element, in bytes. It must be a multiple of 4.
949 * This must be the same value used while creating the ring. Otherwise
950 * the results are undefined.
952 * The number of objects to dequeue from the ring to the obj_table.
954 * If non-NULL, returns the number of remaining ring entries after the
955 * dequeue has finished.
957 * - n: Actual number of objects dequeued, 0 if ring is empty
959 static __rte_always_inline unsigned
960 rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
961 unsigned int esize, unsigned int n, unsigned int *available)
963 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
964 RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
968 * Dequeue multiple objects from a ring up to a maximum number.
970 * This function calls the multi-consumers or the single-consumer
971 * version, depending on the default behaviour that was specified at
972 * ring creation time (see flags).
975 * A pointer to the ring structure.
977 * A pointer to a table of void * pointers (objects) that will be filled.
979 * The size of ring element, in bytes. It must be a multiple of 4.
980 * This must be the same value used while creating the ring. Otherwise
981 * the results are undefined.
983 * The number of objects to dequeue from the ring to the obj_table.
985 * If non-NULL, returns the number of remaining ring entries after the
986 * dequeue has finished.
988 * - Number of objects dequeued
990 static __rte_always_inline unsigned int
991 rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
992 unsigned int esize, unsigned int n, unsigned int *available)
994 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
995 RTE_RING_QUEUE_VARIABLE,
996 r->cons.single, available);
1003 #endif /* _RTE_RING_ELEM_H_ */