/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

/**
 * @file
 * RTE Ring with user defined element size
 */
23 #include <rte_ring_core.h>
27 * @b EXPERIMENTAL: this API may change without prior notice
29 * Calculate the memory size needed for a ring with given element size
31 * This function returns the number of bytes needed for a ring, given
32 * the number of elements in it and the size of the element. This value
33 * is the sum of the size of the structure rte_ring and the size of the
34 * memory needed for storing the elements. The value is aligned to a cache
38 * The size of ring element, in bytes. It must be a multiple of 4.
40 * The number of elements in the ring (must be a power of 2).
42 * - The memory size needed for the ring on success.
43 * - -EINVAL - esize is not a multiple of 4 or count provided is not a
47 ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
51 * @b EXPERIMENTAL: this API may change without prior notice
53 * Create a new ring named *name* that stores elements with given size.
55 * This function uses ``memzone_reserve()`` to allocate memory. Then it
56 * calls rte_ring_init() to initialize an empty ring.
58 * The new ring size is set to *count*, which must be a power of
59 * two. Water marking is disabled by default. The real usable ring size
60 * is *count-1* instead of *count* to differentiate a free ring from an
63 * The ring is added in RTE_TAILQ_RING list.
66 * The name of the ring.
68 * The size of ring element, in bytes. It must be a multiple of 4.
70 * The number of elements in the ring (must be a power of 2).
72 * The *socket_id* argument is the socket identifier in case of
73 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
74 * constraint for the reserved zone.
76 * An OR of the following:
77 * - One of mutually exclusive flags that define producer behavior:
78 * - RING_F_SP_ENQ: If this flag is set, the default behavior when
79 * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
80 * is "single-producer".
81 * - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
82 * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
83 * is "multi-producer RTS mode".
84 * - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
85 * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
86 * is "multi-producer HTS mode".
87 * If none of these flags is set, then default "multi-producer"
88 * behavior is selected.
89 * - One of mutually exclusive flags that define consumer behavior:
90 * - RING_F_SC_DEQ: If this flag is set, the default behavior when
91 * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
92 * is "single-consumer". Otherwise, it is "multi-consumers".
93 * - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
94 * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
95 * is "multi-consumer RTS mode".
96 * - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
97 * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
98 * is "multi-consumer HTS mode".
99 * If none of these flags is set, then default "multi-consumer"
100 * behavior is selected.
102 * On success, the pointer to the new allocated ring. NULL on error with
103 * rte_errno set appropriately. Possible errno values include:
104 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
105 * - E_RTE_SECONDARY - function was called from a secondary process instance
106 * - EINVAL - esize is not a multiple of 4 or count provided is not a
108 * - ENOSPC - the maximum number of memzones has already been allocated
109 * - EEXIST - a memzone with the same name already exists
110 * - ENOMEM - no appropriate memory area found in which to create memzone
113 struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
114 unsigned int count, int socket_id, unsigned int flags);
116 static __rte_always_inline void
117 __rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
118 uint32_t idx, const void *obj_table, uint32_t n)
121 uint32_t *ring = (uint32_t *)&r[1];
122 const uint32_t *obj = (const uint32_t *)obj_table;
123 if (likely(idx + n < size)) {
124 for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
126 ring[idx + 1] = obj[i + 1];
127 ring[idx + 2] = obj[i + 2];
128 ring[idx + 3] = obj[i + 3];
129 ring[idx + 4] = obj[i + 4];
130 ring[idx + 5] = obj[i + 5];
131 ring[idx + 6] = obj[i + 6];
132 ring[idx + 7] = obj[i + 7];
136 ring[idx++] = obj[i++]; /* fallthrough */
138 ring[idx++] = obj[i++]; /* fallthrough */
140 ring[idx++] = obj[i++]; /* fallthrough */
142 ring[idx++] = obj[i++]; /* fallthrough */
144 ring[idx++] = obj[i++]; /* fallthrough */
146 ring[idx++] = obj[i++]; /* fallthrough */
148 ring[idx++] = obj[i++]; /* fallthrough */
151 for (i = 0; idx < size; i++, idx++)
153 /* Start at the beginning */
154 for (idx = 0; i < n; i++, idx++)
159 static __rte_always_inline void
160 __rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
161 const void *obj_table, uint32_t n)
164 const uint32_t size = r->size;
165 uint32_t idx = prod_head & r->mask;
166 uint64_t *ring = (uint64_t *)&r[1];
167 const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
168 if (likely(idx + n < size)) {
169 for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
171 ring[idx + 1] = obj[i + 1];
172 ring[idx + 2] = obj[i + 2];
173 ring[idx + 3] = obj[i + 3];
177 ring[idx++] = obj[i++]; /* fallthrough */
179 ring[idx++] = obj[i++]; /* fallthrough */
181 ring[idx++] = obj[i++];
184 for (i = 0; idx < size; i++, idx++)
186 /* Start at the beginning */
187 for (idx = 0; i < n; i++, idx++)
192 static __rte_always_inline void
193 __rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
194 const void *obj_table, uint32_t n)
197 const uint32_t size = r->size;
198 uint32_t idx = prod_head & r->mask;
199 rte_int128_t *ring = (rte_int128_t *)&r[1];
200 const rte_int128_t *obj = (const rte_int128_t *)obj_table;
201 if (likely(idx + n < size)) {
202 for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
203 memcpy((void *)(ring + idx),
204 (const void *)(obj + i), 32);
207 memcpy((void *)(ring + idx),
208 (const void *)(obj + i), 16);
211 for (i = 0; idx < size; i++, idx++)
212 memcpy((void *)(ring + idx),
213 (const void *)(obj + i), 16);
214 /* Start at the beginning */
215 for (idx = 0; i < n; i++, idx++)
216 memcpy((void *)(ring + idx),
217 (const void *)(obj + i), 16);
221 /* the actual enqueue of elements on the ring.
222 * Placed here since identical code needed in both
223 * single and multi producer enqueue functions.
225 static __rte_always_inline void
226 __rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
227 const void *obj_table, uint32_t esize, uint32_t num)
229 /* 8B and 16B copies implemented individually to retain
230 * the current performance.
233 __rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
234 else if (esize == 16)
235 __rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
237 uint32_t idx, scale, nr_idx, nr_num, nr_size;
239 /* Normalize to uint32_t */
240 scale = esize / sizeof(uint32_t);
241 nr_num = num * scale;
242 idx = prod_head & r->mask;
243 nr_idx = idx * scale;
244 nr_size = r->size * scale;
245 __rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
250 static __rte_always_inline void
251 __rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
252 uint32_t idx, void *obj_table, uint32_t n)
255 uint32_t *ring = (uint32_t *)&r[1];
256 uint32_t *obj = (uint32_t *)obj_table;
257 if (likely(idx + n < size)) {
258 for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
260 obj[i + 1] = ring[idx + 1];
261 obj[i + 2] = ring[idx + 2];
262 obj[i + 3] = ring[idx + 3];
263 obj[i + 4] = ring[idx + 4];
264 obj[i + 5] = ring[idx + 5];
265 obj[i + 6] = ring[idx + 6];
266 obj[i + 7] = ring[idx + 7];
270 obj[i++] = ring[idx++]; /* fallthrough */
272 obj[i++] = ring[idx++]; /* fallthrough */
274 obj[i++] = ring[idx++]; /* fallthrough */
276 obj[i++] = ring[idx++]; /* fallthrough */
278 obj[i++] = ring[idx++]; /* fallthrough */
280 obj[i++] = ring[idx++]; /* fallthrough */
282 obj[i++] = ring[idx++]; /* fallthrough */
285 for (i = 0; idx < size; i++, idx++)
287 /* Start at the beginning */
288 for (idx = 0; i < n; i++, idx++)
293 static __rte_always_inline void
294 __rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head,
295 void *obj_table, uint32_t n)
298 const uint32_t size = r->size;
299 uint32_t idx = prod_head & r->mask;
300 uint64_t *ring = (uint64_t *)&r[1];
301 unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
302 if (likely(idx + n < size)) {
303 for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
305 obj[i + 1] = ring[idx + 1];
306 obj[i + 2] = ring[idx + 2];
307 obj[i + 3] = ring[idx + 3];
311 obj[i++] = ring[idx++]; /* fallthrough */
313 obj[i++] = ring[idx++]; /* fallthrough */
315 obj[i++] = ring[idx++]; /* fallthrough */
318 for (i = 0; idx < size; i++, idx++)
320 /* Start at the beginning */
321 for (idx = 0; i < n; i++, idx++)
326 static __rte_always_inline void
327 __rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t prod_head,
328 void *obj_table, uint32_t n)
331 const uint32_t size = r->size;
332 uint32_t idx = prod_head & r->mask;
333 rte_int128_t *ring = (rte_int128_t *)&r[1];
334 rte_int128_t *obj = (rte_int128_t *)obj_table;
335 if (likely(idx + n < size)) {
336 for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
337 memcpy((void *)(obj + i), (void *)(ring + idx), 32);
340 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
343 for (i = 0; idx < size; i++, idx++)
344 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
345 /* Start at the beginning */
346 for (idx = 0; i < n; i++, idx++)
347 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
351 /* the actual dequeue of elements from the ring.
352 * Placed here since identical code needed in both
353 * single and multi producer enqueue functions.
355 static __rte_always_inline void
356 __rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
357 void *obj_table, uint32_t esize, uint32_t num)
359 /* 8B and 16B copies implemented individually to retain
360 * the current performance.
363 __rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
364 else if (esize == 16)
365 __rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
367 uint32_t idx, scale, nr_idx, nr_num, nr_size;
369 /* Normalize to uint32_t */
370 scale = esize / sizeof(uint32_t);
371 nr_num = num * scale;
372 idx = cons_head & r->mask;
373 nr_idx = idx * scale;
374 nr_size = r->size * scale;
375 __rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
380 /* Between load and load. there might be cpu reorder in weak model
382 * There are 2 choices for the users
383 * 1.use rmb() memory barrier
384 * 2.use one-direction load_acquire/store_release barrier,defined by
385 * CONFIG_RTE_USE_C11_MEM_MODEL=y
386 * It depends on performance test results.
387 * By default, move common functions to rte_ring_generic.h
389 #ifdef RTE_USE_C11_MEM_MODEL
390 #include "rte_ring_c11_mem.h"
392 #include "rte_ring_generic.h"
396 * @internal Enqueue several objects on the ring
399 * A pointer to the ring structure.
401 * A pointer to a table of objects.
403 * The size of ring element, in bytes. It must be a multiple of 4.
404 * This must be the same value used while creating the ring. Otherwise
405 * the results are undefined.
407 * The number of objects to add in the ring from the obj_table.
409 * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
410 * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
412 * Indicates whether to use single producer or multi-producer head update
414 * returns the amount of space after the enqueue operation has finished
416 * Actual number of objects enqueued.
417 * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
419 static __rte_always_inline unsigned int
420 __rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
421 unsigned int esize, unsigned int n,
422 enum rte_ring_queue_behavior behavior, unsigned int is_sp,
423 unsigned int *free_space)
425 uint32_t prod_head, prod_next;
426 uint32_t free_entries;
428 n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
429 &prod_head, &prod_next, &free_entries);
433 __rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);
435 update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
437 if (free_space != NULL)
438 *free_space = free_entries - n;
443 * @internal Dequeue several objects from the ring
446 * A pointer to the ring structure.
448 * A pointer to a table of objects.
450 * The size of ring element, in bytes. It must be a multiple of 4.
451 * This must be the same value used while creating the ring. Otherwise
452 * the results are undefined.
454 * The number of objects to pull from the ring.
456 * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
457 * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
459 * Indicates whether to use single consumer or multi-consumer head update
461 * returns the number of remaining ring entries after the dequeue has finished
463 * - Actual number of objects dequeued.
464 * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
466 static __rte_always_inline unsigned int
467 __rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
468 unsigned int esize, unsigned int n,
469 enum rte_ring_queue_behavior behavior, unsigned int is_sc,
470 unsigned int *available)
472 uint32_t cons_head, cons_next;
475 n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
476 &cons_head, &cons_next, &entries);
480 __rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);
482 update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
485 if (available != NULL)
486 *available = entries - n;
491 * Enqueue several objects on the ring (multi-producers safe).
493 * This function uses a "compare and set" instruction to move the
494 * producer index atomically.
497 * A pointer to the ring structure.
499 * A pointer to a table of objects.
501 * The size of ring element, in bytes. It must be a multiple of 4.
502 * This must be the same value used while creating the ring. Otherwise
503 * the results are undefined.
505 * The number of objects to add in the ring from the obj_table.
507 * if non-NULL, returns the amount of space in the ring after the
508 * enqueue operation has finished.
510 * The number of objects enqueued, either 0 or n
512 static __rte_always_inline unsigned int
513 rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
514 unsigned int esize, unsigned int n, unsigned int *free_space)
516 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
517 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
521 * Enqueue several objects on a ring
523 * @warning This API is NOT multi-producers safe
526 * A pointer to the ring structure.
528 * A pointer to a table of objects.
530 * The size of ring element, in bytes. It must be a multiple of 4.
531 * This must be the same value used while creating the ring. Otherwise
532 * the results are undefined.
534 * The number of objects to add in the ring from the obj_table.
536 * if non-NULL, returns the amount of space in the ring after the
537 * enqueue operation has finished.
539 * The number of objects enqueued, either 0 or n
541 static __rte_always_inline unsigned int
542 rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
543 unsigned int esize, unsigned int n, unsigned int *free_space)
545 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
546 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_hts.h>
#include <rte_ring_rts.h>
#endif
555 * Enqueue several objects on a ring.
557 * This function calls the multi-producer or the single-producer
558 * version depending on the default behavior that was specified at
559 * ring creation time (see flags).
562 * A pointer to the ring structure.
564 * A pointer to a table of objects.
566 * The size of ring element, in bytes. It must be a multiple of 4.
567 * This must be the same value used while creating the ring. Otherwise
568 * the results are undefined.
570 * The number of objects to add in the ring from the obj_table.
572 * if non-NULL, returns the amount of space in the ring after the
573 * enqueue operation has finished.
575 * The number of objects enqueued, either 0 or n
577 static __rte_always_inline unsigned int
578 rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
579 unsigned int esize, unsigned int n, unsigned int *free_space)
581 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
582 RTE_RING_QUEUE_FIXED, r->prod.sync_type, free_space);
584 switch (r->prod.sync_type) {
585 case RTE_RING_SYNC_MT:
586 return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
588 case RTE_RING_SYNC_ST:
589 return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
591 #ifdef ALLOW_EXPERIMENTAL_API
592 case RTE_RING_SYNC_MT_RTS:
593 return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
595 case RTE_RING_SYNC_MT_HTS:
596 return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
601 /* valid ring should never reach this point */
603 if (free_space != NULL)
609 * Enqueue one object on a ring (multi-producers safe).
611 * This function uses a "compare and set" instruction to move the
612 * producer index atomically.
615 * A pointer to the ring structure.
617 * A pointer to the object to be added.
619 * The size of ring element, in bytes. It must be a multiple of 4.
620 * This must be the same value used while creating the ring. Otherwise
621 * the results are undefined.
623 * - 0: Success; objects enqueued.
624 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
626 static __rte_always_inline int
627 rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
629 return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
634 * Enqueue one object on a ring
636 * @warning This API is NOT multi-producers safe
639 * A pointer to the ring structure.
641 * A pointer to the object to be added.
643 * The size of ring element, in bytes. It must be a multiple of 4.
644 * This must be the same value used while creating the ring. Otherwise
645 * the results are undefined.
647 * - 0: Success; objects enqueued.
648 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
650 static __rte_always_inline int
651 rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
653 return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
658 * Enqueue one object on a ring.
660 * This function calls the multi-producer or the single-producer
661 * version, depending on the default behaviour that was specified at
662 * ring creation time (see flags).
665 * A pointer to the ring structure.
667 * A pointer to the object to be added.
669 * The size of ring element, in bytes. It must be a multiple of 4.
670 * This must be the same value used while creating the ring. Otherwise
671 * the results are undefined.
673 * - 0: Success; objects enqueued.
674 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
676 static __rte_always_inline int
677 rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
679 return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
684 * Dequeue several objects from a ring (multi-consumers safe).
686 * This function uses a "compare and set" instruction to move the
687 * consumer index atomically.
690 * A pointer to the ring structure.
692 * A pointer to a table of objects that will be filled.
694 * The size of ring element, in bytes. It must be a multiple of 4.
695 * This must be the same value used while creating the ring. Otherwise
696 * the results are undefined.
698 * The number of objects to dequeue from the ring to the obj_table.
700 * If non-NULL, returns the number of remaining ring entries after the
701 * dequeue has finished.
703 * The number of objects dequeued, either 0 or n
705 static __rte_always_inline unsigned int
706 rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
707 unsigned int esize, unsigned int n, unsigned int *available)
709 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
710 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
714 * Dequeue several objects from a ring (NOT multi-consumers safe).
717 * A pointer to the ring structure.
719 * A pointer to a table of objects that will be filled.
721 * The size of ring element, in bytes. It must be a multiple of 4.
722 * This must be the same value used while creating the ring. Otherwise
723 * the results are undefined.
725 * The number of objects to dequeue from the ring to the obj_table,
726 * must be strictly positive.
728 * If non-NULL, returns the number of remaining ring entries after the
729 * dequeue has finished.
731 * The number of objects dequeued, either 0 or n
733 static __rte_always_inline unsigned int
734 rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
735 unsigned int esize, unsigned int n, unsigned int *available)
737 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
738 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
742 * Dequeue several objects from a ring.
744 * This function calls the multi-consumers or the single-consumer
745 * version, depending on the default behaviour that was specified at
746 * ring creation time (see flags).
749 * A pointer to the ring structure.
751 * A pointer to a table of objects that will be filled.
753 * The size of ring element, in bytes. It must be a multiple of 4.
754 * This must be the same value used while creating the ring. Otherwise
755 * the results are undefined.
757 * The number of objects to dequeue from the ring to the obj_table.
759 * If non-NULL, returns the number of remaining ring entries after the
760 * dequeue has finished.
762 * The number of objects dequeued, either 0 or n
764 static __rte_always_inline unsigned int
765 rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
766 unsigned int esize, unsigned int n, unsigned int *available)
768 switch (r->cons.sync_type) {
769 case RTE_RING_SYNC_MT:
770 return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
772 case RTE_RING_SYNC_ST:
773 return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
775 #ifdef ALLOW_EXPERIMENTAL_API
776 case RTE_RING_SYNC_MT_RTS:
777 return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
779 case RTE_RING_SYNC_MT_HTS:
780 return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
785 /* valid ring should never reach this point */
787 if (available != NULL)
793 * Dequeue one object from a ring (multi-consumers safe).
795 * This function uses a "compare and set" instruction to move the
796 * consumer index atomically.
799 * A pointer to the ring structure.
801 * A pointer to the object that will be filled.
803 * The size of ring element, in bytes. It must be a multiple of 4.
804 * This must be the same value used while creating the ring. Otherwise
805 * the results are undefined.
807 * - 0: Success; objects dequeued.
808 * - -ENOENT: Not enough entries in the ring to dequeue; no object is
811 static __rte_always_inline int
812 rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
815 return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
820 * Dequeue one object from a ring (NOT multi-consumers safe).
823 * A pointer to the ring structure.
825 * A pointer to the object that will be filled.
827 * The size of ring element, in bytes. It must be a multiple of 4.
828 * This must be the same value used while creating the ring. Otherwise
829 * the results are undefined.
831 * - 0: Success; objects dequeued.
832 * - -ENOENT: Not enough entries in the ring to dequeue, no object is
835 static __rte_always_inline int
836 rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
839 return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
844 * Dequeue one object from a ring.
846 * This function calls the multi-consumers or the single-consumer
847 * version depending on the default behaviour that was specified at
848 * ring creation time (see flags).
851 * A pointer to the ring structure.
853 * A pointer to the object that will be filled.
855 * The size of ring element, in bytes. It must be a multiple of 4.
856 * This must be the same value used while creating the ring. Otherwise
857 * the results are undefined.
859 * - 0: Success, objects dequeued.
860 * - -ENOENT: Not enough entries in the ring to dequeue, no object is
863 static __rte_always_inline int
864 rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
866 return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
871 * Enqueue several objects on the ring (multi-producers safe).
873 * This function uses a "compare and set" instruction to move the
874 * producer index atomically.
877 * A pointer to the ring structure.
879 * A pointer to a table of objects.
881 * The size of ring element, in bytes. It must be a multiple of 4.
882 * This must be the same value used while creating the ring. Otherwise
883 * the results are undefined.
885 * The number of objects to add in the ring from the obj_table.
887 * if non-NULL, returns the amount of space in the ring after the
888 * enqueue operation has finished.
890 * - n: Actual number of objects enqueued.
892 static __rte_always_inline unsigned
893 rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
894 unsigned int esize, unsigned int n, unsigned int *free_space)
896 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
897 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
901 * Enqueue several objects on a ring
903 * @warning This API is NOT multi-producers safe
906 * A pointer to the ring structure.
908 * A pointer to a table of objects.
910 * The size of ring element, in bytes. It must be a multiple of 4.
911 * This must be the same value used while creating the ring. Otherwise
912 * the results are undefined.
914 * The number of objects to add in the ring from the obj_table.
916 * if non-NULL, returns the amount of space in the ring after the
917 * enqueue operation has finished.
919 * - n: Actual number of objects enqueued.
921 static __rte_always_inline unsigned
922 rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
923 unsigned int esize, unsigned int n, unsigned int *free_space)
925 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
926 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
930 * Enqueue several objects on a ring.
932 * This function calls the multi-producer or the single-producer
933 * version depending on the default behavior that was specified at
934 * ring creation time (see flags).
937 * A pointer to the ring structure.
939 * A pointer to a table of objects.
941 * The size of ring element, in bytes. It must be a multiple of 4.
942 * This must be the same value used while creating the ring. Otherwise
943 * the results are undefined.
945 * The number of objects to add in the ring from the obj_table.
947 * if non-NULL, returns the amount of space in the ring after the
948 * enqueue operation has finished.
950 * - n: Actual number of objects enqueued.
952 static __rte_always_inline unsigned
953 rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
954 unsigned int esize, unsigned int n, unsigned int *free_space)
956 switch (r->prod.sync_type) {
957 case RTE_RING_SYNC_MT:
958 return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
960 case RTE_RING_SYNC_ST:
961 return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
963 #ifdef ALLOW_EXPERIMENTAL_API
964 case RTE_RING_SYNC_MT_RTS:
965 return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
967 case RTE_RING_SYNC_MT_HTS:
968 return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
973 /* valid ring should never reach this point */
975 if (free_space != NULL)
981 * Dequeue several objects from a ring (multi-consumers safe). When the request
982 * objects are more than the available objects, only dequeue the actual number
985 * This function uses a "compare and set" instruction to move the
986 * consumer index atomically.
989 * A pointer to the ring structure.
991 * A pointer to a table of objects that will be filled.
993 * The size of ring element, in bytes. It must be a multiple of 4.
994 * This must be the same value used while creating the ring. Otherwise
995 * the results are undefined.
997 * The number of objects to dequeue from the ring to the obj_table.
999 * If non-NULL, returns the number of remaining ring entries after the
1000 * dequeue has finished.
1002 * - n: Actual number of objects dequeued, 0 if ring is empty
1004 static __rte_always_inline unsigned
1005 rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1006 unsigned int esize, unsigned int n, unsigned int *available)
1008 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
1009 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
1013 * Dequeue several objects from a ring (NOT multi-consumers safe).When the
1014 * request objects are more than the available objects, only dequeue the
1015 * actual number of objects
1018 * A pointer to the ring structure.
1020 * A pointer to a table of objects that will be filled.
1022 * The size of ring element, in bytes. It must be a multiple of 4.
1023 * This must be the same value used while creating the ring. Otherwise
1024 * the results are undefined.
1026 * The number of objects to dequeue from the ring to the obj_table.
1028 * If non-NULL, returns the number of remaining ring entries after the
1029 * dequeue has finished.
1031 * - n: Actual number of objects dequeued, 0 if ring is empty
1033 static __rte_always_inline unsigned
1034 rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1035 unsigned int esize, unsigned int n, unsigned int *available)
1037 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
1038 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
1042 * Dequeue multiple objects from a ring up to a maximum number.
1044 * This function calls the multi-consumers or the single-consumer
1045 * version, depending on the default behaviour that was specified at
1046 * ring creation time (see flags).
1049 * A pointer to the ring structure.
1051 * A pointer to a table of objects that will be filled.
1053 * The size of ring element, in bytes. It must be a multiple of 4.
1054 * This must be the same value used while creating the ring. Otherwise
1055 * the results are undefined.
1057 * The number of objects to dequeue from the ring to the obj_table.
1059 * If non-NULL, returns the number of remaining ring entries after the
1060 * dequeue has finished.
1062 * - Number of objects dequeued
1064 static __rte_always_inline unsigned int
1065 rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1066 unsigned int esize, unsigned int n, unsigned int *available)
1068 switch (r->cons.sync_type) {
1069 case RTE_RING_SYNC_MT:
1070 return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
1072 case RTE_RING_SYNC_ST:
1073 return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
1075 #ifdef ALLOW_EXPERIMENTAL_API
1076 case RTE_RING_SYNC_MT_RTS:
1077 return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
1079 case RTE_RING_SYNC_MT_HTS:
1080 return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
1085 /* valid ring should never reach this point */
1087 if (available != NULL)
1092 #ifdef ALLOW_EXPERIMENTAL_API
1093 #include <rte_ring_peek.h>
1096 #include <rte_ring.h>
1102 #endif /* _RTE_RING_ELEM_H_ */