/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

/**
 * @file
 * RTE Ring with user defined element size
 */

#include <rte_ring_core.h>
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Calculate the memory size needed for a ring with given element size
 *
 * This function returns the number of bytes needed for a ring, given
 * the number of elements in it and the size of the element. This value
 * is the sum of the size of the structure rte_ring and the size of the
 * memory needed for storing the elements. The value is aligned to a cache
 * line size.
 *
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 * @param count
 *   The number of elements in the ring (must be a power of 2).
 * @return
 *   - The memory size needed for the ring on success.
 *   - -EINVAL - esize is not a multiple of 4 or count provided is not a
 *     power of 2.
 */
47 ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Create a new ring named *name* that stores elements with given size.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. Then it
 * calls rte_ring_init() to initialize an empty ring.
 *
 * The new ring size is set to *count*, which must be a power of
 * two. Water marking is disabled by default. The real usable ring size
 * is *count-1* instead of *count* to differentiate a free ring from an
 * empty ring.
 *
 * The ring is added in RTE_TAILQ_RING list.
 *
 * @param name
 *   The name of the ring.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 * @param count
 *   The number of elements in the ring (must be a power of 2).
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   An OR of the following:
 *   - One of mutually exclusive flags that define producer behavior:
 *     - RING_F_SP_ENQ: If this flag is set, the default behavior when
 *       using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *       is "single-producer".
 *     - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
 *       using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *       is "multi-producer RTS mode".
 *     If none of these flags is set, then default "multi-producer"
 *     behavior is selected.
 *   - One of mutually exclusive flags that define consumer behavior:
 *     - RING_F_SC_DEQ: If this flag is set, the default behavior when
 *       using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *       is "single-consumer". Otherwise, it is "multi-consumers".
 *     - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
 *       using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *       is "multi-consumer RTS mode".
 *     If none of these flags is set, then default "multi-consumer"
 *     behavior is selected.
 * @return
 *   On success, the pointer to the new allocated ring. NULL on error with
 *   rte_errno set appropriately. Possible errno values include:
 *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *   - E_RTE_SECONDARY - function was called from a secondary process instance
 *   - EINVAL - esize is not a multiple of 4 or count provided is not a
 *     power of 2.
 *   - ENOSPC - the maximum number of memzones has already been allocated
 *   - EEXIST - a memzone with the same name already exists
 *   - ENOMEM - no appropriate memory area found in which to create memzone
 */
107 struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
108 unsigned int count, int socket_id, unsigned int flags);
110 static __rte_always_inline void
111 __rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
112 uint32_t idx, const void *obj_table, uint32_t n)
115 uint32_t *ring = (uint32_t *)&r[1];
116 const uint32_t *obj = (const uint32_t *)obj_table;
117 if (likely(idx + n < size)) {
118 for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
120 ring[idx + 1] = obj[i + 1];
121 ring[idx + 2] = obj[i + 2];
122 ring[idx + 3] = obj[i + 3];
123 ring[idx + 4] = obj[i + 4];
124 ring[idx + 5] = obj[i + 5];
125 ring[idx + 6] = obj[i + 6];
126 ring[idx + 7] = obj[i + 7];
130 ring[idx++] = obj[i++]; /* fallthrough */
132 ring[idx++] = obj[i++]; /* fallthrough */
134 ring[idx++] = obj[i++]; /* fallthrough */
136 ring[idx++] = obj[i++]; /* fallthrough */
138 ring[idx++] = obj[i++]; /* fallthrough */
140 ring[idx++] = obj[i++]; /* fallthrough */
142 ring[idx++] = obj[i++]; /* fallthrough */
145 for (i = 0; idx < size; i++, idx++)
147 /* Start at the beginning */
148 for (idx = 0; i < n; i++, idx++)
153 static __rte_always_inline void
154 __rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
155 const void *obj_table, uint32_t n)
158 const uint32_t size = r->size;
159 uint32_t idx = prod_head & r->mask;
160 uint64_t *ring = (uint64_t *)&r[1];
161 const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
162 if (likely(idx + n < size)) {
163 for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
165 ring[idx + 1] = obj[i + 1];
166 ring[idx + 2] = obj[i + 2];
167 ring[idx + 3] = obj[i + 3];
171 ring[idx++] = obj[i++]; /* fallthrough */
173 ring[idx++] = obj[i++]; /* fallthrough */
175 ring[idx++] = obj[i++];
178 for (i = 0; idx < size; i++, idx++)
180 /* Start at the beginning */
181 for (idx = 0; i < n; i++, idx++)
186 static __rte_always_inline void
187 __rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
188 const void *obj_table, uint32_t n)
191 const uint32_t size = r->size;
192 uint32_t idx = prod_head & r->mask;
193 rte_int128_t *ring = (rte_int128_t *)&r[1];
194 const rte_int128_t *obj = (const rte_int128_t *)obj_table;
195 if (likely(idx + n < size)) {
196 for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
197 memcpy((void *)(ring + idx),
198 (const void *)(obj + i), 32);
201 memcpy((void *)(ring + idx),
202 (const void *)(obj + i), 16);
205 for (i = 0; idx < size; i++, idx++)
206 memcpy((void *)(ring + idx),
207 (const void *)(obj + i), 16);
208 /* Start at the beginning */
209 for (idx = 0; i < n; i++, idx++)
210 memcpy((void *)(ring + idx),
211 (const void *)(obj + i), 16);
215 /* the actual enqueue of elements on the ring.
216 * Placed here since identical code needed in both
217 * single and multi producer enqueue functions.
219 static __rte_always_inline void
220 __rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
221 const void *obj_table, uint32_t esize, uint32_t num)
223 /* 8B and 16B copies implemented individually to retain
224 * the current performance.
227 __rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
228 else if (esize == 16)
229 __rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
231 uint32_t idx, scale, nr_idx, nr_num, nr_size;
233 /* Normalize to uint32_t */
234 scale = esize / sizeof(uint32_t);
235 nr_num = num * scale;
236 idx = prod_head & r->mask;
237 nr_idx = idx * scale;
238 nr_size = r->size * scale;
239 __rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
244 static __rte_always_inline void
245 __rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
246 uint32_t idx, void *obj_table, uint32_t n)
249 uint32_t *ring = (uint32_t *)&r[1];
250 uint32_t *obj = (uint32_t *)obj_table;
251 if (likely(idx + n < size)) {
252 for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
254 obj[i + 1] = ring[idx + 1];
255 obj[i + 2] = ring[idx + 2];
256 obj[i + 3] = ring[idx + 3];
257 obj[i + 4] = ring[idx + 4];
258 obj[i + 5] = ring[idx + 5];
259 obj[i + 6] = ring[idx + 6];
260 obj[i + 7] = ring[idx + 7];
264 obj[i++] = ring[idx++]; /* fallthrough */
266 obj[i++] = ring[idx++]; /* fallthrough */
268 obj[i++] = ring[idx++]; /* fallthrough */
270 obj[i++] = ring[idx++]; /* fallthrough */
272 obj[i++] = ring[idx++]; /* fallthrough */
274 obj[i++] = ring[idx++]; /* fallthrough */
276 obj[i++] = ring[idx++]; /* fallthrough */
279 for (i = 0; idx < size; i++, idx++)
281 /* Start at the beginning */
282 for (idx = 0; i < n; i++, idx++)
287 static __rte_always_inline void
288 __rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head,
289 void *obj_table, uint32_t n)
292 const uint32_t size = r->size;
293 uint32_t idx = prod_head & r->mask;
294 uint64_t *ring = (uint64_t *)&r[1];
295 unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
296 if (likely(idx + n < size)) {
297 for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
299 obj[i + 1] = ring[idx + 1];
300 obj[i + 2] = ring[idx + 2];
301 obj[i + 3] = ring[idx + 3];
305 obj[i++] = ring[idx++]; /* fallthrough */
307 obj[i++] = ring[idx++]; /* fallthrough */
309 obj[i++] = ring[idx++]; /* fallthrough */
312 for (i = 0; idx < size; i++, idx++)
314 /* Start at the beginning */
315 for (idx = 0; i < n; i++, idx++)
320 static __rte_always_inline void
321 __rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t prod_head,
322 void *obj_table, uint32_t n)
325 const uint32_t size = r->size;
326 uint32_t idx = prod_head & r->mask;
327 rte_int128_t *ring = (rte_int128_t *)&r[1];
328 rte_int128_t *obj = (rte_int128_t *)obj_table;
329 if (likely(idx + n < size)) {
330 for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
331 memcpy((void *)(obj + i), (void *)(ring + idx), 32);
334 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
337 for (i = 0; idx < size; i++, idx++)
338 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
339 /* Start at the beginning */
340 for (idx = 0; i < n; i++, idx++)
341 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
345 /* the actual dequeue of elements from the ring.
346 * Placed here since identical code needed in both
347 * single and multi producer enqueue functions.
349 static __rte_always_inline void
350 __rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
351 void *obj_table, uint32_t esize, uint32_t num)
353 /* 8B and 16B copies implemented individually to retain
354 * the current performance.
357 __rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
358 else if (esize == 16)
359 __rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
361 uint32_t idx, scale, nr_idx, nr_num, nr_size;
363 /* Normalize to uint32_t */
364 scale = esize / sizeof(uint32_t);
365 nr_num = num * scale;
366 idx = cons_head & r->mask;
367 nr_idx = idx * scale;
368 nr_size = r->size * scale;
369 __rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
/* Between load and load there might be CPU reordering in a weak memory
 * model. There are 2 choices for the users:
 * 1. use rmb() memory barrier
 * 2. use one-direction load_acquire/store_release barriers, defined by
 *    CONFIG_RTE_USE_C11_MEM_MODEL=y
 * It depends on performance test results.
 * By default, move common functions to rte_ring_generic.h
 */
#ifdef RTE_USE_C11_MEM_MODEL
#include "rte_ring_c11_mem.h"
#else
#include "rte_ring_generic.h"
#endif
390 * @internal Enqueue several objects on the ring
393 * A pointer to the ring structure.
395 * A pointer to a table of objects.
397 * The size of ring element, in bytes. It must be a multiple of 4.
398 * This must be the same value used while creating the ring. Otherwise
399 * the results are undefined.
401 * The number of objects to add in the ring from the obj_table.
403 * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
404 * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
406 * Indicates whether to use single producer or multi-producer head update
408 * returns the amount of space after the enqueue operation has finished
410 * Actual number of objects enqueued.
411 * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
413 static __rte_always_inline unsigned int
414 __rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
415 unsigned int esize, unsigned int n,
416 enum rte_ring_queue_behavior behavior, unsigned int is_sp,
417 unsigned int *free_space)
419 uint32_t prod_head, prod_next;
420 uint32_t free_entries;
422 n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
423 &prod_head, &prod_next, &free_entries);
427 __rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);
429 update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
431 if (free_space != NULL)
432 *free_space = free_entries - n;
437 * @internal Dequeue several objects from the ring
440 * A pointer to the ring structure.
442 * A pointer to a table of objects.
444 * The size of ring element, in bytes. It must be a multiple of 4.
445 * This must be the same value used while creating the ring. Otherwise
446 * the results are undefined.
448 * The number of objects to pull from the ring.
450 * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
451 * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
453 * Indicates whether to use single consumer or multi-consumer head update
455 * returns the number of remaining ring entries after the dequeue has finished
457 * - Actual number of objects dequeued.
458 * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
460 static __rte_always_inline unsigned int
461 __rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
462 unsigned int esize, unsigned int n,
463 enum rte_ring_queue_behavior behavior, unsigned int is_sc,
464 unsigned int *available)
466 uint32_t cons_head, cons_next;
469 n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
470 &cons_head, &cons_next, &entries);
474 __rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);
476 update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
479 if (available != NULL)
480 *available = entries - n;
485 * Enqueue several objects on the ring (multi-producers safe).
487 * This function uses a "compare and set" instruction to move the
488 * producer index atomically.
491 * A pointer to the ring structure.
493 * A pointer to a table of objects.
495 * The size of ring element, in bytes. It must be a multiple of 4.
496 * This must be the same value used while creating the ring. Otherwise
497 * the results are undefined.
499 * The number of objects to add in the ring from the obj_table.
501 * if non-NULL, returns the amount of space in the ring after the
502 * enqueue operation has finished.
504 * The number of objects enqueued, either 0 or n
506 static __rte_always_inline unsigned int
507 rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
508 unsigned int esize, unsigned int n, unsigned int *free_space)
510 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
511 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
515 * Enqueue several objects on a ring
517 * @warning This API is NOT multi-producers safe
520 * A pointer to the ring structure.
522 * A pointer to a table of objects.
524 * The size of ring element, in bytes. It must be a multiple of 4.
525 * This must be the same value used while creating the ring. Otherwise
526 * the results are undefined.
528 * The number of objects to add in the ring from the obj_table.
530 * if non-NULL, returns the amount of space in the ring after the
531 * enqueue operation has finished.
533 * The number of objects enqueued, either 0 or n
535 static __rte_always_inline unsigned int
536 rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
537 unsigned int esize, unsigned int n, unsigned int *free_space)
539 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
540 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
543 #ifdef ALLOW_EXPERIMENTAL_API
544 #include <rte_ring_rts.h>
548 * Enqueue several objects on a ring.
550 * This function calls the multi-producer or the single-producer
551 * version depending on the default behavior that was specified at
552 * ring creation time (see flags).
555 * A pointer to the ring structure.
557 * A pointer to a table of objects.
559 * The size of ring element, in bytes. It must be a multiple of 4.
560 * This must be the same value used while creating the ring. Otherwise
561 * the results are undefined.
563 * The number of objects to add in the ring from the obj_table.
565 * if non-NULL, returns the amount of space in the ring after the
566 * enqueue operation has finished.
568 * The number of objects enqueued, either 0 or n
570 static __rte_always_inline unsigned int
571 rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
572 unsigned int esize, unsigned int n, unsigned int *free_space)
574 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
575 RTE_RING_QUEUE_FIXED, r->prod.sync_type, free_space);
577 switch (r->prod.sync_type) {
578 case RTE_RING_SYNC_MT:
579 return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
581 case RTE_RING_SYNC_ST:
582 return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
584 #ifdef ALLOW_EXPERIMENTAL_API
585 case RTE_RING_SYNC_MT_RTS:
586 return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
591 /* valid ring should never reach this point */
593 if (free_space != NULL)
599 * Enqueue one object on a ring (multi-producers safe).
601 * This function uses a "compare and set" instruction to move the
602 * producer index atomically.
605 * A pointer to the ring structure.
607 * A pointer to the object to be added.
609 * The size of ring element, in bytes. It must be a multiple of 4.
610 * This must be the same value used while creating the ring. Otherwise
611 * the results are undefined.
613 * - 0: Success; objects enqueued.
614 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
616 static __rte_always_inline int
617 rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
619 return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
624 * Enqueue one object on a ring
626 * @warning This API is NOT multi-producers safe
629 * A pointer to the ring structure.
631 * A pointer to the object to be added.
633 * The size of ring element, in bytes. It must be a multiple of 4.
634 * This must be the same value used while creating the ring. Otherwise
635 * the results are undefined.
637 * - 0: Success; objects enqueued.
638 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
640 static __rte_always_inline int
641 rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
643 return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
648 * Enqueue one object on a ring.
650 * This function calls the multi-producer or the single-producer
651 * version, depending on the default behaviour that was specified at
652 * ring creation time (see flags).
655 * A pointer to the ring structure.
657 * A pointer to the object to be added.
659 * The size of ring element, in bytes. It must be a multiple of 4.
660 * This must be the same value used while creating the ring. Otherwise
661 * the results are undefined.
663 * - 0: Success; objects enqueued.
664 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
666 static __rte_always_inline int
667 rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
669 return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
674 * Dequeue several objects from a ring (multi-consumers safe).
676 * This function uses a "compare and set" instruction to move the
677 * consumer index atomically.
680 * A pointer to the ring structure.
682 * A pointer to a table of objects that will be filled.
684 * The size of ring element, in bytes. It must be a multiple of 4.
685 * This must be the same value used while creating the ring. Otherwise
686 * the results are undefined.
688 * The number of objects to dequeue from the ring to the obj_table.
690 * If non-NULL, returns the number of remaining ring entries after the
691 * dequeue has finished.
693 * The number of objects dequeued, either 0 or n
695 static __rte_always_inline unsigned int
696 rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
697 unsigned int esize, unsigned int n, unsigned int *available)
699 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
700 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
704 * Dequeue several objects from a ring (NOT multi-consumers safe).
707 * A pointer to the ring structure.
709 * A pointer to a table of objects that will be filled.
711 * The size of ring element, in bytes. It must be a multiple of 4.
712 * This must be the same value used while creating the ring. Otherwise
713 * the results are undefined.
715 * The number of objects to dequeue from the ring to the obj_table,
716 * must be strictly positive.
718 * If non-NULL, returns the number of remaining ring entries after the
719 * dequeue has finished.
721 * The number of objects dequeued, either 0 or n
723 static __rte_always_inline unsigned int
724 rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
725 unsigned int esize, unsigned int n, unsigned int *available)
727 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
728 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
732 * Dequeue several objects from a ring.
734 * This function calls the multi-consumers or the single-consumer
735 * version, depending on the default behaviour that was specified at
736 * ring creation time (see flags).
739 * A pointer to the ring structure.
741 * A pointer to a table of objects that will be filled.
743 * The size of ring element, in bytes. It must be a multiple of 4.
744 * This must be the same value used while creating the ring. Otherwise
745 * the results are undefined.
747 * The number of objects to dequeue from the ring to the obj_table.
749 * If non-NULL, returns the number of remaining ring entries after the
750 * dequeue has finished.
752 * The number of objects dequeued, either 0 or n
754 static __rte_always_inline unsigned int
755 rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
756 unsigned int esize, unsigned int n, unsigned int *available)
758 switch (r->cons.sync_type) {
759 case RTE_RING_SYNC_MT:
760 return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
762 case RTE_RING_SYNC_ST:
763 return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
765 #ifdef ALLOW_EXPERIMENTAL_API
766 case RTE_RING_SYNC_MT_RTS:
767 return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
772 /* valid ring should never reach this point */
774 if (available != NULL)
780 * Dequeue one object from a ring (multi-consumers safe).
782 * This function uses a "compare and set" instruction to move the
783 * consumer index atomically.
786 * A pointer to the ring structure.
788 * A pointer to the object that will be filled.
790 * The size of ring element, in bytes. It must be a multiple of 4.
791 * This must be the same value used while creating the ring. Otherwise
792 * the results are undefined.
794 * - 0: Success; objects dequeued.
795 * - -ENOENT: Not enough entries in the ring to dequeue; no object is
798 static __rte_always_inline int
799 rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
802 return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
807 * Dequeue one object from a ring (NOT multi-consumers safe).
810 * A pointer to the ring structure.
812 * A pointer to the object that will be filled.
814 * The size of ring element, in bytes. It must be a multiple of 4.
815 * This must be the same value used while creating the ring. Otherwise
816 * the results are undefined.
818 * - 0: Success; objects dequeued.
819 * - -ENOENT: Not enough entries in the ring to dequeue, no object is
822 static __rte_always_inline int
823 rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
826 return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
831 * Dequeue one object from a ring.
833 * This function calls the multi-consumers or the single-consumer
834 * version depending on the default behaviour that was specified at
835 * ring creation time (see flags).
838 * A pointer to the ring structure.
840 * A pointer to the object that will be filled.
842 * The size of ring element, in bytes. It must be a multiple of 4.
843 * This must be the same value used while creating the ring. Otherwise
844 * the results are undefined.
846 * - 0: Success, objects dequeued.
847 * - -ENOENT: Not enough entries in the ring to dequeue, no object is
850 static __rte_always_inline int
851 rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
853 return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
858 * Enqueue several objects on the ring (multi-producers safe).
860 * This function uses a "compare and set" instruction to move the
861 * producer index atomically.
864 * A pointer to the ring structure.
866 * A pointer to a table of objects.
868 * The size of ring element, in bytes. It must be a multiple of 4.
869 * This must be the same value used while creating the ring. Otherwise
870 * the results are undefined.
872 * The number of objects to add in the ring from the obj_table.
874 * if non-NULL, returns the amount of space in the ring after the
875 * enqueue operation has finished.
877 * - n: Actual number of objects enqueued.
879 static __rte_always_inline unsigned
880 rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
881 unsigned int esize, unsigned int n, unsigned int *free_space)
883 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
884 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
888 * Enqueue several objects on a ring
890 * @warning This API is NOT multi-producers safe
893 * A pointer to the ring structure.
895 * A pointer to a table of objects.
897 * The size of ring element, in bytes. It must be a multiple of 4.
898 * This must be the same value used while creating the ring. Otherwise
899 * the results are undefined.
901 * The number of objects to add in the ring from the obj_table.
903 * if non-NULL, returns the amount of space in the ring after the
904 * enqueue operation has finished.
906 * - n: Actual number of objects enqueued.
908 static __rte_always_inline unsigned
909 rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
910 unsigned int esize, unsigned int n, unsigned int *free_space)
912 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
913 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
917 * Enqueue several objects on a ring.
919 * This function calls the multi-producer or the single-producer
920 * version depending on the default behavior that was specified at
921 * ring creation time (see flags).
924 * A pointer to the ring structure.
926 * A pointer to a table of objects.
928 * The size of ring element, in bytes. It must be a multiple of 4.
929 * This must be the same value used while creating the ring. Otherwise
930 * the results are undefined.
932 * The number of objects to add in the ring from the obj_table.
934 * if non-NULL, returns the amount of space in the ring after the
935 * enqueue operation has finished.
937 * - n: Actual number of objects enqueued.
939 static __rte_always_inline unsigned
940 rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
941 unsigned int esize, unsigned int n, unsigned int *free_space)
943 switch (r->prod.sync_type) {
944 case RTE_RING_SYNC_MT:
945 return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
947 case RTE_RING_SYNC_ST:
948 return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
950 #ifdef ALLOW_EXPERIMENTAL_API
951 case RTE_RING_SYNC_MT_RTS:
952 return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
957 /* valid ring should never reach this point */
959 if (free_space != NULL)
965 * Dequeue several objects from a ring (multi-consumers safe). When the request
966 * objects are more than the available objects, only dequeue the actual number
969 * This function uses a "compare and set" instruction to move the
970 * consumer index atomically.
973 * A pointer to the ring structure.
975 * A pointer to a table of objects that will be filled.
977 * The size of ring element, in bytes. It must be a multiple of 4.
978 * This must be the same value used while creating the ring. Otherwise
979 * the results are undefined.
981 * The number of objects to dequeue from the ring to the obj_table.
983 * If non-NULL, returns the number of remaining ring entries after the
984 * dequeue has finished.
986 * - n: Actual number of objects dequeued, 0 if ring is empty
988 static __rte_always_inline unsigned
989 rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
990 unsigned int esize, unsigned int n, unsigned int *available)
992 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
993 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
997 * Dequeue several objects from a ring (NOT multi-consumers safe).When the
998 * request objects are more than the available objects, only dequeue the
999 * actual number of objects
1002 * A pointer to the ring structure.
1004 * A pointer to a table of objects that will be filled.
1006 * The size of ring element, in bytes. It must be a multiple of 4.
1007 * This must be the same value used while creating the ring. Otherwise
1008 * the results are undefined.
1010 * The number of objects to dequeue from the ring to the obj_table.
1012 * If non-NULL, returns the number of remaining ring entries after the
1013 * dequeue has finished.
1015 * - n: Actual number of objects dequeued, 0 if ring is empty
1017 static __rte_always_inline unsigned
1018 rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1019 unsigned int esize, unsigned int n, unsigned int *available)
1021 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
1022 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
1026 * Dequeue multiple objects from a ring up to a maximum number.
1028 * This function calls the multi-consumers or the single-consumer
1029 * version, depending on the default behaviour that was specified at
1030 * ring creation time (see flags).
1033 * A pointer to the ring structure.
1035 * A pointer to a table of objects that will be filled.
1037 * The size of ring element, in bytes. It must be a multiple of 4.
1038 * This must be the same value used while creating the ring. Otherwise
1039 * the results are undefined.
1041 * The number of objects to dequeue from the ring to the obj_table.
1043 * If non-NULL, returns the number of remaining ring entries after the
1044 * dequeue has finished.
1046 * - Number of objects dequeued
1048 static __rte_always_inline unsigned int
1049 rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1050 unsigned int esize, unsigned int n, unsigned int *available)
1052 switch (r->cons.sync_type) {
1053 case RTE_RING_SYNC_MT:
1054 return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
1056 case RTE_RING_SYNC_ST:
1057 return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
1059 #ifdef ALLOW_EXPERIMENTAL_API
1060 case RTE_RING_SYNC_MT_RTS:
1061 return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
1066 /* valid ring should never reach this point */
1068 if (available != NULL)
1073 #include <rte_ring.h>
1079 #endif /* _RTE_RING_ELEM_H_ */