/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

/**
 * @file
 * RTE Ring with user defined element size
 */

#include <rte_ring_core.h>
/**
 * Calculate the memory size needed for a ring with given element size
 *
 * This function returns the number of bytes needed for a ring, given
 * the number of elements in it and the size of the element. This value
 * is the sum of the size of the structure rte_ring and the size of the
 * memory needed for storing the elements. The value is aligned to a cache
 * line size.
 *
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 * @param count
 *   The number of elements in the ring (must be a power of 2).
 * @return
 *   - The memory size needed for the ring on success.
 *   - -EINVAL - esize is not a multiple of 4 or count provided is not a
 *		 power of 2.
 */
ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
/**
 * Create a new ring named *name* that stores elements with given size.
 *
 * This function uses ``memzone_reserve()`` to allocate memory. Then it
 * calls rte_ring_init() to initialize an empty ring.
 *
 * The new ring size is set to *count*, which must be a power of
 * two. Water marking is disabled by default. The real usable ring size
 * is *count-1* instead of *count* to differentiate a free ring from an
 * empty ring.
 *
 * The ring is added in RTE_TAILQ_RING list.
 *
 * @param name
 *   The name of the ring.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 * @param count
 *   The number of elements in the ring (must be a power of 2).
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   An OR of the following:
 *   - One of mutually exclusive flags that define producer behavior:
 *      - RING_F_SP_ENQ: If this flag is set, the default behavior when
 *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *        is "single-producer".
 *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
 *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *        is "multi-producer RTS mode".
 *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
 *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *        is "multi-producer HTS mode".
 *     If none of these flags is set, then default "multi-producer"
 *     behavior is selected.
 *   - One of mutually exclusive flags that define consumer behavior:
 *      - RING_F_SC_DEQ: If this flag is set, the default behavior when
 *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *        is "single-consumer". Otherwise, it is "multi-consumers".
 *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
 *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *        is "multi-consumer RTS mode".
 *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
 *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *        is "multi-consumer HTS mode".
 *     If none of these flags is set, then default "multi-consumer"
 *     behavior is selected.
 * @return
 *   On success, the pointer to the new allocated ring. NULL on error with
 *    rte_errno set appropriately. Possible errno values include:
 *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *    - E_RTE_SECONDARY - function was called from a secondary process instance
 *    - EINVAL - esize is not a multiple of 4 or count provided is not a
 *		 power of 2.
 *    - ENOSPC - the maximum number of memzones has already been allocated
 *    - EEXIST - a memzone with the same name already exists
 *    - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
			unsigned int count, int socket_id, unsigned int flags);
108 static __rte_always_inline void
109 __rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
110 uint32_t idx, const void *obj_table, uint32_t n)
113 uint32_t *ring = (uint32_t *)&r[1];
114 const uint32_t *obj = (const uint32_t *)obj_table;
115 if (likely(idx + n < size)) {
116 for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
118 ring[idx + 1] = obj[i + 1];
119 ring[idx + 2] = obj[i + 2];
120 ring[idx + 3] = obj[i + 3];
121 ring[idx + 4] = obj[i + 4];
122 ring[idx + 5] = obj[i + 5];
123 ring[idx + 6] = obj[i + 6];
124 ring[idx + 7] = obj[i + 7];
128 ring[idx++] = obj[i++]; /* fallthrough */
130 ring[idx++] = obj[i++]; /* fallthrough */
132 ring[idx++] = obj[i++]; /* fallthrough */
134 ring[idx++] = obj[i++]; /* fallthrough */
136 ring[idx++] = obj[i++]; /* fallthrough */
138 ring[idx++] = obj[i++]; /* fallthrough */
140 ring[idx++] = obj[i++]; /* fallthrough */
143 for (i = 0; idx < size; i++, idx++)
145 /* Start at the beginning */
146 for (idx = 0; i < n; i++, idx++)
151 static __rte_always_inline void
152 __rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
153 const void *obj_table, uint32_t n)
156 const uint32_t size = r->size;
157 uint32_t idx = prod_head & r->mask;
158 uint64_t *ring = (uint64_t *)&r[1];
159 const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
160 if (likely(idx + n < size)) {
161 for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
163 ring[idx + 1] = obj[i + 1];
164 ring[idx + 2] = obj[i + 2];
165 ring[idx + 3] = obj[i + 3];
169 ring[idx++] = obj[i++]; /* fallthrough */
171 ring[idx++] = obj[i++]; /* fallthrough */
173 ring[idx++] = obj[i++];
176 for (i = 0; idx < size; i++, idx++)
178 /* Start at the beginning */
179 for (idx = 0; i < n; i++, idx++)
184 static __rte_always_inline void
185 __rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
186 const void *obj_table, uint32_t n)
189 const uint32_t size = r->size;
190 uint32_t idx = prod_head & r->mask;
191 rte_int128_t *ring = (rte_int128_t *)&r[1];
192 const rte_int128_t *obj = (const rte_int128_t *)obj_table;
193 if (likely(idx + n < size)) {
194 for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
195 memcpy((void *)(ring + idx),
196 (const void *)(obj + i), 32);
199 memcpy((void *)(ring + idx),
200 (const void *)(obj + i), 16);
203 for (i = 0; idx < size; i++, idx++)
204 memcpy((void *)(ring + idx),
205 (const void *)(obj + i), 16);
206 /* Start at the beginning */
207 for (idx = 0; i < n; i++, idx++)
208 memcpy((void *)(ring + idx),
209 (const void *)(obj + i), 16);
213 /* the actual enqueue of elements on the ring.
214 * Placed here since identical code needed in both
215 * single and multi producer enqueue functions.
217 static __rte_always_inline void
218 __rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
219 const void *obj_table, uint32_t esize, uint32_t num)
221 /* 8B and 16B copies implemented individually to retain
222 * the current performance.
225 __rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
226 else if (esize == 16)
227 __rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
229 uint32_t idx, scale, nr_idx, nr_num, nr_size;
231 /* Normalize to uint32_t */
232 scale = esize / sizeof(uint32_t);
233 nr_num = num * scale;
234 idx = prod_head & r->mask;
235 nr_idx = idx * scale;
236 nr_size = r->size * scale;
237 __rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
242 static __rte_always_inline void
243 __rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
244 uint32_t idx, void *obj_table, uint32_t n)
247 uint32_t *ring = (uint32_t *)&r[1];
248 uint32_t *obj = (uint32_t *)obj_table;
249 if (likely(idx + n < size)) {
250 for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
252 obj[i + 1] = ring[idx + 1];
253 obj[i + 2] = ring[idx + 2];
254 obj[i + 3] = ring[idx + 3];
255 obj[i + 4] = ring[idx + 4];
256 obj[i + 5] = ring[idx + 5];
257 obj[i + 6] = ring[idx + 6];
258 obj[i + 7] = ring[idx + 7];
262 obj[i++] = ring[idx++]; /* fallthrough */
264 obj[i++] = ring[idx++]; /* fallthrough */
266 obj[i++] = ring[idx++]; /* fallthrough */
268 obj[i++] = ring[idx++]; /* fallthrough */
270 obj[i++] = ring[idx++]; /* fallthrough */
272 obj[i++] = ring[idx++]; /* fallthrough */
274 obj[i++] = ring[idx++]; /* fallthrough */
277 for (i = 0; idx < size; i++, idx++)
279 /* Start at the beginning */
280 for (idx = 0; i < n; i++, idx++)
285 static __rte_always_inline void
286 __rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head,
287 void *obj_table, uint32_t n)
290 const uint32_t size = r->size;
291 uint32_t idx = prod_head & r->mask;
292 uint64_t *ring = (uint64_t *)&r[1];
293 unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
294 if (likely(idx + n < size)) {
295 for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
297 obj[i + 1] = ring[idx + 1];
298 obj[i + 2] = ring[idx + 2];
299 obj[i + 3] = ring[idx + 3];
303 obj[i++] = ring[idx++]; /* fallthrough */
305 obj[i++] = ring[idx++]; /* fallthrough */
307 obj[i++] = ring[idx++]; /* fallthrough */
310 for (i = 0; idx < size; i++, idx++)
312 /* Start at the beginning */
313 for (idx = 0; i < n; i++, idx++)
318 static __rte_always_inline void
319 __rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t prod_head,
320 void *obj_table, uint32_t n)
323 const uint32_t size = r->size;
324 uint32_t idx = prod_head & r->mask;
325 rte_int128_t *ring = (rte_int128_t *)&r[1];
326 rte_int128_t *obj = (rte_int128_t *)obj_table;
327 if (likely(idx + n < size)) {
328 for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
329 memcpy((void *)(obj + i), (void *)(ring + idx), 32);
332 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
335 for (i = 0; idx < size; i++, idx++)
336 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
337 /* Start at the beginning */
338 for (idx = 0; i < n; i++, idx++)
339 memcpy((void *)(obj + i), (void *)(ring + idx), 16);
343 /* the actual dequeue of elements from the ring.
344 * Placed here since identical code needed in both
345 * single and multi producer enqueue functions.
347 static __rte_always_inline void
348 __rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
349 void *obj_table, uint32_t esize, uint32_t num)
351 /* 8B and 16B copies implemented individually to retain
352 * the current performance.
355 __rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
356 else if (esize == 16)
357 __rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
359 uint32_t idx, scale, nr_idx, nr_num, nr_size;
361 /* Normalize to uint32_t */
362 scale = esize / sizeof(uint32_t);
363 nr_num = num * scale;
364 idx = cons_head & r->mask;
365 nr_idx = idx * scale;
366 nr_size = r->size * scale;
367 __rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
/* Between load and load. there might be cpu reorder in weak model
 * (powerpc/arm).
 * There are 2 choices for the users
 * 1.use rmb() memory barrier
 * 2.use one-direction load_acquire/store_release barrier,defined by
 * CONFIG_RTE_USE_C11_MEM_MODEL=y
 * It depends on performance test results.
 * By default, move common functions to rte_ring_generic.h
 */
#ifdef RTE_USE_C11_MEM_MODEL
#include "rte_ring_c11_mem.h"
#else
#include "rte_ring_generic.h"
#endif
388 * @internal Enqueue several objects on the ring
391 * A pointer to the ring structure.
393 * A pointer to a table of objects.
395 * The size of ring element, in bytes. It must be a multiple of 4.
396 * This must be the same value used while creating the ring. Otherwise
397 * the results are undefined.
399 * The number of objects to add in the ring from the obj_table.
401 * RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
402 * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
404 * Indicates whether to use single producer or multi-producer head update
406 * returns the amount of space after the enqueue operation has finished
408 * Actual number of objects enqueued.
409 * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
411 static __rte_always_inline unsigned int
412 __rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
413 unsigned int esize, unsigned int n,
414 enum rte_ring_queue_behavior behavior, unsigned int is_sp,
415 unsigned int *free_space)
417 uint32_t prod_head, prod_next;
418 uint32_t free_entries;
420 n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
421 &prod_head, &prod_next, &free_entries);
425 __rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);
427 update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
429 if (free_space != NULL)
430 *free_space = free_entries - n;
435 * @internal Dequeue several objects from the ring
438 * A pointer to the ring structure.
440 * A pointer to a table of objects.
442 * The size of ring element, in bytes. It must be a multiple of 4.
443 * This must be the same value used while creating the ring. Otherwise
444 * the results are undefined.
446 * The number of objects to pull from the ring.
448 * RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
449 * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
451 * Indicates whether to use single consumer or multi-consumer head update
453 * returns the number of remaining ring entries after the dequeue has finished
455 * - Actual number of objects dequeued.
456 * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
458 static __rte_always_inline unsigned int
459 __rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
460 unsigned int esize, unsigned int n,
461 enum rte_ring_queue_behavior behavior, unsigned int is_sc,
462 unsigned int *available)
464 uint32_t cons_head, cons_next;
467 n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
468 &cons_head, &cons_next, &entries);
472 __rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);
474 update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
477 if (available != NULL)
478 *available = entries - n;
483 * Enqueue several objects on the ring (multi-producers safe).
485 * This function uses a "compare and set" instruction to move the
486 * producer index atomically.
489 * A pointer to the ring structure.
491 * A pointer to a table of objects.
493 * The size of ring element, in bytes. It must be a multiple of 4.
494 * This must be the same value used while creating the ring. Otherwise
495 * the results are undefined.
497 * The number of objects to add in the ring from the obj_table.
499 * if non-NULL, returns the amount of space in the ring after the
500 * enqueue operation has finished.
502 * The number of objects enqueued, either 0 or n
504 static __rte_always_inline unsigned int
505 rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
506 unsigned int esize, unsigned int n, unsigned int *free_space)
508 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
509 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
513 * Enqueue several objects on a ring
515 * @warning This API is NOT multi-producers safe
518 * A pointer to the ring structure.
520 * A pointer to a table of objects.
522 * The size of ring element, in bytes. It must be a multiple of 4.
523 * This must be the same value used while creating the ring. Otherwise
524 * the results are undefined.
526 * The number of objects to add in the ring from the obj_table.
528 * if non-NULL, returns the amount of space in the ring after the
529 * enqueue operation has finished.
531 * The number of objects enqueued, either 0 or n
533 static __rte_always_inline unsigned int
534 rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
535 unsigned int esize, unsigned int n, unsigned int *free_space)
537 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
538 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_hts.h>
#include <rte_ring_rts.h>
#endif
547 * Enqueue several objects on a ring.
549 * This function calls the multi-producer or the single-producer
550 * version depending on the default behavior that was specified at
551 * ring creation time (see flags).
554 * A pointer to the ring structure.
556 * A pointer to a table of objects.
558 * The size of ring element, in bytes. It must be a multiple of 4.
559 * This must be the same value used while creating the ring. Otherwise
560 * the results are undefined.
562 * The number of objects to add in the ring from the obj_table.
564 * if non-NULL, returns the amount of space in the ring after the
565 * enqueue operation has finished.
567 * The number of objects enqueued, either 0 or n
569 static __rte_always_inline unsigned int
570 rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
571 unsigned int esize, unsigned int n, unsigned int *free_space)
573 switch (r->prod.sync_type) {
574 case RTE_RING_SYNC_MT:
575 return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
577 case RTE_RING_SYNC_ST:
578 return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
580 #ifdef ALLOW_EXPERIMENTAL_API
581 case RTE_RING_SYNC_MT_RTS:
582 return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
584 case RTE_RING_SYNC_MT_HTS:
585 return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
590 /* valid ring should never reach this point */
592 if (free_space != NULL)
598 * Enqueue one object on a ring (multi-producers safe).
600 * This function uses a "compare and set" instruction to move the
601 * producer index atomically.
604 * A pointer to the ring structure.
606 * A pointer to the object to be added.
608 * The size of ring element, in bytes. It must be a multiple of 4.
609 * This must be the same value used while creating the ring. Otherwise
610 * the results are undefined.
612 * - 0: Success; objects enqueued.
613 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
615 static __rte_always_inline int
616 rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
618 return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
623 * Enqueue one object on a ring
625 * @warning This API is NOT multi-producers safe
628 * A pointer to the ring structure.
630 * A pointer to the object to be added.
632 * The size of ring element, in bytes. It must be a multiple of 4.
633 * This must be the same value used while creating the ring. Otherwise
634 * the results are undefined.
636 * - 0: Success; objects enqueued.
637 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
639 static __rte_always_inline int
640 rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
642 return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
647 * Enqueue one object on a ring.
649 * This function calls the multi-producer or the single-producer
650 * version, depending on the default behaviour that was specified at
651 * ring creation time (see flags).
654 * A pointer to the ring structure.
656 * A pointer to the object to be added.
658 * The size of ring element, in bytes. It must be a multiple of 4.
659 * This must be the same value used while creating the ring. Otherwise
660 * the results are undefined.
662 * - 0: Success; objects enqueued.
663 * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
665 static __rte_always_inline int
666 rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
668 return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
673 * Dequeue several objects from a ring (multi-consumers safe).
675 * This function uses a "compare and set" instruction to move the
676 * consumer index atomically.
679 * A pointer to the ring structure.
681 * A pointer to a table of objects that will be filled.
683 * The size of ring element, in bytes. It must be a multiple of 4.
684 * This must be the same value used while creating the ring. Otherwise
685 * the results are undefined.
687 * The number of objects to dequeue from the ring to the obj_table.
689 * If non-NULL, returns the number of remaining ring entries after the
690 * dequeue has finished.
692 * The number of objects dequeued, either 0 or n
694 static __rte_always_inline unsigned int
695 rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
696 unsigned int esize, unsigned int n, unsigned int *available)
698 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
699 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
703 * Dequeue several objects from a ring (NOT multi-consumers safe).
706 * A pointer to the ring structure.
708 * A pointer to a table of objects that will be filled.
710 * The size of ring element, in bytes. It must be a multiple of 4.
711 * This must be the same value used while creating the ring. Otherwise
712 * the results are undefined.
714 * The number of objects to dequeue from the ring to the obj_table,
715 * must be strictly positive.
717 * If non-NULL, returns the number of remaining ring entries after the
718 * dequeue has finished.
720 * The number of objects dequeued, either 0 or n
722 static __rte_always_inline unsigned int
723 rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
724 unsigned int esize, unsigned int n, unsigned int *available)
726 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
727 RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
731 * Dequeue several objects from a ring.
733 * This function calls the multi-consumers or the single-consumer
734 * version, depending on the default behaviour that was specified at
735 * ring creation time (see flags).
738 * A pointer to the ring structure.
740 * A pointer to a table of objects that will be filled.
742 * The size of ring element, in bytes. It must be a multiple of 4.
743 * This must be the same value used while creating the ring. Otherwise
744 * the results are undefined.
746 * The number of objects to dequeue from the ring to the obj_table.
748 * If non-NULL, returns the number of remaining ring entries after the
749 * dequeue has finished.
751 * The number of objects dequeued, either 0 or n
753 static __rte_always_inline unsigned int
754 rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
755 unsigned int esize, unsigned int n, unsigned int *available)
757 switch (r->cons.sync_type) {
758 case RTE_RING_SYNC_MT:
759 return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
761 case RTE_RING_SYNC_ST:
762 return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
764 #ifdef ALLOW_EXPERIMENTAL_API
765 case RTE_RING_SYNC_MT_RTS:
766 return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
768 case RTE_RING_SYNC_MT_HTS:
769 return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
774 /* valid ring should never reach this point */
776 if (available != NULL)
782 * Dequeue one object from a ring (multi-consumers safe).
784 * This function uses a "compare and set" instruction to move the
785 * consumer index atomically.
788 * A pointer to the ring structure.
790 * A pointer to the object that will be filled.
792 * The size of ring element, in bytes. It must be a multiple of 4.
793 * This must be the same value used while creating the ring. Otherwise
794 * the results are undefined.
796 * - 0: Success; objects dequeued.
797 * - -ENOENT: Not enough entries in the ring to dequeue; no object is
800 static __rte_always_inline int
801 rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
804 return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
809 * Dequeue one object from a ring (NOT multi-consumers safe).
812 * A pointer to the ring structure.
814 * A pointer to the object that will be filled.
816 * The size of ring element, in bytes. It must be a multiple of 4.
817 * This must be the same value used while creating the ring. Otherwise
818 * the results are undefined.
820 * - 0: Success; objects dequeued.
821 * - -ENOENT: Not enough entries in the ring to dequeue, no object is
824 static __rte_always_inline int
825 rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
828 return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
833 * Dequeue one object from a ring.
835 * This function calls the multi-consumers or the single-consumer
836 * version depending on the default behaviour that was specified at
837 * ring creation time (see flags).
840 * A pointer to the ring structure.
842 * A pointer to the object that will be filled.
844 * The size of ring element, in bytes. It must be a multiple of 4.
845 * This must be the same value used while creating the ring. Otherwise
846 * the results are undefined.
848 * - 0: Success, objects dequeued.
849 * - -ENOENT: Not enough entries in the ring to dequeue, no object is
852 static __rte_always_inline int
853 rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
855 return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
860 * Enqueue several objects on the ring (multi-producers safe).
862 * This function uses a "compare and set" instruction to move the
863 * producer index atomically.
866 * A pointer to the ring structure.
868 * A pointer to a table of objects.
870 * The size of ring element, in bytes. It must be a multiple of 4.
871 * This must be the same value used while creating the ring. Otherwise
872 * the results are undefined.
874 * The number of objects to add in the ring from the obj_table.
876 * if non-NULL, returns the amount of space in the ring after the
877 * enqueue operation has finished.
879 * - n: Actual number of objects enqueued.
881 static __rte_always_inline unsigned int
882 rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
883 unsigned int esize, unsigned int n, unsigned int *free_space)
885 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
886 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
890 * Enqueue several objects on a ring
892 * @warning This API is NOT multi-producers safe
895 * A pointer to the ring structure.
897 * A pointer to a table of objects.
899 * The size of ring element, in bytes. It must be a multiple of 4.
900 * This must be the same value used while creating the ring. Otherwise
901 * the results are undefined.
903 * The number of objects to add in the ring from the obj_table.
905 * if non-NULL, returns the amount of space in the ring after the
906 * enqueue operation has finished.
908 * - n: Actual number of objects enqueued.
910 static __rte_always_inline unsigned int
911 rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
912 unsigned int esize, unsigned int n, unsigned int *free_space)
914 return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
915 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
919 * Enqueue several objects on a ring.
921 * This function calls the multi-producer or the single-producer
922 * version depending on the default behavior that was specified at
923 * ring creation time (see flags).
926 * A pointer to the ring structure.
928 * A pointer to a table of objects.
930 * The size of ring element, in bytes. It must be a multiple of 4.
931 * This must be the same value used while creating the ring. Otherwise
932 * the results are undefined.
934 * The number of objects to add in the ring from the obj_table.
936 * if non-NULL, returns the amount of space in the ring after the
937 * enqueue operation has finished.
939 * - n: Actual number of objects enqueued.
941 static __rte_always_inline unsigned int
942 rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
943 unsigned int esize, unsigned int n, unsigned int *free_space)
945 switch (r->prod.sync_type) {
946 case RTE_RING_SYNC_MT:
947 return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
949 case RTE_RING_SYNC_ST:
950 return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
952 #ifdef ALLOW_EXPERIMENTAL_API
953 case RTE_RING_SYNC_MT_RTS:
954 return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
956 case RTE_RING_SYNC_MT_HTS:
957 return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
962 /* valid ring should never reach this point */
964 if (free_space != NULL)
970 * Dequeue several objects from a ring (multi-consumers safe). When the request
971 * objects are more than the available objects, only dequeue the actual number
974 * This function uses a "compare and set" instruction to move the
975 * consumer index atomically.
978 * A pointer to the ring structure.
980 * A pointer to a table of objects that will be filled.
982 * The size of ring element, in bytes. It must be a multiple of 4.
983 * This must be the same value used while creating the ring. Otherwise
984 * the results are undefined.
986 * The number of objects to dequeue from the ring to the obj_table.
988 * If non-NULL, returns the number of remaining ring entries after the
989 * dequeue has finished.
991 * - n: Actual number of objects dequeued, 0 if ring is empty
993 static __rte_always_inline unsigned int
994 rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
995 unsigned int esize, unsigned int n, unsigned int *available)
997 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
998 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
1002 * Dequeue several objects from a ring (NOT multi-consumers safe).When the
1003 * request objects are more than the available objects, only dequeue the
1004 * actual number of objects
1007 * A pointer to the ring structure.
1009 * A pointer to a table of objects that will be filled.
1011 * The size of ring element, in bytes. It must be a multiple of 4.
1012 * This must be the same value used while creating the ring. Otherwise
1013 * the results are undefined.
1015 * The number of objects to dequeue from the ring to the obj_table.
1017 * If non-NULL, returns the number of remaining ring entries after the
1018 * dequeue has finished.
1020 * - n: Actual number of objects dequeued, 0 if ring is empty
1022 static __rte_always_inline unsigned int
1023 rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1024 unsigned int esize, unsigned int n, unsigned int *available)
1026 return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
1027 RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
1031 * Dequeue multiple objects from a ring up to a maximum number.
1033 * This function calls the multi-consumers or the single-consumer
1034 * version, depending on the default behaviour that was specified at
1035 * ring creation time (see flags).
1038 * A pointer to the ring structure.
1040 * A pointer to a table of objects that will be filled.
1042 * The size of ring element, in bytes. It must be a multiple of 4.
1043 * This must be the same value used while creating the ring. Otherwise
1044 * the results are undefined.
1046 * The number of objects to dequeue from the ring to the obj_table.
1048 * If non-NULL, returns the number of remaining ring entries after the
1049 * dequeue has finished.
1051 * - Number of objects dequeued
1053 static __rte_always_inline unsigned int
1054 rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1055 unsigned int esize, unsigned int n, unsigned int *available)
1057 switch (r->cons.sync_type) {
1058 case RTE_RING_SYNC_MT:
1059 return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
1061 case RTE_RING_SYNC_ST:
1062 return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
1064 #ifdef ALLOW_EXPERIMENTAL_API
1065 case RTE_RING_SYNC_MT_RTS:
1066 return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
1068 case RTE_RING_SYNC_MT_HTS:
1069 return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
1074 /* valid ring should never reach this point */
1076 if (available != NULL)
#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_peek.h>
#endif

#include <rte_ring.h>

#endif /* _RTE_RING_ELEM_H_ */