/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010-2020 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */
10 #ifndef _RTE_RING_RTS_H_
11 #define _RTE_RING_RTS_H_
/**
 * @file rte_ring_rts.h
 * @b EXPERIMENTAL: this API may change without prior notice
 * It is not recommended to include this file directly.
 * Please include <rte_ring.h> instead.
 *
 * Contains functions for Relaxed Tail Sync (RTS) ring mode.
 * The main idea remains the same as for our original MP/MC synchronization
 * mechanism.
 * The main difference is that tail value is increased not
 * by every thread that finished enqueue/dequeue,
 * but only by the current last one doing enqueue/dequeue.
 * That allows threads to skip spinning on tail value,
 * leaving actual tail value change to last thread at a given instance.
 * RTS requires 2 64-bit CAS for each enqueue(/dequeue) operation:
 * one for head update, second for tail update.
 * As a gain it allows thread to avoid spinning/waiting on tail value.
 * In comparison original MP/MC algorithm requires one 32-bit CAS
 * for head update and waiting/spinning on tail value.
 *
 * Brief outline:
 * - introduce update counter (cnt) for both head and tail.
 * - increment head.cnt for each head.value update
 * - write head.value and head.cnt atomically (64-bit CAS)
 * - move tail.value ahead only when tail.cnt + 1 == head.cnt
 *   (indicating that this is the last thread updating the tail)
 * - increment tail.cnt when each enqueue/dequeue op finishes
 *   (no matter if tail.value going to change or not)
 * - write tail.value and tail.cnt atomically (64-bit CAS)
 *
 * To avoid producer/consumer starvation:
 * - limit max allowed distance between head and tail value (HTD_MAX).
 *   I.E. thread is allowed to proceed with changing head.value,
 *   only when: head.value - tail.value <= HTD_MAX
 * HTD_MAX is an optional parameter.
 * With HTD_MAX == 0 we'll have fully serialized ring -
 * i.e. only one thread at a time will be able to enqueue/dequeue
 * to/from the ring.
 * With HTD_MAX >= ring.capacity - no limitation.
 * By default HTD_MAX == ring.capacity / 8.
 */
#include <errno.h>
#include <stdint.h>

#include <rte_ring_rts_elem_pvt.h>
62 * Enqueue several objects on the RTS ring (multi-producers safe).
65 * A pointer to the ring structure.
67 * A pointer to a table of objects.
69 * The size of ring element, in bytes. It must be a multiple of 4.
70 * This must be the same value used while creating the ring. Otherwise
71 * the results are undefined.
73 * The number of objects to add in the ring from the obj_table.
75 * if non-NULL, returns the amount of space in the ring after the
76 * enqueue operation has finished.
78 * The number of objects enqueued, either 0 or n
81 static __rte_always_inline unsigned int
82 rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
83 unsigned int esize, unsigned int n, unsigned int *free_space)
85 return __rte_ring_do_rts_enqueue_elem(r, obj_table, esize, n,
86 RTE_RING_QUEUE_FIXED, free_space);
90 * Dequeue several objects from an RTS ring (multi-consumers safe).
93 * A pointer to the ring structure.
95 * A pointer to a table of objects that will be filled.
97 * The size of ring element, in bytes. It must be a multiple of 4.
98 * This must be the same value used while creating the ring. Otherwise
99 * the results are undefined.
101 * The number of objects to dequeue from the ring to the obj_table.
103 * If non-NULL, returns the number of remaining ring entries after the
104 * dequeue has finished.
106 * The number of objects dequeued, either 0 or n
109 static __rte_always_inline unsigned int
110 rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
111 unsigned int esize, unsigned int n, unsigned int *available)
113 return __rte_ring_do_rts_dequeue_elem(r, obj_table, esize, n,
114 RTE_RING_QUEUE_FIXED, available);
118 * Enqueue several objects on the RTS ring (multi-producers safe).
121 * A pointer to the ring structure.
123 * A pointer to a table of objects.
125 * The size of ring element, in bytes. It must be a multiple of 4.
126 * This must be the same value used while creating the ring. Otherwise
127 * the results are undefined.
129 * The number of objects to add in the ring from the obj_table.
131 * if non-NULL, returns the amount of space in the ring after the
132 * enqueue operation has finished.
134 * - n: Actual number of objects enqueued.
137 static __rte_always_inline unsigned int
138 rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
139 unsigned int esize, unsigned int n, unsigned int *free_space)
141 return __rte_ring_do_rts_enqueue_elem(r, obj_table, esize, n,
142 RTE_RING_QUEUE_VARIABLE, free_space);
146 * Dequeue several objects from an RTS ring (multi-consumers safe).
147 * When the requested objects are more than the available objects,
148 * only dequeue the actual number of objects.
151 * A pointer to the ring structure.
153 * A pointer to a table of objects that will be filled.
155 * The size of ring element, in bytes. It must be a multiple of 4.
156 * This must be the same value used while creating the ring. Otherwise
157 * the results are undefined.
159 * The number of objects to dequeue from the ring to the obj_table.
161 * If non-NULL, returns the number of remaining ring entries after the
162 * dequeue has finished.
164 * - n: Actual number of objects dequeued, 0 if ring is empty
167 static __rte_always_inline unsigned int
168 rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
169 unsigned int esize, unsigned int n, unsigned int *available)
171 return __rte_ring_do_rts_dequeue_elem(r, obj_table, esize, n,
172 RTE_RING_QUEUE_VARIABLE, available);
176 * Enqueue several objects on the RTS ring (multi-producers safe).
179 * A pointer to the ring structure.
181 * A pointer to a table of void * pointers (objects).
183 * The number of objects to add in the ring from the obj_table.
185 * if non-NULL, returns the amount of space in the ring after the
186 * enqueue operation has finished.
188 * The number of objects enqueued, either 0 or n
191 static __rte_always_inline unsigned int
192 rte_ring_mp_rts_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
193 unsigned int n, unsigned int *free_space)
195 return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table,
196 sizeof(uintptr_t), n, free_space);
200 * Dequeue several objects from an RTS ring (multi-consumers safe).
203 * A pointer to the ring structure.
205 * A pointer to a table of void * pointers (objects) that will be filled.
207 * The number of objects to dequeue from the ring to the obj_table.
209 * If non-NULL, returns the number of remaining ring entries after the
210 * dequeue has finished.
212 * The number of objects dequeued, either 0 or n
215 static __rte_always_inline unsigned int
216 rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table,
217 unsigned int n, unsigned int *available)
219 return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table,
220 sizeof(uintptr_t), n, available);
224 * Enqueue several objects on the RTS ring (multi-producers safe).
227 * A pointer to the ring structure.
229 * A pointer to a table of void * pointers (objects).
231 * The number of objects to add in the ring from the obj_table.
233 * if non-NULL, returns the amount of space in the ring after the
234 * enqueue operation has finished.
236 * - n: Actual number of objects enqueued.
239 static __rte_always_inline unsigned int
240 rte_ring_mp_rts_enqueue_burst(struct rte_ring *r, void * const *obj_table,
241 unsigned int n, unsigned int *free_space)
243 return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table,
244 sizeof(uintptr_t), n, free_space);
248 * Dequeue several objects from an RTS ring (multi-consumers safe).
249 * When the requested objects are more than the available objects,
250 * only dequeue the actual number of objects.
253 * A pointer to the ring structure.
255 * A pointer to a table of void * pointers (objects) that will be filled.
257 * The number of objects to dequeue from the ring to the obj_table.
259 * If non-NULL, returns the number of remaining ring entries after the
260 * dequeue has finished.
262 * - n: Actual number of objects dequeued, 0 if ring is empty
265 static __rte_always_inline unsigned int
266 rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
267 unsigned int n, unsigned int *available)
269 return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table,
270 sizeof(uintptr_t), n, available);
274 * Return producer max Head-Tail-Distance (HTD).
277 * A pointer to the ring structure.
279 * Producer HTD value, if producer is set in appropriate sync mode,
280 * or UINT32_MAX otherwise.
283 static inline uint32_t
284 rte_ring_get_prod_htd_max(const struct rte_ring *r)
286 if (r->prod.sync_type == RTE_RING_SYNC_MT_RTS)
287 return r->rts_prod.htd_max;
292 * Set producer max Head-Tail-Distance (HTD).
293 * Note that producer has to use appropriate sync mode (RTS).
296 * A pointer to the ring structure.
298 * new HTD value to setup.
300 * Zero on success, or negative error code otherwise.
304 rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
306 if (r->prod.sync_type != RTE_RING_SYNC_MT_RTS)
309 r->rts_prod.htd_max = v;
314 * Return consumer max Head-Tail-Distance (HTD).
317 * A pointer to the ring structure.
319 * Consumer HTD value, if consumer is set in appropriate sync mode,
320 * or UINT32_MAX otherwise.
323 static inline uint32_t
324 rte_ring_get_cons_htd_max(const struct rte_ring *r)
326 if (r->cons.sync_type == RTE_RING_SYNC_MT_RTS)
327 return r->rts_cons.htd_max;
332 * Set consumer max Head-Tail-Distance (HTD).
333 * Note that consumer has to use appropriate sync mode (RTS).
336 * A pointer to the ring structure.
338 * new HTD value to setup.
340 * Zero on success, or negative error code otherwise.
344 rte_ring_set_cons_htd_max(struct rte_ring *r, uint32_t v)
346 if (r->cons.sync_type != RTE_RING_SYNC_MT_RTS)
349 r->rts_cons.htd_max = v;
357 #endif /* _RTE_RING_RTS_H_ */