1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2020 Arm Limited
4 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
6 * Derived from FreeBSD's bufring.h
7 * Used as BSD-3 Licensed with permission from Kip Macy.
10 #ifndef _RTE_RING_PEEK_ZC_H_
11 #define _RTE_RING_PEEK_ZC_H_
15 * It is not recommended to include this file directly.
16 * Please include <rte_ring_elem.h> instead.
18 * Ring Peek Zero Copy APIs
19 * These APIs make it possible to split public enqueue/dequeue API
21 * - enqueue/dequeue start
22 * - copy data to/from the ring
23 * - enqueue/dequeue finish
24 * Along with the advantages of the peek APIs, these APIs provide the ability
25 * to avoid copying of the data to temporary area (for ex: array of mbufs
28 * Note that currently these APIs are available only for two sync modes:
29 * 1) Single Producer/Single Consumer (RTE_RING_SYNC_ST)
30 * 2) Serialized Producer/Serialized Consumer (RTE_RING_SYNC_MT_HTS).
31 * It is user's responsibility to create/init ring with appropriate sync
34 * Following are some examples showing the API usage.
36 * struct elem_obj {uint64_t a; uint32_t b, c;};
37 * struct elem_obj *obj;
39 * // Create ring with sync type RTE_RING_SYNC_ST or RTE_RING_SYNC_MT_HTS
40 * // Reserve space on the ring
41 * n = rte_ring_enqueue_zc_bulk_elem_start(r, sizeof(elem_obj), 1, &zcd, NULL);
43 * // Produce the data directly on the ring memory
44 * obj = (struct elem_obj *)zcd->ptr1;
45 * obj->a = rte_get_a();
46 * obj->b = rte_get_b();
47 * obj->c = rte_get_c();
48 * rte_ring_enqueue_zc_elem_finish(ring, n);
51 * // Create ring with sync type RTE_RING_SYNC_ST or RTE_RING_SYNC_MT_HTS
52 * // Reserve space on the ring
53 * n = rte_ring_enqueue_zc_burst_start(r, 32, &zcd, NULL);
55 * // Pkt I/O core polls packets from the NIC
57 * nb_rx = rte_eth_rx_burst(portid, queueid, zcd->ptr1, zcd->n1);
58 * if (nb_rx == zcd->n1 && n != zcd->n1)
59 * nb_rx = rte_eth_rx_burst(portid, queueid,
60 * zcd->ptr2, n - zcd->n1);
62 * // Provide packets to the packet processing cores
63 * rte_ring_enqueue_zc_finish(r, nb_rx);
66 * Note that between _start_ and _finish_ no other thread can proceed
67 * with an enqueue/dequeue operation until _finish_ completes.
74 #include <rte_ring_peek_elem_pvt.h>
/**
 * Ring zero-copy information structure.
 *
 * This structure contains the pointers and length of the space
 * reserved on the ring storage.
 */
struct rte_ring_zc_data {
	/* Pointer to the first space in the ring */
	void *ptr1;
	/* Pointer to the second space in the ring if there is wrap-around.
	 * It contains valid value only if wrap-around happens.
	 */
	void *ptr2;
	/* Number of elements in the first pointer. If this is equal to
	 * the number of elements requested, then ptr2 is NULL.
	 * Otherwise, subtracting n1 from number of elements requested
	 * will give the number of elements available at ptr2.
	 */
	unsigned int n1;
} __rte_cache_aligned;
97 static __rte_always_inline void
98 __rte_ring_get_elem_addr(struct rte_ring *r, uint32_t head,
99 uint32_t esize, uint32_t num, void **dst1, uint32_t *n1, void **dst2)
101 uint32_t idx, scale, nr_idx;
102 uint32_t *ring = (uint32_t *)&r[1];
104 /* Normalize to uint32_t */
105 scale = esize / sizeof(uint32_t);
106 idx = head & r->mask;
107 nr_idx = idx * scale;
109 *dst1 = ring + nr_idx;
112 if (idx + num > r->size) {
121 * @internal This function moves prod head value.
123 static __rte_always_inline unsigned int
124 __rte_ring_do_enqueue_zc_elem_start(struct rte_ring *r, unsigned int esize,
125 uint32_t n, enum rte_ring_queue_behavior behavior,
126 struct rte_ring_zc_data *zcd, unsigned int *free_space)
128 uint32_t free, head, next;
130 switch (r->prod.sync_type) {
131 case RTE_RING_SYNC_ST:
132 n = __rte_ring_move_prod_head(r, RTE_RING_SYNC_ST, n,
133 behavior, &head, &next, &free);
135 case RTE_RING_SYNC_MT_HTS:
136 n = __rte_ring_hts_move_prod_head(r, n, behavior, &head, &free);
138 case RTE_RING_SYNC_MT:
139 case RTE_RING_SYNC_MT_RTS:
141 /* unsupported mode, shouldn't be here */
148 __rte_ring_get_elem_addr(r, head, esize, n, &zcd->ptr1,
149 &zcd->n1, &zcd->ptr2);
151 if (free_space != NULL)
152 *free_space = free - n;
157 * Start to enqueue several objects on the ring.
158 * Note that no actual objects are put in the queue by this function,
159 * it just reserves space for the user on the ring.
160 * User has to copy objects into the queue using the returned pointers.
161 * User should call rte_ring_enqueue_zc_elem_finish to complete the
165 * A pointer to the ring structure.
167 * The size of ring element, in bytes. It must be a multiple of 4.
169 * The number of objects to add in the ring.
171 * Structure containing the pointers and length of the space
172 * reserved on the ring storage.
174 * If non-NULL, returns the amount of space in the ring after the
175 * reservation operation has finished.
177 * The number of objects that can be enqueued, either 0 or n
179 static __rte_always_inline unsigned int
180 rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
181 unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
183 return __rte_ring_do_enqueue_zc_elem_start(r, esize, n,
184 RTE_RING_QUEUE_FIXED, zcd, free_space);
188 * Start to enqueue several pointers to objects on the ring.
189 * Note that no actual pointers are put in the queue by this function,
190 * it just reserves space for the user on the ring.
191 * User has to copy pointers to objects into the queue using the
193 * User should call rte_ring_enqueue_zc_finish to complete the
197 * A pointer to the ring structure.
199 * The number of objects to add in the ring.
201 * Structure containing the pointers and length of the space
202 * reserved on the ring storage.
204 * If non-NULL, returns the amount of space in the ring after the
205 * reservation operation has finished.
207 * The number of objects that can be enqueued, either 0 or n
209 static __rte_always_inline unsigned int
210 rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n,
211 struct rte_ring_zc_data *zcd, unsigned int *free_space)
213 return rte_ring_enqueue_zc_bulk_elem_start(r, sizeof(uintptr_t), n,
218 * Start to enqueue several objects on the ring.
219 * Note that no actual objects are put in the queue by this function,
220 * it just reserves space for the user on the ring.
221 * User has to copy objects into the queue using the returned pointers.
222 * User should call rte_ring_enqueue_zc_elem_finish to complete the
226 * A pointer to the ring structure.
228 * The size of ring element, in bytes. It must be a multiple of 4.
230 * The number of objects to add in the ring.
232 * Structure containing the pointers and length of the space
233 * reserved on the ring storage.
235 * If non-NULL, returns the amount of space in the ring after the
236 * reservation operation has finished.
238 * The number of objects that can be enqueued, either 0 or n
240 static __rte_always_inline unsigned int
241 rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
242 unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
244 return __rte_ring_do_enqueue_zc_elem_start(r, esize, n,
245 RTE_RING_QUEUE_VARIABLE, zcd, free_space);
249 * Start to enqueue several pointers to objects on the ring.
250 * Note that no actual pointers are put in the queue by this function,
251 * it just reserves space for the user on the ring.
252 * User has to copy pointers to objects into the queue using the
254 * User should call rte_ring_enqueue_zc_finish to complete the
258 * A pointer to the ring structure.
260 * The number of objects to add in the ring.
262 * Structure containing the pointers and length of the space
263 * reserved on the ring storage.
265 * If non-NULL, returns the amount of space in the ring after the
266 * reservation operation has finished.
268 * The number of objects that can be enqueued, either 0 or n.
270 static __rte_always_inline unsigned int
271 rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n,
272 struct rte_ring_zc_data *zcd, unsigned int *free_space)
274 return rte_ring_enqueue_zc_burst_elem_start(r, sizeof(uintptr_t), n,
279 * Complete enqueuing several objects on the ring.
280 * Note that number of objects to enqueue should not exceed previous
281 * enqueue_start return value.
284 * A pointer to the ring structure.
286 * The number of objects to add to the ring.
288 static __rte_always_inline void
289 rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n)
293 switch (r->prod.sync_type) {
294 case RTE_RING_SYNC_ST:
295 n = __rte_ring_st_get_tail(&r->prod, &tail, n);
296 __rte_ring_st_set_head_tail(&r->prod, tail, n, 1);
298 case RTE_RING_SYNC_MT_HTS:
299 n = __rte_ring_hts_get_tail(&r->hts_prod, &tail, n);
300 __rte_ring_hts_set_head_tail(&r->hts_prod, tail, n, 1);
302 case RTE_RING_SYNC_MT:
303 case RTE_RING_SYNC_MT_RTS:
305 /* unsupported mode, shouldn't be here */
311 * Complete enqueuing several pointers to objects on the ring.
312 * Note that number of objects to enqueue should not exceed previous
313 * enqueue_start return value.
316 * A pointer to the ring structure.
318 * The number of pointers to objects to add to the ring.
320 static __rte_always_inline void
321 rte_ring_enqueue_zc_finish(struct rte_ring *r, unsigned int n)
323 rte_ring_enqueue_zc_elem_finish(r, n);
327 * @internal This function moves cons head value and copies up to *n*
328 * objects from the ring to the user provided obj_table.
330 static __rte_always_inline unsigned int
331 __rte_ring_do_dequeue_zc_elem_start(struct rte_ring *r,
332 uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
333 struct rte_ring_zc_data *zcd, unsigned int *available)
335 uint32_t avail, head, next;
337 switch (r->cons.sync_type) {
338 case RTE_RING_SYNC_ST:
339 n = __rte_ring_move_cons_head(r, RTE_RING_SYNC_ST, n,
340 behavior, &head, &next, &avail);
342 case RTE_RING_SYNC_MT_HTS:
343 n = __rte_ring_hts_move_cons_head(r, n, behavior,
346 case RTE_RING_SYNC_MT:
347 case RTE_RING_SYNC_MT_RTS:
349 /* unsupported mode, shouldn't be here */
356 __rte_ring_get_elem_addr(r, head, esize, n, &zcd->ptr1,
357 &zcd->n1, &zcd->ptr2);
359 if (available != NULL)
360 *available = avail - n;
365 * Start to dequeue several objects from the ring.
366 * Note that no actual objects are copied from the queue by this function.
367 * User has to copy objects from the queue using the returned pointers.
368 * User should call rte_ring_dequeue_zc_elem_finish to complete the
372 * A pointer to the ring structure.
374 * The size of ring element, in bytes. It must be a multiple of 4.
376 * The number of objects to remove from the ring.
378 * Structure containing the pointers and length of the space
379 * reserved on the ring storage.
381 * If non-NULL, returns the number of remaining ring entries after the
382 * dequeue has finished.
384 * The number of objects that can be dequeued, either 0 or n.
386 static __rte_always_inline unsigned int
387 rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
388 unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
390 return __rte_ring_do_dequeue_zc_elem_start(r, esize, n,
391 RTE_RING_QUEUE_FIXED, zcd, available);
395 * Start to dequeue several pointers to objects from the ring.
396 * Note that no actual pointers are removed from the queue by this function.
397 * User has to copy pointers to objects from the queue using the
399 * User should call rte_ring_dequeue_zc_finish to complete the
403 * A pointer to the ring structure.
405 * The number of objects to remove from the ring.
407 * Structure containing the pointers and length of the space
408 * reserved on the ring storage.
410 * If non-NULL, returns the number of remaining ring entries after the
411 * dequeue has finished.
413 * The number of objects that can be dequeued, either 0 or n.
415 static __rte_always_inline unsigned int
416 rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n,
417 struct rte_ring_zc_data *zcd, unsigned int *available)
419 return rte_ring_dequeue_zc_bulk_elem_start(r, sizeof(uintptr_t),
424 * Start to dequeue several objects from the ring.
425 * Note that no actual objects are copied from the queue by this function.
426 * User has to copy objects from the queue using the returned pointers.
427 * User should call rte_ring_dequeue_zc_elem_finish to complete the
431 * A pointer to the ring structure.
433 * The size of ring element, in bytes. It must be a multiple of 4.
434 * This must be the same value used while creating the ring. Otherwise
435 * the results are undefined.
437 * The number of objects to dequeue from the ring.
439 * Structure containing the pointers and length of the space
440 * reserved on the ring storage.
442 * If non-NULL, returns the number of remaining ring entries after the
443 * dequeue has finished.
445 * The number of objects that can be dequeued, either 0 or n.
447 static __rte_always_inline unsigned int
448 rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
449 unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
451 return __rte_ring_do_dequeue_zc_elem_start(r, esize, n,
452 RTE_RING_QUEUE_VARIABLE, zcd, available);
456 * Start to dequeue several pointers to objects from the ring.
457 * Note that no actual pointers are removed from the queue by this function.
458 * User has to copy pointers to objects from the queue using the
460 * User should call rte_ring_dequeue_zc_finish to complete the
464 * A pointer to the ring structure.
466 * The number of objects to remove from the ring.
468 * Structure containing the pointers and length of the space
469 * reserved on the ring storage.
471 * If non-NULL, returns the number of remaining ring entries after the
472 * dequeue has finished.
474 * The number of objects that can be dequeued, either 0 or n.
476 static __rte_always_inline unsigned int
477 rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n,
478 struct rte_ring_zc_data *zcd, unsigned int *available)
480 return rte_ring_dequeue_zc_burst_elem_start(r, sizeof(uintptr_t), n,
485 * Complete dequeuing several objects from the ring.
486 * Note that number of objects to dequeued should not exceed previous
487 * dequeue_start return value.
490 * A pointer to the ring structure.
492 * The number of objects to remove from the ring.
494 static __rte_always_inline void
495 rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n)
499 switch (r->cons.sync_type) {
500 case RTE_RING_SYNC_ST:
501 n = __rte_ring_st_get_tail(&r->cons, &tail, n);
502 __rte_ring_st_set_head_tail(&r->cons, tail, n, 0);
504 case RTE_RING_SYNC_MT_HTS:
505 n = __rte_ring_hts_get_tail(&r->hts_cons, &tail, n);
506 __rte_ring_hts_set_head_tail(&r->hts_cons, tail, n, 0);
508 case RTE_RING_SYNC_MT:
509 case RTE_RING_SYNC_MT_RTS:
511 /* unsupported mode, shouldn't be here */
517 * Complete dequeuing several objects from the ring.
518 * Note that number of objects to dequeued should not exceed previous
519 * dequeue_start return value.
522 * A pointer to the ring structure.
524 * The number of objects to remove from the ring.
526 static __rte_always_inline void
527 rte_ring_dequeue_zc_finish(struct rte_ring *r, unsigned int n)
529 rte_ring_dequeue_elem_finish(r, n);
536 #endif /* _RTE_RING_PEEK_ZC_H_ */