/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018-2020 Arm Limited
 */

#ifndef _RTE_RCU_QSBR_H_
#define _RTE_RCU_QSBR_H_

/**
 * @file
 * RTE Quiescent State Based Reclamation (QSBR)
 *
 * A quiescent state (QS) is any point in a thread's execution
 * where the thread does not hold a reference to a data structure
 * in shared memory. When using lock-less data structures, the writer
 * can safely free memory once all the reader threads have entered
 * a quiescent state.
 *
 * This library provides the ability for the readers to report quiescent
 * state and for the writers to identify when all the readers have
 * entered quiescent state.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ring.h>
extern int rte_rcu_log_type;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define __RTE_RCU_DP_LOG(level, fmt, args...) \
    rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
        "%s(): " fmt "\n", __func__, ## args)
#else
#define __RTE_RCU_DP_LOG(level, fmt, args...)
#endif
#if defined(RTE_LIBRTE_RCU_DEBUG)
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...) do { \
    if (v->qsbr_cnt[thread_id].lock_cnt) \
        rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
            "%s(): " fmt "\n", __func__, ## args); \
} while (0)
#else
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...)
#endif
/* Registered thread IDs are stored as a bitmap of 64b element array.
 * A given thread ID needs to be converted to an index into the array and
 * to a bit position within that array element.
 */
#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8)
#define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
    RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
        __RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \
    ((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
#define __RTE_QSBR_THRID_INDEX_SHIFT 6
#define __RTE_QSBR_THRID_MASK 0x3f
#define RTE_QSBR_THRID_INVALID 0xffffffff
/* Worker thread counter */
struct rte_rcu_qsbr_cnt {
    uint64_t cnt;
    /**< Quiescent state counter. Value 0 indicates the thread is offline.
     * A 64b counter is used to avoid adding more code to handle
     * counter overflow. Changing this to 32b would require additional
     * changes to various APIs.
     */
    uint32_t lock_cnt;
    /**< Lock counter. Used when CONFIG_RTE_LIBRTE_RCU_DEBUG is enabled */
} __rte_cache_aligned;
#define __RTE_QSBR_CNT_THR_OFFLINE 0
#define __RTE_QSBR_CNT_INIT 1
#define __RTE_QSBR_CNT_MAX ((uint64_t)~0)
#define __RTE_QSBR_TOKEN_SIZE sizeof(uint64_t)
/* RTE Quiescent State variable structure.
 * This structure has two elements that vary in size based on the
 * 'max_threads' parameter.
 * 1) Quiescent state counter array
 * 2) Registered thread ID array
 */
struct rte_rcu_qsbr {
    uint64_t token __rte_cache_aligned;
    /**< Counter to allow for multiple concurrent quiescent state queries */
    uint64_t acked_token;
    /**< Least token acked by all the threads in the last call to
     * the rte_rcu_qsbr_check API.
     */

    uint32_t num_elems __rte_cache_aligned;
    /**< Number of elements in the thread ID array */
    uint32_t num_threads;
    /**< Number of threads currently using this QS variable */
    uint32_t max_threads;
    /**< Maximum number of threads using this QS variable */

    struct rte_rcu_qsbr_cnt qsbr_cnt[0] __rte_cache_aligned;
    /**< Quiescent state counter array of 'max_threads' elements */

    /**< Registered thread IDs are stored in a bitmap array,
     * after the quiescent state counter array.
     */
} __rte_cache_aligned;
/**
 * Call back function invoked to free the resources.
 *
 * @param p
 *   Pointer provided while creating the defer queue
 * @param e
 *   Pointer to the resource data stored on the defer queue
 * @param n
 *   Number of resources to free. Currently, this is set to 1.
 *
 * @return
 *   None
 */
typedef void (*rte_rcu_qsbr_free_resource_t)(void *p, void *e, unsigned int n);
#define RTE_RCU_QSBR_DQ_NAMESIZE RTE_RING_NAMESIZE
/**
 * Various flags supported.
 */
/**< Enqueue and reclaim operations are multi-thread safe by default.
 * The call back functions registered to free the resources are
 * assumed to be multi-thread safe.
 * Set this flag if multi-thread safety is not required.
 */
#define RTE_RCU_QSBR_DQ_MT_UNSAFE 1
/**
 * Parameters used when creating the defer queue.
 */
struct rte_rcu_qsbr_dq_parameters {
    const char *name;
    /**< Name of the queue. */
    uint32_t flags;
    /**< Flags to control API behaviors */
    uint32_t size;
    /**< Number of entries in the queue. Typically, this will be
     * the same as the maximum number of entries supported in the
     * lock free data structure.
     * Data structures with an unbounded number of entries are not
     * currently supported.
     */
    uint32_t esize;
    /**< Size (in bytes) of each element in the defer queue.
     * This has to be a multiple of 4B.
     */
    uint32_t trigger_reclaim_limit;
    /**< Trigger automatic reclamation after the defer queue
     * holds at least this many resources waiting. This auto
     * reclamation is triggered in the rte_rcu_qsbr_dq_enqueue API
     * call.
     * If this is greater than 'size', auto reclamation is
     * disabled.
     * If this is set to 0, auto reclamation is triggered
     * in every call to the rte_rcu_qsbr_dq_enqueue API.
     */
    uint32_t max_reclaim_size;
    /**< When automatic reclamation is enabled, reclaim at most
     * this many resources. This must contain a valid value if
     * auto reclamation is on. Setting this to 'size' or greater will
     * reclaim all possible resources currently on the defer queue.
     */
    rte_rcu_qsbr_free_resource_t free_fn;
    /**< Function to call to free the resource. */
    void *p;
    /**< Pointer passed to the free function. Typically, this is the
     * pointer to the data structure to which the resource to free
     * belongs. This can be NULL.
     */
    struct rte_rcu_qsbr *v;
    /**< RCU QSBR variable to use for this defer queue */
};
/* RTE defer queue structure.
 * This structure holds the defer queue. The defer queue is used to
 * hold the deleted entries from the data structure that are not
 * yet freed.
 */
struct rte_rcu_qsbr_dq;
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Return the size of the memory occupied by a Quiescent State variable.
 *
 * @param max_threads
 *   Maximum number of threads reporting quiescent state on this variable.
 * @return
 *   On success - size of memory in bytes required for this QS variable.
 *   On error - 1 with error code set in rte_errno.
 *   Possible rte_errno codes are:
 *   - EINVAL - max_threads is 0
 */
__rte_experimental
size_t
rte_rcu_qsbr_get_memsize(uint32_t max_threads);
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Initialize a Quiescent State (QS) variable.
 *
 * @param v
 *   QS variable
 * @param max_threads
 *   Maximum number of threads reporting quiescent state on this variable.
 *   This should be the same value as passed to rte_rcu_qsbr_get_memsize.
 * @return
 *   On success - 0
 *   On error - 1 with error code set in rte_errno.
 *   Possible rte_errno codes are:
 *   - EINVAL - max_threads is 0 or 'v' is NULL.
 */
__rte_experimental
int
rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads);
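
/* Illustrative setup sketch (not part of the API): allocate and initialize
 * a QS variable sized for RTE_MAX_LCORE readers. rte_zmalloc is one possible
 * allocator; any zeroed, cache-line aligned memory of the size reported by
 * rte_rcu_qsbr_get_memsize works.
 *
 *   size_t sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
 *   struct rte_rcu_qsbr *v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *
 *   if (v == NULL || rte_rcu_qsbr_init(v, RTE_MAX_LCORE) != 0)
 *       rte_panic("QSBR variable creation failed\n");
 */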
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Register a reader thread to report its quiescent state
 * on a QS variable.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 * Any reader thread that wants to report its quiescent state must
 * call this API. This can be called during initialization or as part
 * of the packet processing loop.
 *
 * Note that rte_rcu_qsbr_thread_online must be called before the
 * thread updates its quiescent state using rte_rcu_qsbr_quiescent.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread with this thread ID will report its quiescent state on
 *   the QS variable. thread_id is a value between 0 and (max_threads - 1).
 *   'max_threads' is the parameter passed in the 'rte_rcu_qsbr_init' API.
 */
__rte_experimental
int
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id);
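
/* A reader thread typically registers once, before entering its packet
 * processing loop. A minimal sketch, assuming the lcore ID is used as the
 * thread ID (any value below 'max_threads' works):
 *
 *   unsigned int thread_id = rte_lcore_id();
 *
 *   rte_rcu_qsbr_thread_register(v, thread_id);
 *   rte_rcu_qsbr_thread_online(v, thread_id);
 */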
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Remove a reader thread from the list of threads reporting their
 * quiescent state on a QS variable.
 *
 * This is implemented as a lock-free function. It is multi-thread safe.
 * This API can be called from the reader threads during shutdown.
 * Ongoing quiescent state queries will stop waiting for the status from this
 * unregistered reader thread.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread with this thread ID will stop reporting its quiescent
 *   state on the QS variable.
 */
__rte_experimental
int
rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id);
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Add a registered reader thread to the list of threads reporting their
 * quiescent state on a QS variable.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 *
 * Any registered reader thread that wants to report its quiescent state must
 * call this API before calling rte_rcu_qsbr_quiescent. This can be called
 * during initialization or as part of the packet processing loop.
 *
 * The reader thread must call the rte_rcu_qsbr_thread_offline API, before
 * calling any functions that block, to ensure that the rte_rcu_qsbr_check
 * API does not wait indefinitely for the reader thread to update its QS.
 *
 * The reader thread must call the rte_rcu_qsbr_thread_online API, after the
 * blocking function call returns, to ensure that the rte_rcu_qsbr_check API
 * waits for the reader thread to update its quiescent state.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread with this thread ID will report its quiescent state on
 *   the QS variable.
 */
__rte_experimental
static __rte_always_inline void
rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
    uint64_t t;

    RTE_ASSERT(v != NULL && thread_id < v->max_threads);

    __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
                v->qsbr_cnt[thread_id].lock_cnt);

    /* Copy the current value of the token.
     * The fence at the end of the function will ensure that
     * the following will not move down after the load of any shared
     * data structure.
     */
    t = __atomic_load_n(&v->token, __ATOMIC_RELAXED);

    /* __atomic_store_n(cnt, __ATOMIC_RELAXED) is used to ensure
     * 'cnt' (64b) is accessed atomically.
     */
    __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
        t, __ATOMIC_RELAXED);

    /* The subsequent load of the data structure should not
     * move above the store. Hence a store-load barrier
     * is required.
     * If the load of the data structure moves above the store,
     * the writer might not see that the reader is online, even though
     * the reader is referencing the shared data structure.
     */
#ifdef RTE_ARCH_X86_64
    /* rte_smp_mb() for x86 is lighter */
    rte_smp_mb();
#else
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
#endif
}
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Remove a registered reader thread from the list of threads reporting their
 * quiescent state on a QS variable.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 *
 * This can be called during initialization or as part of the packet
 * processing loop.
 *
 * The reader thread must call the rte_rcu_qsbr_thread_offline API, before
 * calling any functions that block, to ensure that the rte_rcu_qsbr_check
 * API does not wait indefinitely for the reader thread to update its QS.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   rte_rcu_qsbr_check API will not wait for the reader thread with
 *   this thread ID to report its quiescent state on the QS variable.
 */
__rte_experimental
static __rte_always_inline void
rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
    RTE_ASSERT(v != NULL && thread_id < v->max_threads);

    __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
                v->qsbr_cnt[thread_id].lock_cnt);

    /* The reader can go offline only after the load of the
     * data structure is completed, i.e. any load of the
     * data structure can not move after this store.
     */
    __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
        __RTE_QSBR_CNT_THR_OFFLINE, __ATOMIC_RELEASE);
}
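
/* A sketch of the offline/online pattern around a blocking call, as
 * described above (identifiers other than the QSBR APIs are placeholders):
 *
 *   rte_rcu_qsbr_thread_offline(v, thread_id);
 *
 *   some_blocking_call();    // e.g. epoll_wait() or a control-plane RPC
 *
 *   rte_rcu_qsbr_thread_online(v, thread_id);
 *
 * While the thread is offline, rte_rcu_qsbr_check treats it as having
 * passed through the quiescent state, so the writer is never blocked on it.
 */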
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Acquire a lock for accessing a shared data structure.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 *
 * This API is provided to aid debugging. This should be called before
 * accessing a shared data structure.
 *
 * When CONFIG_RTE_LIBRTE_RCU_DEBUG is enabled, a lock counter is incremented.
 * Similarly, rte_rcu_qsbr_unlock will decrement the counter. The
 * rte_rcu_qsbr_check API will verify that this counter is 0.
 *
 * When CONFIG_RTE_LIBRTE_RCU_DEBUG is disabled, this API will do nothing.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread id
 */
__rte_experimental
static __rte_always_inline void
rte_rcu_qsbr_lock(__rte_unused struct rte_rcu_qsbr *v,
            __rte_unused unsigned int thread_id)
{
    RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
    /* Increment the lock counter */
    __atomic_fetch_add(&v->qsbr_cnt[thread_id].lock_cnt,
                1, __ATOMIC_ACQUIRE);
#endif
}
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Release a lock after accessing a shared data structure.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 *
 * This API is provided to aid debugging. This should be called after
 * accessing a shared data structure.
 *
 * When CONFIG_RTE_LIBRTE_RCU_DEBUG is enabled, rte_rcu_qsbr_unlock will
 * decrement a lock counter. The rte_rcu_qsbr_check API will verify that this
 * counter is 0.
 *
 * When CONFIG_RTE_LIBRTE_RCU_DEBUG is disabled, this API will do nothing.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread id
 */
__rte_experimental
static __rte_always_inline void
rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
            __rte_unused unsigned int thread_id)
{
    RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
    /* Decrement the lock counter */
    __atomic_fetch_sub(&v->qsbr_cnt[thread_id].lock_cnt,
                1, __ATOMIC_RELEASE);

    __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
                "Lock counter %u. Nested locks?\n",
                v->qsbr_cnt[thread_id].lock_cnt);
#endif
}
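
/* A sketch of the debug lock/unlock bracket around a read-side access
 * ('lookup_entry' and 'tbl' are placeholders):
 *
 *   rte_rcu_qsbr_lock(v, thread_id);
 *   entry = lookup_entry(tbl, key);   // dereference the shared structure
 *   rte_rcu_qsbr_unlock(v, thread_id);
 *
 * With RTE_LIBRTE_RCU_DEBUG disabled, both calls effectively become no-ops,
 * so the bracket can be left in production code at no cost.
 */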
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Ask the reader threads to report the quiescent state
 * status.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe and can be called from worker threads.
 *
 * @param v
 *   QS variable
 * @return
 *   - This is the token for this call of the API. This should be
 *     passed to the rte_rcu_qsbr_check API.
 */
__rte_experimental
static __rte_always_inline uint64_t
rte_rcu_qsbr_start(struct rte_rcu_qsbr *v)
{
    uint64_t t;

    RTE_ASSERT(v != NULL);

    /* Release the changes to the shared data structure.
     * This store release will ensure that changes to any data
     * structure are visible to the workers before the token
     * update is visible.
     */
    t = __atomic_add_fetch(&v->token, 1, __ATOMIC_RELEASE);

    return t;
}
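
/* Because the token is a monotonically increasing counter, multiple grace
 * periods can be in flight at once. A sketch:
 *
 *   uint64_t t1 = rte_rcu_qsbr_start(v);   // after deleting entry 1
 *   uint64_t t2 = rte_rcu_qsbr_start(v);   // after deleting entry 2
 *
 *   // t2 > t1; readers that have acked t2 have implicitly acked t1 too
 *   if (rte_rcu_qsbr_check(v, t2, false))
 *       ;  // both entries are now safe to free
 */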
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Update quiescent state for a reader thread.
 *
 * This is implemented as a lock-free function. It is multi-thread safe.
 * All the reader threads registered to report their quiescent state
 * on the QS variable must call this API.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Update the quiescent state for the reader with this thread ID.
 */
__rte_experimental
static __rte_always_inline void
rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
    uint64_t t;

    RTE_ASSERT(v != NULL && thread_id < v->max_threads);

    __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
                v->qsbr_cnt[thread_id].lock_cnt);

    /* Acquire the changes to the shared data structure released
     * by rte_rcu_qsbr_start.
     * Later loads of the shared data structure should not move
     * above this load. Hence, use load-acquire.
     */
    t = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);

    /* Check if there are updates available from the writer.
     * Inform the writer that updates are visible to this reader.
     * Prior loads of the shared data structure should not move
     * beyond this store. Hence use store-release.
     */
    if (t != __atomic_load_n(&v->qsbr_cnt[thread_id].cnt, __ATOMIC_RELAXED))
        __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
                    t, __ATOMIC_RELEASE);

    __RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %"PRIu64", Thread ID = %d",
        __func__, t, thread_id);
}
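
/* A sketch of a complete reader loop combining the APIs above
 * ('process_one_packet' and 'done' are placeholders):
 *
 *   rte_rcu_qsbr_thread_register(v, thread_id);
 *   rte_rcu_qsbr_thread_online(v, thread_id);
 *
 *   while (!done) {
 *       rte_rcu_qsbr_lock(v, thread_id);   // no-op unless debug is on
 *       process_one_packet();              // references shared structures
 *       rte_rcu_qsbr_unlock(v, thread_id);
 *
 *       // No references held here: report the quiescent state
 *       rte_rcu_qsbr_quiescent(v, thread_id);
 *   }
 *
 *   rte_rcu_qsbr_thread_offline(v, thread_id);
 *   rte_rcu_qsbr_thread_unregister(v, thread_id);
 */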
/* Check the quiescent state counter for registered threads only, assuming
 * that not all threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
    uint32_t i, j, id;
    uint64_t bmap;
    uint64_t c;
    uint64_t *reg_thread_id;
    uint64_t acked_token = __RTE_QSBR_CNT_MAX;

    for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
        i < v->num_elems;
        i++, reg_thread_id++) {
        /* Load the current registered thread bit map before
         * loading the reader thread quiescent state counters.
         */
        bmap = __atomic_load_n(reg_thread_id, __ATOMIC_ACQUIRE);
        id = i << __RTE_QSBR_THRID_INDEX_SHIFT;

        while (bmap) {
            j = __builtin_ctzl(bmap);
            __RTE_RCU_DP_LOG(DEBUG,
                "%s: check: token = %"PRIu64", wait = %d, Bit Map = 0x%"PRIx64", Thread ID = %d",
                __func__, t, wait, bmap, id + j);
            c = __atomic_load_n(&v->qsbr_cnt[id + j].cnt,
                    __ATOMIC_ACQUIRE);
            __RTE_RCU_DP_LOG(DEBUG,
                "%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d",
                __func__, t, wait, c, id + j);

            /* Counter is not checked for wrap-around condition
             * as it is a 64b counter.
             */
            if (unlikely(c !=
                __RTE_QSBR_CNT_THR_OFFLINE && c < t)) {
                /* This thread is not in quiescent state */
                if (!wait)
                    return 0;

                rte_pause();
                /* This thread might have unregistered.
                 * Re-read the bitmap.
                 */
                bmap = __atomic_load_n(reg_thread_id,
                        __ATOMIC_ACQUIRE);

                continue;
            }

            /* This thread is in quiescent state. Use the counter
             * to find the least acknowledged token among all the
             * readers.
             */
            if (c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c)
                acked_token = c;

            bmap &= ~(1UL << j);
        }
    }

    /* All readers are checked, update least acknowledged token.
     * There might be multiple writers trying to update this. There is
     * no need to update this very accurately using compare-and-swap.
     */
    if (acked_token != __RTE_QSBR_CNT_MAX)
        __atomic_store_n(&v->acked_token, acked_token,
            __ATOMIC_RELAXED);

    return 1;
}
/* Check the quiescent state counter for all threads, assuming that
 * all the threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
    uint32_t i;
    struct rte_rcu_qsbr_cnt *cnt;
    uint64_t c;
    uint64_t acked_token = __RTE_QSBR_CNT_MAX;

    for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) {
        __RTE_RCU_DP_LOG(DEBUG,
            "%s: check: token = %"PRIu64", wait = %d, Thread ID = %d",
            __func__, t, wait, i);
        while (1) {
            c = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE);
            __RTE_RCU_DP_LOG(DEBUG,
                "%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d",
                __func__, t, wait, c, i);

            /* Counter is not checked for wrap-around condition
             * as it is a 64b counter.
             */
            if (likely(c == __RTE_QSBR_CNT_THR_OFFLINE || c >= t))
                break;

            /* This thread is not in quiescent state */
            if (!wait)
                return 0;

            rte_pause();
        }

        /* This thread is in quiescent state. Use the counter to find
         * the least acknowledged token among all the readers.
         */
        if (likely(c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c))
            acked_token = c;
    }

    /* All readers are checked, update least acknowledged token.
     * There might be multiple writers trying to update this. There is
     * no need to update this very accurately using compare-and-swap.
     */
    if (acked_token != __RTE_QSBR_CNT_MAX)
        __atomic_store_n(&v->acked_token, acked_token,
            __ATOMIC_RELAXED);

    return 1;
}
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Checks if all the reader threads have entered the quiescent state
 * referenced by token.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe and can be called from the worker threads as well.
 *
 * If this API is called with 'wait' set to true, the following
 * factors must be considered:
 *
 * 1) If the calling thread is also reporting the status on the
 * same QS variable, it must update the quiescent state status before
 * calling this API.
 *
 * 2) In addition, while calling from multiple threads, only
 * one of those threads can be reporting the quiescent state status
 * on a given QS variable.
 *
 * @param v
 *   QS variable
 * @param t
 *   Token returned by the rte_rcu_qsbr_start API
 * @param wait
 *   If true, block till all the reader threads have completed entering
 *   the quiescent state referenced by token 't'.
 * @return
 *   - 0 if all reader threads have NOT passed through the specified number
 *     of quiescent states.
 *   - 1 if all reader threads have passed through the specified number
 *     of quiescent states.
 */
__rte_experimental
static __rte_always_inline int
rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
    RTE_ASSERT(v != NULL);

    /* Check if all the readers have already acknowledged this token */
    if (likely(t <= v->acked_token)) {
        __RTE_RCU_DP_LOG(DEBUG,
            "%s: check: token = %"PRIu64", wait = %d",
            __func__, t, wait);
        __RTE_RCU_DP_LOG(DEBUG,
            "%s: status: least acked token = %"PRIu64"",
            __func__, v->acked_token);
        return 1;
    }

    if (likely(v->num_threads == v->max_threads))
        return __rte_rcu_qsbr_check_all(v, t, wait);
    else
        return __rte_rcu_qsbr_check_selective(v, t, wait);
}
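
/* A sketch of the writer-side deferred free built on start/check
 * ('remove_entry' and 'tbl' are placeholders):
 *
 *   struct entry *e = remove_entry(tbl, key);  // unlink from the structure
 *
 *   uint64_t token = rte_rcu_qsbr_start(v);
 *
 *   // ... optionally do other work while the readers drain ...
 *
 *   if (rte_rcu_qsbr_check(v, token, true))    // blocks until quiescent
 *       rte_free(e);                           // now safe to free
 */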
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Wait till the reader threads have entered quiescent state.
 *
 * This is implemented as a lock-free function. It is multi-thread safe.
 * This API can be thought of as a wrapper around the rte_rcu_qsbr_start and
 * rte_rcu_qsbr_check APIs.
 *
 * If this API is called from multiple threads, only one of
 * those threads can be reporting the quiescent state status on a
 * given QS variable.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Thread ID of the caller if it is registered to report quiescent state
 *   on this QS variable (i.e. the calling thread is also part of the
 *   readside critical section). If not, pass RTE_QSBR_THRID_INVALID.
 */
__rte_experimental
void
rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id);
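
/* A sketch of the simpler blocking form, called from an unregistered
 * control-plane thread ('remove_entry' and 'tbl' are placeholders):
 *
 *   struct entry *e = remove_entry(tbl, key);
 *
 *   rte_rcu_qsbr_synchronize(v, RTE_QSBR_THRID_INVALID);
 *
 *   rte_free(e);  // all registered readers have reported quiescence
 */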
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Dump the details of a single QS variable to a file.
 *
 * It is NOT multi-thread safe.
 *
 * @param f
 *   A pointer to a file for output
 * @param v
 *   QS variable
 * @return
 *   On success - 0
 *   On error - 1 with error code set in rte_errno.
 *   Possible rte_errno codes are:
 *   - EINVAL - NULL parameters are passed
 */
__rte_experimental
int
rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v);
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Create a queue used to store the data structure elements that can
 * be freed later. This queue is referred to as the 'defer queue'.
 *
 * @param params
 *   Parameters to create a defer queue.
 * @return
 *   On success - Valid pointer to defer queue
 *   On error - NULL
 *   Possible rte_errno codes are:
 *   - EINVAL - NULL parameters are passed
 *   - ENOMEM - Not enough memory
 */
__rte_experimental
struct rte_rcu_qsbr_dq *
rte_rcu_qsbr_dq_create(const struct rte_rcu_qsbr_dq_parameters *params);
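
/* A sketch of defer queue creation ('free_entry_cb' and 'tbl' are
 * placeholders; the sizes depend on the protected data structure):
 *
 *   struct rte_rcu_qsbr_dq_parameters params = {
 *       .name = "tbl_dq",
 *       .size = 1024,                    // matches the table capacity
 *       .esize = sizeof(struct entry *), // must be a multiple of 4B
 *       .trigger_reclaim_limit = 64,
 *       .max_reclaim_size = 32,
 *       .free_fn = free_entry_cb,
 *       .p = tbl,
 *       .v = v,
 *   };
 *
 *   struct rte_rcu_qsbr_dq *dq = rte_rcu_qsbr_dq_create(&params);
 */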
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Enqueue one resource to the defer queue and start the grace period.
 * The resource will be freed later after at least one grace period
 * is over.
 *
 * If the defer queue is full, it will attempt to reclaim resources.
 * It will also reclaim resources at regular intervals to keep
 * the defer queue from growing too big.
 *
 * Multi-thread safety is provided as per the defer queue configuration.
 * When multi-thread safety is requested, it is possible that the
 * resources are not stored in their order of deletion. This results
 * in resources being held in the defer queue longer than they should be.
 *
 * @param dq
 *   Defer queue to allocate an entry from.
 * @param e
 *   Pointer to resource data to copy to the defer queue. The size of
 *   the data to copy is equal to the element size provided when the
 *   defer queue was created.
 * @return
 *   On success - 0
 *   On error - 1 with rte_errno set to
 *   - EINVAL - NULL parameters are passed
 *   - ENOSPC - Defer queue is full. This condition can not happen
 *     if the defer queue size is equal to (or larger than) the
 *     number of elements in the data structure.
 */
__rte_experimental
int
rte_rcu_qsbr_dq_enqueue(struct rte_rcu_qsbr_dq *dq, void *e);
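
/* A sketch of a writer using the defer queue instead of a manual
 * start/check cycle ('remove_entry' and 'tbl' are placeholders):
 *
 *   struct entry *e = remove_entry(tbl, key);  // unlink from the structure
 *
 *   // Copies the pointer onto the queue and starts the grace period;
 *   // the registered free function is invoked once it is safe to free 'e'.
 *   if (rte_rcu_qsbr_dq_enqueue(dq, &e) != 0)
 *       handle_enqueue_failure();               // placeholder error path
 */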
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Free resources from the defer queue.
 *
 * This API is multi-thread safe.
 *
 * @param dq
 *   Defer queue to free an entry from.
 * @param n
 *   Maximum number of resources to free.
 * @param freed
 *   Number of resources that were freed.
 * @param pending
 *   Number of resources pending on the defer queue. This number might not
 *   be accurate if multi-thread safety is configured.
 * @param available
 *   Number of resources that can be added to the defer queue.
 *   This number might not be accurate if multi-thread safety is configured.
 * @return
 *   On successful reclamation of at least 1 resource - 0
 *   On error - 1 with rte_errno set to
 *   - EINVAL - NULL parameters are passed
 */
__rte_experimental
int
rte_rcu_qsbr_dq_reclaim(struct rte_rcu_qsbr_dq *dq, unsigned int n,
    unsigned int *freed, unsigned int *pending, unsigned int *available);
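
/* A sketch of an explicit reclaim pass, e.g. from a slow-path
 * maintenance loop:
 *
 *   unsigned int freed, pending, available;
 *
 *   if (rte_rcu_qsbr_dq_reclaim(dq, 32, &freed, &pending, &available) == 0)
 *       printf("freed %u, %u still pending\n", freed, pending);
 */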
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * Delete a defer queue.
 *
 * It tries to reclaim all the resources on the defer queue.
 * If any of the resources have not completed the grace period,
 * the reclamation stops and returns immediately. The rest of
 * the resources are not reclaimed and the defer queue is not
 * freed.
 *
 * @param dq
 *   Defer queue to delete.
 * @return
 *   On success - 0
 *   On error - 1
 *   Possible rte_errno codes are:
 *   - EAGAIN - Some of the resources have not completed at least 1 grace
 *     period, try again.
 */
__rte_experimental
int
rte_rcu_qsbr_dq_delete(struct rte_rcu_qsbr_dq *dq);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RCU_QSBR_H_ */