1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2018-2020 Arm Limited
5 #ifndef _RTE_RCU_QSBR_H_
6 #define _RTE_RCU_QSBR_H_
11 * RTE Quiescent State Based Reclamation (QSBR).
15 * All functions in this file may be changed or removed without prior notice.
17 * Quiescent State (QS) is any point in the thread execution
18 * where the thread does not hold a reference to a data structure
19 * in shared memory. While using lock-less data structures, the writer
20 * can safely free memory once all the reader threads have entered
23 * This library provides the ability for the readers to report quiescent
24 * state and for the writers to identify when all the readers have
25 * entered quiescent state.
37 #include <rte_common.h>
38 #include <rte_memory.h>
39 #include <rte_lcore.h>
40 #include <rte_debug.h>
41 #include <rte_atomic.h>
44 extern int rte_rcu_log_type;
46 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
47 #define __RTE_RCU_DP_LOG(level, fmt, args...) \
48 rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
49 "%s(): " fmt "\n", __func__, ## args)
51 #define __RTE_RCU_DP_LOG(level, fmt, args...)
54 #if defined(RTE_LIBRTE_RCU_DEBUG)
55 #define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...) do {\
56 if (v->qsbr_cnt[thread_id].lock_cnt) \
57 rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
58 "%s(): " fmt "\n", __func__, ## args); \
61 #define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...)
64 /* Registered thread IDs are stored as a bitmap of 64b element array.
65 * Given thread id needs to be converted to index into the array and
66 * the id within the array element.
/* Number of thread-id bits held by one bitmap element (64). */
68 #define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8)
/* Bytes needed to hold a bitmap of 'max_threads' bits: round the bit
 * count up to a whole 64b element, convert bits to bytes (>> 3), then
 * pad to a cache-line multiple.
 */
69 #define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
70 RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
71 __RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
/* Address of the i-th 64b bitmap element. The bitmap is laid out
 * immediately after the 'max_threads' per-thread counter structs that
 * follow the rte_rcu_qsbr header (v + 1).
 */
72 #define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \
73 ((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
/* thread_id >> 6 selects the bitmap element; thread_id & 0x3f selects
 * the bit within that element.
 */
74 #define __RTE_QSBR_THRID_INDEX_SHIFT 6
75 #define __RTE_QSBR_THRID_MASK 0x3f
/* Sentinel thread id meaning "caller is not a registered reader". */
76 #define RTE_QSBR_THRID_INVALID 0xffffffff
78 /* Worker thread counter */
/* Per-reader-thread state, cache-line aligned so each reader updates
 * its own line and avoids false sharing with neighbouring threads.
 * NOTE(review): the member declarations themselves (the 64b quiescent
 * state counter and the lock counter) are elided in this listing;
 * only their doc comments remain.
 */
79 struct rte_rcu_qsbr_cnt {
81 /**< Quiescent state counter. Value 0 indicates the thread is offline
82 * 64b counter is used to avoid adding more code to address
83 * counter overflow. Changing this to 32b would require additional
84 * changes to various APIs.
87 /**< Lock counter. Used when CONFIG_RTE_LIBRTE_RCU_DEBUG is enabled */
88 } __rte_cache_aligned;
/* Counter value 0 marks a thread offline (see struct rte_rcu_qsbr_cnt). */
90 #define __RTE_QSBR_CNT_THR_OFFLINE 0
/* First valid quiescent state counter value, set at initialization. */
91 #define __RTE_QSBR_CNT_INIT 1
/* All-ones sentinel; the check functions use it as the initial
 * "no token acknowledged yet" value when accumulating acked_token.
 */
92 #define __RTE_QSBR_CNT_MAX ((uint64_t)~0)
/* Size in bytes of the 64b grace-period token. */
93 #define __RTE_QSBR_TOKEN_SIZE sizeof(uint64_t)
95 /* RTE Quiescent State variable structure.
96 * This structure has two elements that vary in size based on the
97 * 'max_threads' parameter.
98 * 1) Quiescent state counter array
99 * 2) Register thread ID array
101 struct rte_rcu_qsbr {
/* Writer-side grace-period token, incremented by rte_rcu_qsbr_start.
 * Kept on its own cache line; readers load it in
 * rte_rcu_qsbr_quiescent and rte_rcu_qsbr_thread_online.
 */
102 uint64_t token __rte_cache_aligned;
103 /**< Counter to allow for multiple concurrent quiescent state queries */
104 uint64_t acked_token;
105 /**< Least token acked by all the threads in the last call to
106 * rte_rcu_qsbr_check API.
109 uint32_t num_elems __rte_cache_aligned;
110 /**< Number of elements in the thread ID array */
111 uint32_t num_threads;
112 /**< Number of threads currently using this QS variable */
113 uint32_t max_threads;
114 /**< Maximum number of threads using this QS variable */
/* Zero-length trailing array: 'max_threads' per-thread counters,
 * followed in memory by the registered-thread-ID bitmap (see
 * __RTE_QSBR_THRID_ARRAY_ELM).
 */
116 struct rte_rcu_qsbr_cnt qsbr_cnt[0] __rte_cache_aligned;
117 /**< Quiescent state counter array of 'max_threads' elements */
119 /**< Registered thread IDs are stored in a bitmap array,
120 * after the quiescent state counter array.
122 } __rte_cache_aligned;
125 * Call back function called to free the resources.
128 * Pointer provided while creating the defer queue
130 * Pointer to the resource data stored on the defer queue
132 * Number of resources to free. Currently, this is set to 1.
137 typedef void (*rte_rcu_qsbr_free_resource_t)(void *p, void *e, unsigned int n);
139 #define RTE_RCU_QSBR_DQ_NAMESIZE RTE_RING_NAMESIZE
142 * Various flags supported.
144 /**< Enqueue and reclaim operations are multi-thread safe by default.
145 * The call back functions registered to free the resources are
146 * assumed to be multi-thread safe.
147 * Set this flag if multi-thread safety is not required.
149 #define RTE_RCU_QSBR_DQ_MT_UNSAFE 1
152 * Parameters used when creating the defer queue.
154 struct rte_rcu_qsbr_dq_parameters {
/* NOTE(review): several member declarations (queue name, flags,
 * queue size, element size, free-function context pointer) are
 * elided in this listing; only their doc comments remain.
 */
156 /**< Name of the queue. */
158 /**< Flags to control API behaviors */
160 /**< Number of entries in queue. Typically, this will be
161 * the same as the maximum number of entries supported in the
162 * lock free data structure.
163 * Data structures with unbounded number of entries are not
164 * supported currently.
167 /**< Size (in bytes) of each element in the defer queue.
168 * This has to be a multiple of 4B.
170 uint32_t trigger_reclaim_limit;
171 /**< Trigger automatic reclamation after the defer queue
172 * has at least these many resources waiting. This auto
173 * reclamation is triggered in rte_rcu_qsbr_dq_enqueue API
175 * If this is greater than 'size', auto reclamation is
177 * If this is set to 0, auto reclamation is triggered
178 * in every call to rte_rcu_qsbr_dq_enqueue API.
180 uint32_t max_reclaim_size;
181 /**< When automatic reclamation is enabled, reclaim at the max
182 * these many resources. This should contain a valid value, if
183 * auto reclamation is on. Setting this to 'size' or greater will
184 * reclaim all possible resources currently on the defer queue.
186 rte_rcu_qsbr_free_resource_t free_fn;
187 /**< Function to call to free the resource. */
189 /**< Pointer passed to the free function. Typically, this is the
190 * pointer to the data structure to which the resource to free
191 * belongs. This can be NULL.
193 struct rte_rcu_qsbr *v;
194 /**< RCU QSBR variable to use for this defer queue */
197 /* RTE defer queue structure.
198 * This structure holds the defer queue. The defer queue is used to
199 * hold the deleted entries from the data structure that are not
202 struct rte_rcu_qsbr_dq;
206 * @b EXPERIMENTAL: this API may change without prior notice
208 * Return the size of the memory occupied by a Quiescent State variable.
211 * Maximum number of threads reporting quiescent state on this variable.
213 * On success - size of memory in bytes required for this QS variable.
214 * On error - 1 with error code set in rte_errno.
215 * Possible rte_errno codes are:
216 * - EINVAL - max_threads is 0
220 rte_rcu_qsbr_get_memsize(uint32_t max_threads);
224 * @b EXPERIMENTAL: this API may change without prior notice
226 * Initialize a Quiescent State (QS) variable.
231 * Maximum number of threads reporting quiescent state on this variable.
232 * This should be the same value as passed to rte_rcu_qsbr_get_memsize.
235 * On error - 1 with error code set in rte_errno.
236 * Possible rte_errno codes are:
237 * - EINVAL - max_threads is 0 or 'v' is NULL.
242 rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads);
246 * @b EXPERIMENTAL: this API may change without prior notice
248 * Register a reader thread to report its quiescent state
251 * This is implemented as a lock-free function. It is multi-thread
253 * Any reader thread that wants to report its quiescent state must
254 * call this API. This can be called during initialization or as part
255 * of the packet processing loop.
257 * Note that rte_rcu_qsbr_thread_online must be called before the
258 * thread updates its quiescent state using rte_rcu_qsbr_quiescent.
263 * Reader thread with this thread ID will report its quiescent state on
264 * the QS variable. thread_id is a value between 0 and (max_threads - 1).
265 * 'max_threads' is the parameter passed in 'rte_rcu_qsbr_init' API.
269 rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id);
273 * @b EXPERIMENTAL: this API may change without prior notice
275 * Remove a reader thread, from the list of threads reporting their
276 * quiescent state on a QS variable.
278 * This is implemented as a lock-free function. It is multi-thread safe.
279 * This API can be called from the reader threads during shutdown.
280 * Ongoing quiescent state queries will stop waiting for the status from this
281 * unregistered reader thread.
286 * Reader thread with this thread ID will stop reporting its quiescent
287 * state on the QS variable.
291 rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id);
295 * @b EXPERIMENTAL: this API may change without prior notice
297 * Add a registered reader thread, to the list of threads reporting their
298 * quiescent state on a QS variable.
300 * This is implemented as a lock-free function. It is multi-thread
303 * Any registered reader thread that wants to report its quiescent state must
304 * call this API before calling rte_rcu_qsbr_quiescent. This can be called
305 * during initialization or as part of the packet processing loop.
307 * The reader thread must call rte_rcu_qsbr_thread_offline API, before
308 * calling any functions that block, to ensure that rte_rcu_qsbr_check
309 * API does not wait indefinitely for the reader thread to update its QS.
311 * The reader thread must call rte_rcu_qsbr_thread_online API, after the blocking
312 * function call returns, to ensure that rte_rcu_qsbr_check API
313 * waits for the reader thread to update its quiescent state.
318 * Reader thread with this thread ID will report its quiescent state on
/* Mark the calling reader thread online: snapshot the writer's current
 * token into this thread's counter so that subsequent
 * rte_rcu_qsbr_check calls wait for this thread again.
 * NOTE(review): the function braces and the declaration of local 't'
 * are elided in this listing.
 */
322 static __rte_always_inline void
323 rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
327 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
/* Debug-only sanity check: going online while the lock counter is
 * non-zero indicates an unbalanced rte_rcu_qsbr_lock/unlock pair.
 */
329 __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
330 v->qsbr_cnt[thread_id].lock_cnt);
332 /* Copy the current value of token.
333 * The fence at the end of the function will ensure that
334 * the following will not move down after the load of any shared
337 t = __atomic_load_n(&v->token, __ATOMIC_RELAXED);
339 /* __atomic_store_n(cnt, __ATOMIC_RELAXED) is used to ensure
340 * 'cnt' (64b) is accessed atomically.
342 __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
343 t, __ATOMIC_RELAXED);
345 /* The subsequent load of the data structure should not
346 * move above the store. Hence a store-load barrier
348 * If the load of the data structure moves above the store,
349 * writer might not see that the reader is online, even though
350 * the reader is referencing the shared data structure.
352 #ifdef RTE_ARCH_X86_64
353 /* rte_smp_mb() for x86 is lighter */
356 __atomic_thread_fence(__ATOMIC_SEQ_CST);
362 * @b EXPERIMENTAL: this API may change without prior notice
364 * Remove a registered reader thread from the list of threads reporting their
365 * quiescent state on a QS variable.
367 * This is implemented as a lock-free function. It is multi-thread
370 * This can be called during initialization or as part of the packet
373 * The reader thread must call rte_rcu_qsbr_thread_offline API, before
374 * calling any functions that block, to ensure that rte_rcu_qsbr_check
375 * API does not wait indefinitely for the reader thread to update its QS.
380 * rte_rcu_qsbr_check API will not wait for the reader thread with
381 * this thread ID to report its quiescent state on the QS variable.
/* Mark the calling reader thread offline: store the OFFLINE (0) value
 * with release ordering so rte_rcu_qsbr_check stops waiting for this
 * thread until it comes back online.
 */
384 static __rte_always_inline void
385 rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
387 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
/* Debug-only check that no reader-side lock is still held. */
389 __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
390 v->qsbr_cnt[thread_id].lock_cnt);
392 /* The reader can go offline only after the load of the
393 * data structure is completed. i.e. any load of the
394 * data structure can not move after this store.
397 __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
398 __RTE_QSBR_CNT_THR_OFFLINE, __ATOMIC_RELEASE);
403 * @b EXPERIMENTAL: this API may change without prior notice
405 * Acquire a lock for accessing a shared data structure.
407 * This is implemented as a lock-free function. It is multi-thread
410 * This API is provided to aid debugging. This should be called before
411 * accessing a shared data structure.
413 * When CONFIG_RTE_LIBRTE_RCU_DEBUG is enabled a lock counter is incremented.
414 * Similarly rte_rcu_qsbr_unlock will decrement the counter. The
415 * rte_rcu_qsbr_check API will verify that this counter is 0.
417 * When CONFIG_RTE_LIBRTE_RCU_DEBUG is disabled, this API will do nothing.
/* Debug aid: count a reader-side lock acquisition. Apart from the
 * assert, this compiles to nothing unless RTE_LIBRTE_RCU_DEBUG is
 * defined.
 */
425 static __rte_always_inline void
426 rte_rcu_qsbr_lock(__rte_unused struct rte_rcu_qsbr *v,
427 __rte_unused unsigned int thread_id)
429 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
431 #if defined(RTE_LIBRTE_RCU_DEBUG)
432 /* Increment the lock counter */
433 __atomic_fetch_add(&v->qsbr_cnt[thread_id].lock_cnt,
434 1, __ATOMIC_ACQUIRE);
440 * @b EXPERIMENTAL: this API may change without prior notice
442 * Release a lock after accessing a shared data structure.
444 * This is implemented as a lock-free function. It is multi-thread
447 * This API is provided to aid debugging. This should be called after
448 * accessing a shared data structure.
450 * When CONFIG_RTE_LIBRTE_RCU_DEBUG is enabled, rte_rcu_qsbr_unlock will
451 * decrement a lock counter. rte_rcu_qsbr_check API will verify that this
454 * When CONFIG_RTE_LIBRTE_RCU_DEBUG is disabled, this API will do nothing.
/* Debug aid: count a reader-side lock release and warn if the counter
 * indicates unbalanced (nested) locking. Apart from the assert, this
 * compiles to nothing unless RTE_LIBRTE_RCU_DEBUG is defined.
 */
462 static __rte_always_inline void
463 rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
464 __rte_unused unsigned int thread_id)
466 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
468 #if defined(RTE_LIBRTE_RCU_DEBUG)
469 /* Decrement the lock counter */
470 __atomic_fetch_sub(&v->qsbr_cnt[thread_id].lock_cnt,
471 1, __ATOMIC_RELEASE);
473 __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
474 "Lock counter %u. Nested locks?\n",
475 v->qsbr_cnt[thread_id].lock_cnt);
481 * @b EXPERIMENTAL: this API may change without prior notice
483 * Ask the reader threads to report the quiescent state
486 * This is implemented as a lock-free function. It is multi-thread
487 * safe and can be called from worker threads.
492 * - This is the token for this call of the API. This should be
493 * passed to rte_rcu_qsbr_check API.
/* Writer side: bump the global token with release ordering; the new
 * value is later passed to rte_rcu_qsbr_check.
 * NOTE(review): the function braces, the declaration of local 't' and
 * the return statement are elided in this listing.
 */
496 static __rte_always_inline uint64_t
497 rte_rcu_qsbr_start(struct rte_rcu_qsbr *v)
501 RTE_ASSERT(v != NULL);
503 /* Release the changes to the shared data structure.
504 * This store release will ensure that changes to any data
505 * structure are visible to the workers before the token
508 t = __atomic_add_fetch(&v->token, 1, __ATOMIC_RELEASE);
515 * @b EXPERIMENTAL: this API may change without prior notice
517 * Update quiescent state for a reader thread.
519 * This is implemented as a lock-free function. It is multi-thread safe.
520 * All the reader threads registered to report their quiescent state
521 * on the QS variable must call this API.
526 * Update the quiescent state for the reader with this thread ID.
/* Reader side: load the latest token with acquire ordering and
 * publish it as this thread's counter with release ordering,
 * signalling that this reader has passed through a quiescent state.
 * The store is skipped when the counter is already up to date.
 */
529 static __rte_always_inline void
530 rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
534 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
/* Debug-only check: reporting QS while holding a reader-side lock
 * indicates an unbalanced rte_rcu_qsbr_lock/unlock pair.
 */
536 __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
537 v->qsbr_cnt[thread_id].lock_cnt);
539 /* Acquire the changes to the shared data structure released
540 * by rte_rcu_qsbr_start.
541 * Later loads of the shared data structure should not move
542 * above this load. Hence, use load-acquire.
544 t = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);
546 /* Check if there are updates available from the writer.
547 * Inform the writer that updates are visible to this reader.
548 * Prior loads of the shared data structure should not move
549 * beyond this store. Hence use store-release.
551 if (t != __atomic_load_n(&v->qsbr_cnt[thread_id].cnt, __ATOMIC_RELAXED))
552 __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
553 t, __ATOMIC_RELEASE);
555 __RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %"PRIu64", Thread ID = %d",
556 __func__, t, thread_id);
559 /* Check the quiescent state counter for registered threads only, assuming
560 * that not all threads have registered.
/* Walks the registered-thread bitmap one 64b word at a time; for each
 * set bit, loads that reader's counter and compares it against token
 * 't'. Offline readers (counter == 0) are skipped when accumulating
 * the least acknowledged token.
 * NOTE(review): several lines (local declarations, loop condition,
 * bitmap bit clearing, wait/retry handling, return statements) are
 * elided in this listing.
 */
562 static __rte_always_inline int
563 __rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
568 uint64_t *reg_thread_id;
/* All-ones sentinel: "no token acknowledged yet". */
569 uint64_t acked_token = __RTE_QSBR_CNT_MAX;
571 for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
573 i++, reg_thread_id++) {
574 /* Load the current registered thread bit map before
575 * loading the reader thread quiescent state counters.
577 bmap = __atomic_load_n(reg_thread_id, __ATOMIC_ACQUIRE);
/* First thread id covered by this bitmap word (i * 64). */
578 id = i << __RTE_QSBR_THRID_INDEX_SHIFT;
/* Lowest set bit = next registered thread in this word. */
581 j = __builtin_ctzl(bmap);
582 __RTE_RCU_DP_LOG(DEBUG,
583 "%s: check: token = %"PRIu64", wait = %d, Bit Map = 0x%"PRIx64", Thread ID = %d",
584 __func__, t, wait, bmap, id + j);
586 &v->qsbr_cnt[id + j].cnt,
588 __RTE_RCU_DP_LOG(DEBUG,
589 "%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d",
590 __func__, t, wait, c, id+j);
592 /* Counter is not checked for wrap-around condition
593 * as it is a 64b counter.
596 __RTE_QSBR_CNT_THR_OFFLINE && c < t)) {
597 /* This thread is not in quiescent state */
602 /* This thread might have unregistered.
603 * Re-read the bitmap.
605 bmap = __atomic_load_n(reg_thread_id,
611 /* This thread is in quiescent state. Use the counter
612 * to find the least acknowledged token among all the
615 if (c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c)
622 /* All readers are checked, update least acknowledged token.
623 * There might be multiple writers trying to update this. There is
624 * no need to update this very accurately using compare-and-swap.
626 if (acked_token != __RTE_QSBR_CNT_MAX)
627 __atomic_store_n(&v->acked_token, acked_token,
633 /* Check the quiescent state counter for all threads, assuming that
634 * all the threads have registered.
/* Dense-array variant used by rte_rcu_qsbr_check when
 * num_threads == max_threads: iterate the per-thread counter array
 * directly, with no bitmap lookups.
 * NOTE(review): local declarations, wait/retry handling and return
 * statements are elided in this listing.
 */
636 static __rte_always_inline int
637 __rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
640 struct rte_rcu_qsbr_cnt *cnt;
/* All-ones sentinel: "no token acknowledged yet". */
642 uint64_t acked_token = __RTE_QSBR_CNT_MAX;
644 for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) {
645 __RTE_RCU_DP_LOG(DEBUG,
646 "%s: check: token = %"PRIu64", wait = %d, Thread ID = %d",
647 __func__, t, wait, i);
649 c = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE);
650 __RTE_RCU_DP_LOG(DEBUG,
651 "%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d",
652 __func__, t, wait, c, i);
654 /* Counter is not checked for wrap-around condition
655 * as it is a 64b counter.
/* Offline threads and threads that already acked 't' are done. */
657 if (likely(c == __RTE_QSBR_CNT_THR_OFFLINE || c >= t))
660 /* This thread is not in quiescent state */
667 /* This thread is in quiescent state. Use the counter to find
668 * the least acknowledged token among all the readers.
670 if (likely(c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c))
674 /* All readers are checked, update least acknowledged token.
675 * There might be multiple writers trying to update this. There is
676 * no need to update this very accurately using compare-and-swap.
678 if (acked_token != __RTE_QSBR_CNT_MAX)
679 __atomic_store_n(&v->acked_token, acked_token,
687 * @b EXPERIMENTAL: this API may change without prior notice
689 * Checks if all the reader threads have entered the quiescent state
690 * referenced by token.
692 * This is implemented as a lock-free function. It is multi-thread
693 * safe and can be called from the worker threads as well.
695 * If this API is called with 'wait' set to true, the following
696 * factors must be considered:
698 * 1) If the calling thread is also reporting the status on the
699 * same QS variable, it must update the quiescent state status, before
702 * 2) In addition, while calling from multiple threads, only
703 * one of those threads can be reporting the quiescent state status
704 * on a given QS variable.
709 * Token returned by rte_rcu_qsbr_start API
711 * If true, block till all the reader threads have completed entering
712 * the quiescent state referenced by token 't'.
714 * - 0 if all reader threads have NOT passed through specified number
715 * of quiescent states.
716 * - 1 if all reader threads have passed through specified number
717 * of quiescent states.
/* Dispatcher: skip the scan entirely when token 't' is already
 * covered by the cached acked_token; otherwise use the dense
 * all-threads scan when every slot is registered, or fall back to the
 * bitmap-based selective scan.
 */
720 static __rte_always_inline int
721 rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
723 RTE_ASSERT(v != NULL);
725 /* Check if all the readers have already acknowledged this token */
726 if (likely(t <= v->acked_token)) {
727 __RTE_RCU_DP_LOG(DEBUG,
728 "%s: check: token = %"PRIu64", wait = %d",
730 __RTE_RCU_DP_LOG(DEBUG,
731 "%s: status: least acked token = %"PRIu64"",
732 __func__, v->acked_token);
736 if (likely(v->num_threads == v->max_threads))
737 return __rte_rcu_qsbr_check_all(v, t, wait);
739 return __rte_rcu_qsbr_check_selective(v, t, wait);
744 * @b EXPERIMENTAL: this API may change without prior notice
746 * Wait till the reader threads have entered quiescent state.
748 * This is implemented as a lock-free function. It is multi-thread safe.
749 * This API can be thought of as a wrapper around rte_rcu_qsbr_start and
750 * rte_rcu_qsbr_check APIs.
752 * If this API is called from multiple threads, only one of
753 * those threads can be reporting the quiescent state status on a
759 * Thread ID of the caller if it is registered to report quiescent state
760 * on this QS variable (i.e. the calling thread is also part of the
761 * readside critical section). If not, pass RTE_QSBR_THRID_INVALID.
765 rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id);
769 * @b EXPERIMENTAL: this API may change without prior notice
771 * Dump the details of a single QS variables to a file.
773 * It is NOT multi-thread safe.
776 * A pointer to a file for output
781 * On error - 1 with error code set in rte_errno.
782 * Possible rte_errno codes are:
783 * - EINVAL - NULL parameters are passed
787 rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v);
791 * @b EXPERIMENTAL: this API may change without prior notice
793 * Create a queue used to store the data structure elements that can
794 * be freed later. This queue is referred to as 'defer queue'.
797 * Parameters to create a defer queue.
799 * On success - Valid pointer to defer queue
801 * Possible rte_errno codes are:
802 * - EINVAL - NULL parameters are passed
803 * - ENOMEM - Not enough memory
806 struct rte_rcu_qsbr_dq *
807 rte_rcu_qsbr_dq_create(const struct rte_rcu_qsbr_dq_parameters *params);
811 * @b EXPERIMENTAL: this API may change without prior notice
813 * Enqueue one resource to the defer queue and start the grace period.
814 * The resource will be freed later after at least one grace period
817 * If the defer queue is full, it will attempt to reclaim resources.
818 * It will also reclaim resources at regular intervals to avoid
819 * the defer queue from growing too big.
821 * Multi-thread safety is provided as the defer queue configuration.
822 * When multi-thread safety is requested, it is possible that the
823 * resources are not stored in their order of deletion. This results
824 * in resources being held in the defer queue longer than they should.
827 * Defer queue to allocate an entry from.
829 * Pointer to resource data to copy to the defer queue. The size of
830 * the data to copy is equal to the element size provided when the
831 * defer queue was created.
834 * On error - 1 with rte_errno set to
835 * - EINVAL - NULL parameters are passed
836 * - ENOSPC - Defer queue is full. This condition can not happen
837 * if the defer queue size is equal (or larger) than the
838 * number of elements in the data structure.
842 rte_rcu_qsbr_dq_enqueue(struct rte_rcu_qsbr_dq *dq, void *e);
846 * @b EXPERIMENTAL: this API may change without prior notice
848 * Free resources from the defer queue.
850 * This API is multi-thread safe.
853 * Defer queue to free an entry from.
855 * Maximum number of resources to free.
857 * Number of resources that were freed.
859 * Number of resources pending on the defer queue. This number might not
860 * be accurate if multi-thread safety is configured.
862 * Number of resources that can be added to the defer queue.
863 * This number might not be accurate if multi-thread safety is configured.
865 * On successful reclamation of at least 1 resource - 0
866 * On error - 1 with rte_errno set to
867 * - EINVAL - NULL parameters are passed
871 rte_rcu_qsbr_dq_reclaim(struct rte_rcu_qsbr_dq *dq, unsigned int n,
872 unsigned int *freed, unsigned int *pending, unsigned int *available);
876 * @b EXPERIMENTAL: this API may change without prior notice
878 * Delete a defer queue.
880 * It tries to reclaim all the resources on the defer queue.
881 * If any of the resources have not completed the grace period
882 * the reclamation stops and returns immediately. The rest of
883 * the resources are not reclaimed and the defer queue is not
887 * Defer queue to delete.
891 * Possible rte_errno codes are:
892 * - EAGAIN - Some of the resources have not completed at least 1 grace
897 rte_rcu_qsbr_dq_delete(struct rte_rcu_qsbr_dq *dq);
903 #endif /* _RTE_RCU_QSBR_H_ */