/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018-2020 Arm Limited
 */
5 #ifndef _RTE_RCU_QSBR_H_
6 #define _RTE_RCU_QSBR_H_
/**
 * @file
 *
 * RTE Quiescent State Based Reclamation (QSBR).
 *
 * @warning
 * @b EXPERIMENTAL:
 * All functions in this file may be changed or removed without prior notice.
 *
 * Quiescent State (QS) is any point in the thread execution
 * where the thread does not hold a reference to a data structure
 * in shared memory. While using lock-less data structures, the writer
 * can safely free memory once all the reader threads have entered
 * quiescent state.
 *
 * This library provides the ability for the readers to report quiescent
 * state and for the writers to identify when all the readers have
 * entered quiescent state.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ring.h>
/* Log type registered by the RCU library (defined in rte_rcu_qsbr.c). */
extern int rte_rcu_log_type;

/* Datapath debug logging helper. Compiled to a no-op unless the
 * datapath log level is at least DEBUG, so the fast path pays nothing
 * in normal builds.
 */
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define __RTE_RCU_DP_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
		"%s(): " fmt "\n", __func__, ## args)
#else
#define __RTE_RCU_DP_LOG(level, fmt, args...)
#endif
/* Debug-only check: log at 'level' if the per-thread lock counter is
 * non-zero (i.e. the thread reports a quiescent state while still
 * holding a reference acquired via rte_rcu_qsbr_lock).
 * No-op unless RTE_LIBRTE_RCU_DEBUG is defined.
 */
#if defined(RTE_LIBRTE_RCU_DEBUG)
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...) do {\
	if (v->qsbr_cnt[thread_id].lock_cnt) \
		rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
			"%s(): " fmt "\n", __func__, ## args); \
} while (0)
#else
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...)
#endif
/* Registered thread IDs are stored as a bitmap of 64b element array.
 * Given thread id needs to be converted to index into the array and
 * the id within the array element.
 */
/* Number of thread-id bits held by one bitmap array element (64). */
#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8)
/* Bytes needed for the thread ID bitmap, rounded up to a cache line. */
#define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
	RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
		__RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
/* Address of bitmap element 'i'; the bitmap follows the per-thread
 * counter array, which itself follows the rte_rcu_qsbr header.
 */
#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \
	((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
/* thread_id >> 6 selects the bitmap element ... */
#define __RTE_QSBR_THRID_INDEX_SHIFT 6
/* ... and the low 6 bits select the bit within the element. */
#define __RTE_QSBR_THRID_MASK 0x3f
/* Sentinel passed to rte_rcu_qsbr_synchronize by unregistered callers. */
#define RTE_QSBR_THRID_INVALID 0xffffffff
74 /* Worker thread counter */
75 struct rte_rcu_qsbr_cnt {
77 /**< Quiescent state counter. Value 0 indicates the thread is offline
78 * 64b counter is used to avoid adding more code to address
79 * counter overflow. Changing this to 32b would require additional
80 * changes to various APIs.
83 /**< Lock counter. Used when RTE_LIBRTE_RCU_DEBUG is enabled */
84 } __rte_cache_aligned;
/* Value of the per-thread counter when the thread is offline. */
#define __RTE_QSBR_CNT_THR_OFFLINE 0
/* Initial value of the per-thread counter when a thread registers. */
#define __RTE_QSBR_CNT_INIT 1
/* Maximum 64b counter value; also used as the "no token acked" sentinel. */
#define __RTE_QSBR_CNT_MAX ((uint64_t)~0)
/* Size (bytes) of the token counter. */
#define __RTE_QSBR_TOKEN_SIZE sizeof(uint64_t)
91 /* RTE Quiescent State variable structure.
92 * This structure has two elements that vary in size based on the
93 * 'max_threads' parameter.
94 * 1) Quiescent state counter array
95 * 2) Register thread ID array
98 uint64_t token __rte_cache_aligned;
99 /**< Counter to allow for multiple concurrent quiescent state queries */
100 uint64_t acked_token;
101 /**< Least token acked by all the threads in the last call to
102 * rte_rcu_qsbr_check API.
105 uint32_t num_elems __rte_cache_aligned;
106 /**< Number of elements in the thread ID array */
107 uint32_t num_threads;
108 /**< Number of threads currently using this QS variable */
109 uint32_t max_threads;
110 /**< Maximum number of threads using this QS variable */
112 struct rte_rcu_qsbr_cnt qsbr_cnt[0] __rte_cache_aligned;
113 /**< Quiescent state counter array of 'max_threads' elements */
115 /**< Registered thread IDs are stored in a bitmap array,
116 * after the quiescent state counter array.
118 } __rte_cache_aligned;
/**
 * Call back function called to free the resources.
 *
 * @param p
 *   Pointer provided while creating the defer queue
 * @param e
 *   Pointer to the resource data stored on the defer queue
 * @param n
 *   Number of resources to free. Currently, this is set to 1.
 *
 * @return
 *   None
 */
typedef void (*rte_rcu_qsbr_free_resource_t)(void *p, void *e, unsigned int n);
/* Maximum length of a defer queue name (backed by an rte_ring). */
#define RTE_RCU_QSBR_DQ_NAMESIZE RTE_RING_NAMESIZE

/**
 * Various flags supported.
 */
/**< Enqueue and reclaim operations are multi-thread safe by default.
 *   The call back functions registered to free the resources are
 *   assumed to be multi-thread safe.
 *   Set this flag if multi-thread safety is not required.
 */
#define RTE_RCU_QSBR_DQ_MT_UNSAFE 1
148 * Parameters used when creating the defer queue.
150 struct rte_rcu_qsbr_dq_parameters {
152 /**< Name of the queue. */
154 /**< Flags to control API behaviors */
156 /**< Number of entries in queue. Typically, this will be
157 * the same as the maximum number of entries supported in the
158 * lock free data structure.
159 * Data structures with unbounded number of entries is not
160 * supported currently.
163 /**< Size (in bytes) of each element in the defer queue.
164 * This has to be multiple of 4B.
166 uint32_t trigger_reclaim_limit;
167 /**< Trigger automatic reclamation after the defer queue
168 * has at least these many resources waiting. This auto
169 * reclamation is triggered in rte_rcu_qsbr_dq_enqueue API
171 * If this is greater than 'size', auto reclamation is
173 * If this is set to 0, auto reclamation is triggered
174 * in every call to rte_rcu_qsbr_dq_enqueue API.
176 uint32_t max_reclaim_size;
177 /**< When automatic reclamation is enabled, reclaim at the max
178 * these many resources. This should contain a valid value, if
179 * auto reclamation is on. Setting this to 'size' or greater will
180 * reclaim all possible resources currently on the defer queue.
182 rte_rcu_qsbr_free_resource_t free_fn;
183 /**< Function to call to free the resource. */
185 /**< Pointer passed to the free function. Typically, this is the
186 * pointer to the data structure to which the resource to free
187 * belongs. This can be NULL.
189 struct rte_rcu_qsbr *v;
190 /**< RCU QSBR variable to use for this defer queue */
/* RTE defer queue structure.
 * This structure holds the defer queue. The defer queue is used to
 * hold the deleted entries from the data structure that are not
 * yet freed.
 */
/* Opaque to the application; defined in the library implementation. */
struct rte_rcu_qsbr_dq;
/**
 * Return the size of the memory occupied by a Quiescent State variable.
 *
 * @param max_threads
 *   Maximum number of threads reporting quiescent state on this variable.
 * @return
 *   On success - size of memory in bytes required for this QS variable.
 *   On error - 1 with error code set in rte_errno.
 *   Possible rte_errno codes are:
 *   - EINVAL - max_threads is 0
 */
size_t
rte_rcu_qsbr_get_memsize(uint32_t max_threads);
/**
 * Initialize a Quiescent State (QS) variable.
 *
 * @param v
 *   QS variable
 * @param max_threads
 *   Maximum number of threads reporting quiescent state on this variable.
 *   This should be the same value as passed to rte_rcu_qsbr_get_memsize.
 * @return
 *   On success - 0
 *   On error - 1 with error code set in rte_errno.
 *   Possible rte_errno codes are:
 *   - EINVAL - max_threads is 0 or 'v' is NULL.
 */
int
rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads);
/**
 * Register a reader thread to report its quiescent state
 * on a QS variable.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 * Any reader thread that wants to report its quiescent state must
 * call this API. This can be called during initialization or as part
 * of the packet processing loop.
 *
 * Note that rte_rcu_qsbr_thread_online must be called before the
 * thread updates its quiescent state using rte_rcu_qsbr_quiescent.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread with this thread ID will report its quiescent state on
 *   the QS variable. thread_id is a value between 0 and (max_threads - 1).
 *   'max_threads' is the parameter passed in 'rte_rcu_qsbr_init' API.
 * @return
 *   On success - 0
 *   On error - 1 with error code set in rte_errno.
 *   Possible rte_errno codes are:
 *   - EINVAL - 'v' is NULL or thread_id is out of range
 */
int
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id);
/**
 * Remove a reader thread, from the list of threads reporting their
 * quiescent state on a QS variable.
 *
 * This is implemented as a lock-free function. It is multi-thread safe.
 * This API can be called from the reader threads during shutdown.
 * Ongoing quiescent state queries will stop waiting for the status from this
 * unregistered reader thread.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread with this thread ID will stop reporting its quiescent
 *   state on the QS variable.
 * @return
 *   On success - 0
 *   On error - 1 with error code set in rte_errno.
 *   Possible rte_errno codes are:
 *   - EINVAL - 'v' is NULL or thread_id is out of range
 */
int
rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id);
274 * Add a registered reader thread, to the list of threads reporting their
275 * quiescent state on a QS variable.
277 * This is implemented as a lock-free function. It is multi-thread
280 * Any registered reader thread that wants to report its quiescent state must
281 * call this API before calling rte_rcu_qsbr_quiescent. This can be called
282 * during initialization or as part of the packet processing loop.
284 * The reader thread must call rte_rcu_qsbr_thread_offline API, before
285 * calling any functions that block, to ensure that rte_rcu_qsbr_check
286 * API does not wait indefinitely for the reader thread to update its QS.
288 * The reader thread must call rte_rcu_thread_online API, after the blocking
289 * function call returns, to ensure that rte_rcu_qsbr_check API
290 * waits for the reader thread to update its quiescent state.
295 * Reader thread with this thread ID will report its quiescent state on
298 static __rte_always_inline void
299 rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
303 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
305 __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
306 v->qsbr_cnt[thread_id].lock_cnt);
308 /* Copy the current value of token.
309 * The fence at the end of the function will ensure that
310 * the following will not move down after the load of any shared
313 t = __atomic_load_n(&v->token, __ATOMIC_RELAXED);
315 /* __atomic_store_n(cnt, __ATOMIC_RELAXED) is used to ensure
316 * 'cnt' (64b) is accessed atomically.
318 __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
319 t, __ATOMIC_RELAXED);
321 /* The subsequent load of the data structure should not
322 * move above the store. Hence a store-load barrier
324 * If the load of the data structure moves above the store,
325 * writer might not see that the reader is online, even though
326 * the reader is referencing the shared data structure.
328 rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
332 * Remove a registered reader thread from the list of threads reporting their
333 * quiescent state on a QS variable.
335 * This is implemented as a lock-free function. It is multi-thread
338 * This can be called during initialization or as part of the packet
341 * The reader thread must call rte_rcu_qsbr_thread_offline API, before
342 * calling any functions that block, to ensure that rte_rcu_qsbr_check
343 * API does not wait indefinitely for the reader thread to update its QS.
348 * rte_rcu_qsbr_check API will not wait for the reader thread with
349 * this thread ID to report its quiescent state on the QS variable.
351 static __rte_always_inline void
352 rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
354 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
356 __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
357 v->qsbr_cnt[thread_id].lock_cnt);
359 /* The reader can go offline only after the load of the
360 * data structure is completed. i.e. any load of the
361 * data structure can not move after this store.
364 __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
365 __RTE_QSBR_CNT_THR_OFFLINE, __ATOMIC_RELEASE);
369 * Acquire a lock for accessing a shared data structure.
371 * This is implemented as a lock-free function. It is multi-thread
374 * This API is provided to aid debugging. This should be called before
375 * accessing a shared data structure.
377 * When RTE_LIBRTE_RCU_DEBUG is enabled a lock counter is incremented.
378 * Similarly rte_rcu_qsbr_unlock will decrement the counter. When the
379 * rte_rcu_qsbr_check API will verify that this counter is 0.
381 * When RTE_LIBRTE_RCU_DEBUG is disabled, this API will do nothing.
388 static __rte_always_inline void
389 rte_rcu_qsbr_lock(__rte_unused struct rte_rcu_qsbr *v,
390 __rte_unused unsigned int thread_id)
392 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
394 #if defined(RTE_LIBRTE_RCU_DEBUG)
395 /* Increment the lock counter */
396 __atomic_fetch_add(&v->qsbr_cnt[thread_id].lock_cnt,
397 1, __ATOMIC_ACQUIRE);
402 * Release a lock after accessing a shared data structure.
404 * This is implemented as a lock-free function. It is multi-thread
407 * This API is provided to aid debugging. This should be called after
408 * accessing a shared data structure.
410 * When RTE_LIBRTE_RCU_DEBUG is enabled, rte_rcu_qsbr_unlock will
411 * decrement a lock counter. rte_rcu_qsbr_check API will verify that this
414 * When RTE_LIBRTE_RCU_DEBUG is disabled, this API will do nothing.
421 static __rte_always_inline void
422 rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
423 __rte_unused unsigned int thread_id)
425 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
427 #if defined(RTE_LIBRTE_RCU_DEBUG)
428 /* Decrement the lock counter */
429 __atomic_fetch_sub(&v->qsbr_cnt[thread_id].lock_cnt,
430 1, __ATOMIC_RELEASE);
432 __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
433 "Lock counter %u. Nested locks?\n",
434 v->qsbr_cnt[thread_id].lock_cnt);
439 * Ask the reader threads to report the quiescent state
442 * This is implemented as a lock-free function. It is multi-thread
443 * safe and can be called from worker threads.
448 * - This is the token for this call of the API. This should be
449 * passed to rte_rcu_qsbr_check API.
451 static __rte_always_inline uint64_t
452 rte_rcu_qsbr_start(struct rte_rcu_qsbr *v)
456 RTE_ASSERT(v != NULL);
458 /* Release the changes to the shared data structure.
459 * This store release will ensure that changes to any data
460 * structure are visible to the workers before the token
463 t = __atomic_add_fetch(&v->token, 1, __ATOMIC_RELEASE);
469 * Update quiescent state for a reader thread.
471 * This is implemented as a lock-free function. It is multi-thread safe.
472 * All the reader threads registered to report their quiescent state
473 * on the QS variable must call this API.
478 * Update the quiescent state for the reader with this thread ID.
480 static __rte_always_inline void
481 rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
485 RTE_ASSERT(v != NULL && thread_id < v->max_threads);
487 __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
488 v->qsbr_cnt[thread_id].lock_cnt);
490 /* Acquire the changes to the shared data structure released
491 * by rte_rcu_qsbr_start.
492 * Later loads of the shared data structure should not move
493 * above this load. Hence, use load-acquire.
495 t = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);
497 /* Check if there are updates available from the writer.
498 * Inform the writer that updates are visible to this reader.
499 * Prior loads of the shared data structure should not move
500 * beyond this store. Hence use store-release.
502 if (t != __atomic_load_n(&v->qsbr_cnt[thread_id].cnt, __ATOMIC_RELAXED))
503 __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
504 t, __ATOMIC_RELEASE);
506 __RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %" PRIu64 ", Thread ID = %d",
507 __func__, t, thread_id);
510 /* Check the quiescent state counter for registered threads only, assuming
511 * that not all threads have registered.
513 static __rte_always_inline int
514 __rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
519 uint64_t *reg_thread_id;
520 uint64_t acked_token = __RTE_QSBR_CNT_MAX;
522 for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
524 i++, reg_thread_id++) {
525 /* Load the current registered thread bit map before
526 * loading the reader thread quiescent state counters.
528 bmap = __atomic_load_n(reg_thread_id, __ATOMIC_ACQUIRE);
529 id = i << __RTE_QSBR_THRID_INDEX_SHIFT;
532 j = __builtin_ctzl(bmap);
533 __RTE_RCU_DP_LOG(DEBUG,
534 "%s: check: token = %" PRIu64 ", wait = %d, Bit Map = 0x%" PRIx64 ", Thread ID = %d",
535 __func__, t, wait, bmap, id + j);
537 &v->qsbr_cnt[id + j].cnt,
539 __RTE_RCU_DP_LOG(DEBUG,
540 "%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
541 __func__, t, wait, c, id+j);
543 /* Counter is not checked for wrap-around condition
544 * as it is a 64b counter.
547 __RTE_QSBR_CNT_THR_OFFLINE && c < t)) {
548 /* This thread is not in quiescent state */
553 /* This thread might have unregistered.
554 * Re-read the bitmap.
556 bmap = __atomic_load_n(reg_thread_id,
562 /* This thread is in quiescent state. Use the counter
563 * to find the least acknowledged token among all the
566 if (c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c)
573 /* All readers are checked, update least acknowledged token.
574 * There might be multiple writers trying to update this. There is
575 * no need to update this very accurately using compare-and-swap.
577 if (acked_token != __RTE_QSBR_CNT_MAX)
578 __atomic_store_n(&v->acked_token, acked_token,
584 /* Check the quiescent state counter for all threads, assuming that
585 * all the threads have registered.
587 static __rte_always_inline int
588 __rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
591 struct rte_rcu_qsbr_cnt *cnt;
593 uint64_t acked_token = __RTE_QSBR_CNT_MAX;
595 for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) {
596 __RTE_RCU_DP_LOG(DEBUG,
597 "%s: check: token = %" PRIu64 ", wait = %d, Thread ID = %d",
598 __func__, t, wait, i);
600 c = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE);
601 __RTE_RCU_DP_LOG(DEBUG,
602 "%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
603 __func__, t, wait, c, i);
605 /* Counter is not checked for wrap-around condition
606 * as it is a 64b counter.
608 if (likely(c == __RTE_QSBR_CNT_THR_OFFLINE || c >= t))
611 /* This thread is not in quiescent state */
618 /* This thread is in quiescent state. Use the counter to find
619 * the least acknowledged token among all the readers.
621 if (likely(c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c))
625 /* All readers are checked, update least acknowledged token.
626 * There might be multiple writers trying to update this. There is
627 * no need to update this very accurately using compare-and-swap.
629 if (acked_token != __RTE_QSBR_CNT_MAX)
630 __atomic_store_n(&v->acked_token, acked_token,
637 * Checks if all the reader threads have entered the quiescent state
638 * referenced by token.
640 * This is implemented as a lock-free function. It is multi-thread
641 * safe and can be called from the worker threads as well.
643 * If this API is called with 'wait' set to true, the following
644 * factors must be considered:
646 * 1) If the calling thread is also reporting the status on the
647 * same QS variable, it must update the quiescent state status, before
650 * 2) In addition, while calling from multiple threads, only
651 * one of those threads can be reporting the quiescent state status
652 * on a given QS variable.
657 * Token returned by rte_rcu_qsbr_start API
659 * If true, block till all the reader threads have completed entering
660 * the quiescent state referenced by token 't'.
662 * - 0 if all reader threads have NOT passed through specified number
663 * of quiescent states.
664 * - 1 if all reader threads have passed through specified number
665 * of quiescent states.
667 static __rte_always_inline int
668 rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
670 RTE_ASSERT(v != NULL);
672 /* Check if all the readers have already acknowledged this token */
673 if (likely(t <= v->acked_token)) {
674 __RTE_RCU_DP_LOG(DEBUG,
675 "%s: check: token = %" PRIu64 ", wait = %d",
677 __RTE_RCU_DP_LOG(DEBUG,
678 "%s: status: least acked token = %" PRIu64,
679 __func__, v->acked_token);
683 if (likely(v->num_threads == v->max_threads))
684 return __rte_rcu_qsbr_check_all(v, t, wait);
686 return __rte_rcu_qsbr_check_selective(v, t, wait);
/**
 * Wait till the reader threads have entered quiescent state.
 *
 * This is implemented as a lock-free function. It is multi-thread safe.
 * This API can be thought of as a wrapper around rte_rcu_qsbr_start and
 * rte_rcu_qsbr_check APIs.
 *
 * If this API is called from multiple threads, only one of
 * those threads can be reporting the quiescent state status on a
 * given QS variable.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Thread ID of the caller if it is registered to report quiescent state
 *   on this QS variable (i.e. the calling thread is also part of the
 *   readside critical section). If not, pass RTE_QSBR_THRID_INVALID.
 */
void
rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id);
/**
 * Dump the details of a single QS variables to a file.
 *
 * It is NOT multi-thread safe.
 *
 * @param f
 *   A pointer to a file for output
 * @param v
 *   QS variable
 * @return
 *   On success - 0
 *   On error - 1 with error code set in rte_errno.
 *   Possible rte_errno codes are:
 *   - EINVAL - NULL parameters are passed
 */
int
rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v);
730 * @b EXPERIMENTAL: this API may change without prior notice
732 * Create a queue used to store the data structure elements that can
733 * be freed later. This queue is referred to as 'defer queue'.
736 * Parameters to create a defer queue.
738 * On success - Valid pointer to defer queue
740 * Possible rte_errno codes are:
741 * - EINVAL - NULL parameters are passed
742 * - ENOMEM - Not enough memory
745 struct rte_rcu_qsbr_dq *
746 rte_rcu_qsbr_dq_create(const struct rte_rcu_qsbr_dq_parameters *params);
750 * @b EXPERIMENTAL: this API may change without prior notice
752 * Enqueue one resource to the defer queue and start the grace period.
753 * The resource will be freed later after at least one grace period
756 * If the defer queue is full, it will attempt to reclaim resources.
757 * It will also reclaim resources at regular intervals to avoid
758 * the defer queue from growing too big.
760 * Multi-thread safety is provided as the defer queue configuration.
761 * When multi-thread safety is requested, it is possible that the
762 * resources are not stored in their order of deletion. This results
763 * in resources being held in the defer queue longer than they should.
766 * Defer queue to allocate an entry from.
768 * Pointer to resource data to copy to the defer queue. The size of
769 * the data to copy is equal to the element size provided when the
770 * defer queue was created.
773 * On error - 1 with rte_errno set to
774 * - EINVAL - NULL parameters are passed
775 * - ENOSPC - Defer queue is full. This condition can not happen
776 * if the defer queue size is equal (or larger) than the
777 * number of elements in the data structure.
781 rte_rcu_qsbr_dq_enqueue(struct rte_rcu_qsbr_dq *dq, void *e);
785 * @b EXPERIMENTAL: this API may change without prior notice
787 * Free resources from the defer queue.
789 * This API is multi-thread safe.
792 * Defer queue to free an entry from.
794 * Maximum number of resources to free.
796 * Number of resources that were freed.
798 * Number of resources pending on the defer queue. This number might not
799 * be accurate if multi-thread safety is configured.
801 * Number of resources that can be added to the defer queue.
802 * This number might not be accurate if multi-thread safety is configured.
804 * On successful reclamation of at least 1 resource - 0
805 * On error - 1 with rte_errno set to
806 * - EINVAL - NULL parameters are passed
810 rte_rcu_qsbr_dq_reclaim(struct rte_rcu_qsbr_dq *dq, unsigned int n,
811 unsigned int *freed, unsigned int *pending, unsigned int *available);
815 * @b EXPERIMENTAL: this API may change without prior notice
817 * Delete a defer queue.
819 * It tries to reclaim all the resources on the defer queue.
820 * If any of the resources have not completed the grace period
821 * the reclamation stops and returns immediately. The rest of
822 * the resources are not reclaimed and the defer queue is not
826 * Defer queue to delete.
830 * Possible rte_errno codes are:
831 * - EAGAIN - Some of the resources have not completed at least 1 grace
836 rte_rcu_qsbr_dq_delete(struct rte_rcu_qsbr_dq *dq);
842 #endif /* _RTE_RCU_QSBR_H_ */