1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2022 Ericsson AB
5 #ifndef _RTE_SEQCOUNT_H_
6 #define _RTE_SEQCOUNT_H_
16 * The sequence counter synchronizes a single writer with multiple,
17 * parallel readers. It is used as the basis for the RTE sequence
#include <stdbool.h>
#include <stdint.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_compat.h>
/**
 * The RTE seqcount type.
 */
typedef struct {
	uint32_t sn; /**< A sequence number for the protected data. */
} rte_seqcount_t;

/**
 * A static seqcount initializer.
 */
#define RTE_SEQCOUNT_INITIALIZER { .sn = 0 }
44 * @b EXPERIMENTAL: this API may change without prior notice.
46 * Initialize the sequence counter.
49 * A pointer to the sequence counter.
53 rte_seqcount_init(rte_seqcount_t *seqcount)
60 * @b EXPERIMENTAL: this API may change without prior notice.
62 * Begin a read-side critical section.
64 * A call to this function marks the beginning of a read-side critical
65 * section, for @p seqcount.
67 * rte_seqcount_read_begin() returns a sequence number, which is later
68 * used in rte_seqcount_read_retry() to check if the protected data
69 * underwent any modifications during the read transaction.
71 * After (in program order) rte_seqcount_read_begin() has been called,
72 * the calling thread reads the protected data, for later use. The
73 * protected data read *must* be copied (either in pristine form, or
74 * in the form of some derivative), since the caller may only read the
75 * data from within the read-side critical section (i.e., after
76 * rte_seqcount_read_begin() and before rte_seqcount_read_retry()),
77 * but must not act upon the retrieved data while in the critical
78 * section, since it does not yet know if it is consistent.
80 * The protected data may be read using atomic and/or non-atomic
83 * After (in program order) all required data loads have been
84 * performed, rte_seqcount_read_retry() should be called, marking
85 * the end of the read-side critical section.
87 * If rte_seqcount_read_retry() returns true, the just-read data is
88 * inconsistent and should be discarded. The caller has the option to
89 * either restart the whole procedure right away (i.e., calling
90 * rte_seqcount_read_begin() again), or do the same at some later time.
92 * If rte_seqcount_read_retry() returns false, the data was read
93 * atomically and the copied data is consistent.
96 * A pointer to the sequence counter.
98 * The seqcount sequence number for this critical section, to
99 * later be passed to rte_seqcount_read_retry().
101 * @see rte_seqcount_read_retry()
105 static inline uint32_t
106 rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
108 /* __ATOMIC_ACQUIRE to prevent loads after (in program order)
109 * from happening before the sn load. Synchronizes-with the
110 * store release in rte_seqcount_write_end().
112 return __atomic_load_n(&seqcount->sn, __ATOMIC_ACQUIRE);
117 * @b EXPERIMENTAL: this API may change without prior notice.
119 * End a read-side critical section.
121 * A call to this function marks the end of a read-side critical
122 * section, for @p seqcount. The application must supply the sequence
123 * number produced by the corresponding rte_seqcount_read_begin() call.
125 * After this function has been called, the caller should not access
126 * the protected data.
128 * In case rte_seqcount_read_retry() returns true, the just-read data
129 * was modified as it was being read and may be inconsistent, and thus
130 * should be discarded.
132 * In case this function returns false, the data is consistent and the
133 * set of atomic and non-atomic load operations performed between
134 * rte_seqcount_read_begin() and rte_seqcount_read_retry() were atomic,
138 * A pointer to the sequence counter.
140 * The sequence number returned by rte_seqcount_read_begin().
142 * true or false, if the just-read seqcount-protected data was
143 * inconsistent or consistent, respectively, at the time it was
146 * @see rte_seqcount_read_begin()
151 rte_seqcount_read_retry(const rte_seqcount_t *seqcount, uint32_t begin_sn)
155 /* An odd sequence number means the protected data was being
156 * modified already at the point of the rte_seqcount_read_begin()
159 if (unlikely(begin_sn & 1))
162 /* make sure the data loads happens before the sn load */
163 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
165 end_sn = __atomic_load_n(&seqcount->sn, __ATOMIC_RELAXED);
167 /* A writer incremented the sequence number during this read
170 return begin_sn != end_sn;
175 * @b EXPERIMENTAL: this API may change without prior notice.
177 * Begin a write-side critical section.
179 * A call to this function marks the beginning of a write-side
180 * critical section, after which the caller may go on to modify (both
181 * read and write) the protected data, in an atomic or non-atomic
184 * After the necessary updates have been performed, the application
185 * calls rte_seqcount_write_end().
187 * Multiple, parallel writers must use some external serialization.
189 * This function is not preemption-safe in the sense that preemption
190 * of the calling thread may block reader progress until the writer
191 * thread is rescheduled.
194 * A pointer to the sequence counter.
196 * @see rte_seqcount_write_end()
201 rte_seqcount_write_begin(rte_seqcount_t *seqcount)
205 sn = seqcount->sn + 1;
207 __atomic_store_n(&seqcount->sn, sn, __ATOMIC_RELAXED);
209 /* __ATOMIC_RELEASE to prevent stores after (in program order)
210 * from happening before the sn store.
212 rte_atomic_thread_fence(__ATOMIC_RELEASE);
217 * @b EXPERIMENTAL: this API may change without prior notice.
219 * End a write-side critical section.
221 * A call to this function marks the end of the write-side critical
222 * section, for @p seqcount. After this call has been made, the
223 * protected data may no longer be modified.
226 * A pointer to the sequence counter.
228 * @see rte_seqcount_write_begin()
232 rte_seqcount_write_end(rte_seqcount_t *seqcount)
236 sn = seqcount->sn + 1;
238 /* Synchronizes-with the load acquire in rte_seqcount_read_begin(). */
239 __atomic_store_n(&seqcount->sn, sn, __ATOMIC_RELEASE);
246 #endif /* _RTE_SEQCOUNT_H_ */