/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Ericsson AB
 */

#ifndef _RTE_SEQLOCK_H_
#define _RTE_SEQLOCK_H_

/**
 * @file
 * RTE Seqlock
 *
 * A sequence lock (seqlock) is a synchronization primitive allowing
 * multiple, parallel, readers to efficiently and safely (i.e., in a
 * data-race free manner) access lock-protected data. The RTE seqlock
 * permits multiple writers as well. A spinlock is used for
 * writer-writer synchronization.
 *
 * A reader never blocks a writer. Very high frequency writes may
 * prevent readers from making progress.
 *
 * A seqlock is not preemption-safe on the writer side. If a writer is
 * preempted, it may block readers until the writer thread is allowed
 * to continue. Heavy computations should be kept out of the
 * writer-side critical section, to avoid delaying readers.
 *
 * Seqlocks are useful for data which are read by many cores, at a
 * high frequency, and relatively infrequently written to.
 *
 * One way to think about seqlocks is that they provide means to
 * perform atomic operations on objects larger than what the native
 * machine instructions allow for.
 *
 * To avoid resource reclamation issues, the data protected by a
 * seqlock should typically be kept self-contained (e.g., no pointers
 * to mutable, dynamically allocated data).
 *
 * Example usage:
 * @code{.c}
 * #define MAX_Y_LEN 16
 * // Application-defined example data structure, protected by a seqlock.
 * struct config {
 *         rte_seqlock_t lock;
 *         int param_x;
 *         char param_y[MAX_Y_LEN];
 * };
 *
 * // Accessor function for reading config fields.
 * void
 * config_read(const struct config *config, int *param_x, char *param_y)
 * {
 *         uint32_t sn;
 *
 *         do {
 *                 sn = rte_seqlock_read_begin(&config->lock);
 *
 *                 // Loads may be atomic or non-atomic, as in this example.
 *                 *param_x = config->param_x;
 *                 strcpy(param_y, config->param_y);
 *                 // An alternative to an immediate retry is to abort and
 *                 // try again at some later time, assuming progress is
 *                 // possible without the data.
 *         } while (rte_seqlock_read_retry(&config->lock, sn));
 * }
 *
 * // Accessor function for writing config fields.
 * void
 * config_update(struct config *config, int param_x, const char *param_y)
 * {
 *         rte_seqlock_write_lock(&config->lock);
 *         // Stores may be atomic or non-atomic, as in this example.
 *         config->param_x = param_x;
 *         strcpy(config->param_y, param_y);
 *         rte_seqlock_write_unlock(&config->lock);
 * }
 * @endcode
 *
 * In case there is only a single writer, or writer-writer
 * serialization is provided by other means, the use of sequence lock
 * (i.e., rte_seqlock_t) can be replaced with the use of the "raw"
 * rte_seqcount_t type instead.
 *
 * For more information on seqlocks, see
 * https://en.wikipedia.org/wiki/Seqlock.
 */
93 #include <rte_atomic.h>
94 #include <rte_branch_prediction.h>
95 #include <rte_compat.h>
96 #include <rte_seqcount.h>
97 #include <rte_spinlock.h>
100 * The RTE seqlock type.
103 rte_seqcount_t count; /**< Sequence count for the protected data. */
104 rte_spinlock_t lock; /**< Spinlock used to serialize writers. */
/**
 * A static seqlock initializer.
 *
 * Equivalent to calling rte_seqlock_init(); usable for static-storage
 * seqlocks.
 */
#define RTE_SEQLOCK_INITIALIZER					\
	{							\
		.count = RTE_SEQCOUNT_INITIALIZER,		\
		.lock = RTE_SPINLOCK_INITIALIZER		\
	}
118 * @b EXPERIMENTAL: this API may change without prior notice.
120 * Initialize the seqlock.
122 * This function initializes the seqlock, and leaves the writer-side
126 * A pointer to the seqlock.
130 rte_seqlock_init(rte_seqlock_t *seqlock)
132 rte_seqcount_init(&seqlock->count);
133 rte_spinlock_init(&seqlock->lock);
138 * @b EXPERIMENTAL: this API may change without prior notice.
140 * Begin a read-side critical section.
142 * See rte_seqcount_read_retry() for details.
145 * A pointer to the seqlock.
147 * The seqlock sequence number for this critical section, to
148 * later be passed to rte_seqlock_read_retry().
150 * @see rte_seqlock_read_retry()
151 * @see rte_seqcount_read_retry()
155 static inline uint32_t
156 rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
158 return rte_seqcount_read_begin(&seqlock->count);
163 * @b EXPERIMENTAL: this API may change without prior notice.
165 * End a read-side critical section.
167 * See rte_seqcount_read_retry() for details.
170 * A pointer to the seqlock.
172 * The seqlock sequence number returned by rte_seqlock_read_begin().
174 * true or false, if the just-read seqlock-protected data was
175 * inconsistent or consistent, respectively, at the time it was
178 * @see rte_seqlock_read_begin()
182 rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint32_t begin_sn)
184 return rte_seqcount_read_retry(&seqlock->count, begin_sn);
189 * @b EXPERIMENTAL: this API may change without prior notice.
191 * Begin a write-side critical section.
193 * A call to this function acquires the write lock associated @p
194 * seqlock, and marks the beginning of a write-side critical section.
196 * After having called this function, the caller may go on to modify
197 * (both read and write) the protected data, in an atomic or
200 * After the necessary updates have been performed, the application
201 * calls rte_seqlock_write_unlock().
203 * This function is not preemption-safe in the sense that preemption
204 * of the calling thread may block reader progress until the writer
205 * thread is rescheduled.
207 * Unlike rte_seqlock_read_begin(), each call made to
208 * rte_seqlock_write_lock() must be matched with an unlock call.
211 * A pointer to the seqlock.
213 * @see rte_seqlock_write_unlock()
217 rte_seqlock_write_lock(rte_seqlock_t *seqlock)
219 /* To synchronize with other writers. */
220 rte_spinlock_lock(&seqlock->lock);
222 rte_seqcount_write_begin(&seqlock->count);
227 * @b EXPERIMENTAL: this API may change without prior notice.
229 * End a write-side critical section.
231 * A call to this function marks the end of the write-side critical
232 * section, for @p seqlock. After this call has been made, the protected
233 * data may no longer be modified.
236 * A pointer to the seqlock.
238 * @see rte_seqlock_write_lock()
242 rte_seqlock_write_unlock(rte_seqlock_t *seqlock)
244 rte_seqcount_write_end(&seqlock->count);
246 rte_spinlock_unlock(&seqlock->lock);
253 #endif /* _RTE_SEQLOCK_H_ */