eal: add seqlock
Author:     Mattias Rönnblom <mattias.ronnblom@ericsson.com>
AuthorDate: Mon, 23 May 2022 14:23:46 +0000 (16:23 +0200)
Commit:     David Marchand <david.marchand@redhat.com>
CommitDate: Tue, 7 Jun 2022 11:33:14 +0000 (13:33 +0200)
A sequence lock (seqlock) is a synchronization primitive which allows
for data-race-free, low-overhead, high-frequency reads, suitable for
data structures shared across many cores and updated relatively
infrequently.

A seqlock permits multiple parallel readers. A spinlock is used to
serialize writers. In cases where there is only a single writer, or
writer-writer synchronization is done by some external means, the
"raw" sequence counter type (and accompanying rte_seqcount_*()
functions) may be used instead.
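
For example, a single-writer statistics update might use the raw
seqcount directly (a minimal sketch, using the rte_seqcount_*() API
introduced by this patch; "stats" and "stats_count" are hypothetical
names):

  static rte_seqcount_t stats_count = RTE_SEQCOUNT_INITIALIZER;
  static uint64_t stats[2]; /* the seqcount-protected data */

  /* Called from a single writer thread; no spinlock needed. */
  static void
  stats_update(uint64_t rx, uint64_t tx)
  {
          rte_seqcount_write_begin(&stats_count);
          stats[0] = rx; /* stores need not be atomic */
          stats[1] = tx;
          rte_seqcount_write_end(&stats_count);
  }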

To avoid resource reclamation and other issues, the data protected by
a seqlock is best off being self-contained (i.e., no pointers [except
to constant data]).

One way to think about seqlocks is that they provide means to perform
atomic operations on data objects larger than what the native atomic
machine instructions allow for.
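
For example, a struct such as the following (a hypothetical type, not
part of this patch) is too large for a single native atomic load or
store on any current ISA, but can still be accessed atomically, as a
whole, when protected by a seqlock:

  /* 24 bytes: beyond what native atomic instructions can handle. */
  struct position {
          double x;
          double y;
          double z;
  };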

DPDK seqlocks (and the underlying sequence counters) are not
preemption-safe on the writer side. Thread preemption affects
performance, not correctness.

A seqlock contains a sequence number, which can be thought of as the
generation of the data it protects.

A reader will
  1. Load the sequence number (sn).
  2. Load, in arbitrary order, the seqlock-protected data.
  3. Load the sn again.
  4. Check that the first and second sn are equal and even-numbered.
     If not, discard the loaded data and restart from step 1.

The first three steps need to be ordered using suitable memory fences.
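
As a sketch, using C11 <stdatomic.h> atomics directly (all names here
are illustrative only; the actual API is in rte_seqcount.h below):

  static _Atomic uint32_t sn;          /* even: no write in progress */
  static struct position *shared_data; /* the seqlock-protected data */

  struct position copy;
  uint32_t begin_sn, end_sn;

  do {
          /* 1. Acquire-load the sn, ordering it before the
           *    subsequent data loads.
           */
          begin_sn = atomic_load_explicit(&sn, memory_order_acquire);

          /* 2. Load the protected data (need not be atomic). */
          copy = *shared_data;

          /* Order the data loads before the second sn load. */
          atomic_thread_fence(memory_order_acquire);

          /* 3. Load the sn again. */
          end_sn = atomic_load_explicit(&sn, memory_order_relaxed);

          /* 4. Retry on an odd sn (write in progress at begin) or
           *    a changed sn (write during the read).
           */
  } while ((begin_sn & 1) || begin_sn != end_sn);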

A writer will
  1. Take the spinlock, to serialize writer access.
  2. Load the sn.
  3. Store the original sn + 1 as the new sn.
  4. Perform load and stores to the seqlock-protected data.
  5. Store the original sn + 2 as the new sn.
  6. Release the spinlock.

Proper memory fencing is required to make sure the first sn store, the
data stores, and the second sn store appear to the reader in the
mentioned order.

The sn loads and stores must be atomic, but the data loads and stores
need not be.
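
Sketched in the same illustrative style (wlock and new_pos are, again,
hypothetical; the actual implementation is in rte_seqcount.h and
rte_seqlock.h below):

  static rte_spinlock_t wlock = RTE_SPINLOCK_INITIALIZER;

  /* 1. Serialize writers. */
  rte_spinlock_lock(&wlock);

  /* 2-3. Store the odd sn, marking a write in progress. */
  uint32_t odd_sn = atomic_load_explicit(&sn, memory_order_relaxed) + 1;
  atomic_store_explicit(&sn, odd_sn, memory_order_relaxed);

  /* Order the first sn store before the data stores. */
  atomic_thread_fence(memory_order_release);

  /* 4. Update the protected data (need not be atomic). */
  *shared_data = new_pos;

  /* 5. Release-store the even sn; synchronizes-with the readers'
   *    acquire load of the sn.
   */
  atomic_store_explicit(&sn, odd_sn + 1, memory_order_release);

  /* 6. Allow other writers in. */
  rte_spinlock_unlock(&wlock);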

The original seqlock design and implementation were done by Stephen
Hemminger. This is an independent implementation, using C11 atomics.

For more information on seqlocks, see
https://en.wikipedia.org/wiki/Seqlock

Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
MAINTAINERS
app/test/meson.build
app/test/test_seqlock.c [new file with mode: 0644]
doc/api/doxy-api-index.md
doc/guides/rel_notes/release_22_07.rst
lib/eal/include/meson.build
lib/eal/include/rte_seqcount.h [new file with mode: 0644]
lib/eal/include/rte_seqlock.h [new file with mode: 0644]

diff --git a/MAINTAINERS b/MAINTAINERS
index f34f6fa2e90d6cac01756dce844e327b64bb917a..a1075613c09f8052287357c3592f9839c699f0f7 100644
@@ -258,6 +258,12 @@ M: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
 F: lib/eal/include/generic/rte_mcslock.h
 F: app/test/test_mcslock.c
 
+Sequence Lock
+M: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
+F: lib/eal/include/rte_seqcount.h
+F: lib/eal/include/rte_seqlock.h
+F: app/test/test_seqlock.c
+
 Ticketlock
 M: Joyce Kong <joyce.kong@arm.com>
 F: lib/eal/include/generic/rte_ticketlock.h
diff --git a/app/test/meson.build b/app/test/meson.build
index 0f712680de74bc79f71981e3742ccc2eeae3d3d7..7fe261cae88d39fed617cba5b28ff8e02062058f 100644
@@ -126,6 +126,7 @@ test_sources = files(
         'test_sched.c',
         'test_security.c',
         'test_security_inline_proto.c',
+        'test_seqlock.c',
         'test_service_cores.c',
         'test_spinlock.c',
         'test_stack.c',
@@ -217,6 +218,7 @@ fast_tests = [
         ['rwlock_rde_wro_autotest', true, true],
         ['sched_autotest', true, true],
         ['security_autotest', false, true],
+        ['seqlock_autotest', true, true],
         ['spinlock_autotest', true, true],
         ['stack_autotest', false, true],
         ['stack_lf_autotest', false, true],
diff --git a/app/test/test_seqlock.c b/app/test/test_seqlock.c
new file mode 100644
index 0000000..d26d2c0
--- /dev/null
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Ericsson AB
+ */
+
+#include <rte_seqlock.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+
+#include <inttypes.h>
+
+#include "test.h"
+
+struct data {
+       rte_seqlock_t lock;
+
+       uint64_t a;
+       uint64_t b __rte_cache_aligned;
+       uint64_t c __rte_cache_aligned;
+} __rte_cache_aligned;
+
+struct reader {
+       struct data *data;
+       uint8_t stop;
+};
+
+#define WRITER_RUNTIME 2.0 /* s */
+
+#define WRITER_MAX_DELAY 100 /* us */
+
+#define INTERRUPTED_WRITER_FREQUENCY 1000
+#define WRITER_INTERRUPT_TIME 1 /* us */
+
+static int
+writer_run(void *arg)
+{
+       struct data *data = arg;
+       uint64_t deadline;
+
+       deadline = rte_get_timer_cycles() +
+               WRITER_RUNTIME * rte_get_timer_hz();
+
+       while (rte_get_timer_cycles() < deadline) {
+               bool interrupted;
+               uint64_t new_value;
+               unsigned int delay;
+
+               new_value = rte_rand();
+
+               interrupted = rte_rand_max(INTERRUPTED_WRITER_FREQUENCY) == 0;
+
+               rte_seqlock_write_lock(&data->lock);
+
+               data->c = new_value;
+               data->b = new_value;
+
+               if (interrupted)
+                       rte_delay_us_block(WRITER_INTERRUPT_TIME);
+
+               data->a = new_value;
+
+               rte_seqlock_write_unlock(&data->lock);
+
+               delay = rte_rand_max(WRITER_MAX_DELAY);
+
+               rte_delay_us_block(delay);
+       }
+
+       return TEST_SUCCESS;
+}
+
+#define INTERRUPTED_READER_FREQUENCY 1000
+#define READER_INTERRUPT_TIME 1000 /* us */
+
+static int
+reader_run(void *arg)
+{
+       struct reader *r = arg;
+       int rc = TEST_SUCCESS;
+
+       while (__atomic_load_n(&r->stop, __ATOMIC_RELAXED) == 0 &&
+                       rc == TEST_SUCCESS) {
+               struct data *data = r->data;
+               bool interrupted;
+               uint32_t sn;
+               uint64_t a;
+               uint64_t b;
+               uint64_t c;
+
+               interrupted = rte_rand_max(INTERRUPTED_READER_FREQUENCY) == 0;
+
+               do {
+                       sn = rte_seqlock_read_begin(&data->lock);
+
+                       a = data->a;
+                       if (interrupted)
+                               rte_delay_us_block(READER_INTERRUPT_TIME);
+                       c = data->c;
+                       b = data->b;
+
+               } while (rte_seqlock_read_retry(&data->lock, sn));
+
+               if (a != b || b != c) {
+                       printf("Reader observed inconsistent data values "
+                               "%" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+                               a, b, c);
+                       rc = TEST_FAILED;
+               }
+       }
+
+       return rc;
+}
+
+static void
+reader_stop(struct reader *reader)
+{
+       __atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);
+}
+
+#define NUM_WRITERS 2 /* main lcore + one worker */
+#define MIN_NUM_READERS 2
+#define MIN_LCORE_COUNT (NUM_WRITERS + MIN_NUM_READERS)
+
+/* Only a compile-time test */
+static rte_seqlock_t __rte_unused static_init_lock = RTE_SEQLOCK_INITIALIZER;
+
+static int
+test_seqlock(void)
+{
+       struct reader readers[RTE_MAX_LCORE];
+       unsigned int num_lcores;
+       unsigned int num_readers;
+       struct data *data;
+       unsigned int i;
+       unsigned int lcore_id;
+       unsigned int reader_lcore_ids[RTE_MAX_LCORE];
+       unsigned int worker_writer_lcore_id = 0;
+       int rc = TEST_SUCCESS;
+
+       num_lcores = rte_lcore_count();
+
+       if (num_lcores < MIN_LCORE_COUNT) {
+               printf("Too few cores to run test. Skipping.\n");
+               return TEST_SKIPPED;
+       }
+
+       num_readers = num_lcores - NUM_WRITERS;
+
+       data = rte_zmalloc(NULL, sizeof(struct data), 0);
+
+       if (data == NULL) {
+               printf("Failed to allocate memory for seqlock data\n");
+               return TEST_FAILED;
+       }
+
+       i = 0;
+       RTE_LCORE_FOREACH_WORKER(lcore_id) {
+               if (i == 0) {
+                       rte_eal_remote_launch(writer_run, data, lcore_id);
+                       worker_writer_lcore_id = lcore_id;
+               } else {
+                       unsigned int reader_idx = i - 1;
+                       struct reader *reader = &readers[reader_idx];
+
+                       reader->data = data;
+                       reader->stop = 0;
+
+                       rte_eal_remote_launch(reader_run, reader, lcore_id);
+                       reader_lcore_ids[reader_idx] = lcore_id;
+               }
+               i++;
+       }
+
+       if (writer_run(data) != 0 ||
+                       rte_eal_wait_lcore(worker_writer_lcore_id) != 0)
+               rc = TEST_FAILED;
+
+       for (i = 0; i < num_readers; i++) {
+               reader_stop(&readers[i]);
+               if (rte_eal_wait_lcore(reader_lcore_ids[i]) != 0)
+                       rc = TEST_FAILED;
+       }
+
+       rte_free(data);
+
+       return rc;
+}
+
+REGISTER_TEST_COMMAND(seqlock_autotest, test_seqlock);
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index bb19cf9bc6ee1c6d5a56b3e722f7e1c0796a30bc..186a258be4f14a8e1063c0d7a371ea527fa9b83c 100644
@@ -76,6 +76,8 @@ The public API headers are grouped by topics:
   [mcslock](@ref rte_mcslock.h),
   [pflock](@ref rte_pflock.h),
   [rwlock](@ref rte_rwlock.h),
+  [seqcount](@ref rte_seqcount.h),
+  [seqlock](@ref rte_seqlock.h),
   [spinlock](@ref rte_spinlock.h),
   [ticketlock](@ref rte_ticketlock.h),
   [RCU](@ref rte_rcu_qsbr.h)
diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index f2cf41def9a2cd7c64e51531b1d196216a60172f..02c6ca40ec63da0845f0fdc78c1aea24908ae582 100644
@@ -55,6 +55,12 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Added Sequence Lock.**
+
+  Added a new synchronization primitive: the sequence lock
+  (seqlock). A seqlock allows for low overhead, parallel reads. The
+  DPDK seqlock uses a spinlock to serialize multiple writing threads.
+
 * **Added protocol based input color selection for meter.**
 
   Added new functions ``rte_mtr_color_in_protocol_set()``,
diff --git a/lib/eal/include/meson.build b/lib/eal/include/meson.build
index 9700494816e45f4f091be531c44ec299e4d3c527..40ebb5b63d9ce9998844673365bba2d5f127a4c6 100644
@@ -36,6 +36,8 @@ headers += files(
         'rte_per_lcore.h',
         'rte_random.h',
         'rte_reciprocal.h',
+        'rte_seqcount.h',
+        'rte_seqlock.h',
         'rte_service.h',
         'rte_service_component.h',
         'rte_string_fns.h',
diff --git a/lib/eal/include/rte_seqcount.h b/lib/eal/include/rte_seqcount.h
new file mode 100644
index 0000000..ff62708
--- /dev/null
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Ericsson AB
+ */
+
+#ifndef _RTE_SEQCOUNT_H_
+#define _RTE_SEQCOUNT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Seqcount
+ *
+ * The sequence counter synchronizes a single writer with multiple,
+ * parallel readers. It is used as the basis for the RTE sequence
+ * lock.
+ *
+ * @see rte_seqlock.h
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_compat.h>
+
+/**
+ * The RTE seqcount type.
+ */
+typedef struct {
+       uint32_t sn; /**< A sequence number for the protected data. */
+} rte_seqcount_t;
+
+/**
+ * A static seqcount initializer.
+ */
+#define RTE_SEQCOUNT_INITIALIZER { .sn = 0 }
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Initialize the sequence counter.
+ *
+ * @param seqcount
+ *   A pointer to the sequence counter.
+ */
+__rte_experimental
+static inline void
+rte_seqcount_init(rte_seqcount_t *seqcount)
+{
+       seqcount->sn = 0;
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Begin a read-side critical section.
+ *
+ * A call to this function marks the beginning of a read-side critical
+ * section, for @p seqcount.
+ *
+ * rte_seqcount_read_begin() returns a sequence number, which is later
+ * used in rte_seqcount_read_retry() to check if the protected data
+ * underwent any modifications during the read transaction.
+ *
+ * After (in program order) rte_seqcount_read_begin() has been called,
+ * the calling thread reads the protected data, for later use. The
+ * protected data read *must* be copied (either in pristine form, or
+ * in the form of some derivative), since the caller may only read the
+ * data from within the read-side critical section (i.e., after
+ * rte_seqcount_read_begin() and before rte_seqcount_read_retry()),
+ * but must not act upon the retrieved data while in the critical
+ * section, since it does not yet know if it is consistent.
+ *
+ * The protected data may be read using atomic and/or non-atomic
+ * operations.
+ *
+ * After (in program order) all required data loads have been
+ * performed, rte_seqcount_read_retry() should be called, marking
+ * the end of the read-side critical section.
+ *
+ * If rte_seqcount_read_retry() returns true, the just-read data is
+ * inconsistent and should be discarded. The caller has the option to
+ * either restart the whole procedure right away (i.e., calling
+ * rte_seqcount_read_begin() again), or do the same at some later time.
+ *
+ * If rte_seqcount_read_retry() returns false, the data was read
+ * atomically and the copied data is consistent.
+ *
+ * @param seqcount
+ *   A pointer to the sequence counter.
+ * @return
+ *   The seqcount sequence number for this critical section, to
+ *   later be passed to rte_seqcount_read_retry().
+ *
+ * @see rte_seqcount_read_retry()
+ */
+
+__rte_experimental
+static inline uint32_t
+rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
+{
+       /* __ATOMIC_ACQUIRE to prevent loads after (in program order)
+        * from happening before the sn load. Synchronizes-with the
+        * store release in rte_seqcount_write_end().
+        */
+       return __atomic_load_n(&seqcount->sn, __ATOMIC_ACQUIRE);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * End a read-side critical section.
+ *
+ * A call to this function marks the end of a read-side critical
+ * section, for @p seqcount. The application must supply the sequence
+ * number produced by the corresponding rte_seqcount_read_begin() call.
+ *
+ * After this function has been called, the caller should not access
+ * the protected data.
+ *
+ * In case rte_seqcount_read_retry() returns true, the just-read data
+ * was modified as it was being read and may be inconsistent, and thus
+ * should be discarded.
+ *
+ * In case this function returns false, the data is consistent and the
+ * set of atomic and non-atomic load operations performed between
+ * rte_seqcount_read_begin() and rte_seqcount_read_retry() were atomic,
+ * as a whole.
+ *
+ * @param seqcount
+ *   A pointer to the sequence counter.
+ * @param begin_sn
+ *   The sequence number returned by rte_seqcount_read_begin().
+ * @return
+ *   true or false, if the just-read seqcount-protected data was
+ *   inconsistent or consistent, respectively, at the time it was
+ *   read.
+ *
+ * @see rte_seqcount_read_begin()
+ */
+
+__rte_experimental
+static inline bool
+rte_seqcount_read_retry(const rte_seqcount_t *seqcount, uint32_t begin_sn)
+{
+       uint32_t end_sn;
+
+       /* An odd sequence number means the protected data was being
+        * modified already at the point of the rte_seqcount_read_begin()
+        * call.
+        */
+       if (unlikely(begin_sn & 1))
+               return true;
+
+       /* Make sure the data loads happen before the sn load. */
+       rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+       end_sn = __atomic_load_n(&seqcount->sn, __ATOMIC_RELAXED);
+
+       /* A writer incremented the sequence number during this read
+        * critical section.
+        */
+       return begin_sn != end_sn;
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Begin a write-side critical section.
+ *
+ * A call to this function marks the beginning of a write-side
+ * critical section, after which the caller may go on to modify (both
+ * read and write) the protected data, in an atomic or non-atomic
+ * manner.
+ *
+ * After the necessary updates have been performed, the application
+ * calls rte_seqcount_write_end().
+ *
+ * Multiple, parallel writers must use some external serialization.
+ *
+ * This function is not preemption-safe in the sense that preemption
+ * of the calling thread may block reader progress until the writer
+ * thread is rescheduled.
+ *
+ * @param seqcount
+ *   A pointer to the sequence counter.
+ *
+ * @see rte_seqcount_write_end()
+ */
+
+__rte_experimental
+static inline void
+rte_seqcount_write_begin(rte_seqcount_t *seqcount)
+{
+       uint32_t sn;
+
+       sn = seqcount->sn + 1;
+
+       __atomic_store_n(&seqcount->sn, sn, __ATOMIC_RELAXED);
+
+       /* __ATOMIC_RELEASE to prevent stores after (in program order)
+        * from happening before the sn store.
+        */
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * End a write-side critical section.
+ *
+ * A call to this function marks the end of the write-side critical
+ * section, for @p seqcount. After this call has been made, the
+ * protected data may no longer be modified.
+ *
+ * @param seqcount
+ *   A pointer to the sequence counter.
+ *
+ * @see rte_seqcount_write_begin()
+ */
+__rte_experimental
+static inline void
+rte_seqcount_write_end(rte_seqcount_t *seqcount)
+{
+       uint32_t sn;
+
+       sn = seqcount->sn + 1;
+
+       /* Synchronizes-with the load acquire in rte_seqcount_read_begin(). */
+       __atomic_store_n(&seqcount->sn, sn, __ATOMIC_RELEASE);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SEQCOUNT_H_ */
diff --git a/lib/eal/include/rte_seqlock.h b/lib/eal/include/rte_seqlock.h
new file mode 100644
index 0000000..1663af6
--- /dev/null
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Ericsson AB
+ */
+
+#ifndef _RTE_SEQLOCK_H_
+#define _RTE_SEQLOCK_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Seqlock
+ *
+ * A sequence lock (seqlock) is a synchronization primitive allowing
+ * multiple, parallel, readers to efficiently and safely (i.e., in a
+ * data-race free manner) access lock-protected data. The RTE seqlock
+ * permits multiple writers as well. A spinlock is used for
+ * writer-writer synchronization.
+ *
+ * A reader never blocks a writer. Very high frequency writes may
+ * prevent readers from making progress.
+ *
+ * A seqlock is not preemption-safe on the writer side. If a writer is
+ * preempted, it may block readers until the writer thread is allowed
+ * to continue. Heavy computations should be kept out of the
+ * writer-side critical section, to avoid delaying readers.
+ *
+ * Seqlocks are useful for data which are read by many cores, at a
+ * high frequency, and relatively infrequently written to.
+ *
+ * One way to think about seqlocks is that they provide means to
+ * perform atomic operations on objects larger than what the native
+ * machine instructions allow for.
+ *
+ * To avoid resource reclamation issues, the data protected by a
+ * seqlock should typically be kept self-contained (e.g., no pointers
+ * to mutable, dynamically allocated data).
+ *
+ * Example usage:
+ * @code{.c}
+ * #define MAX_Y_LEN 16
+ * // Application-defined example data structure, protected by a seqlock.
+ * struct config {
+ *         rte_seqlock_t lock;
+ *         int param_x;
+ *         char param_y[MAX_Y_LEN];
+ * };
+ *
+ * // Accessor function for reading config fields.
+ * void
+ * config_read(const struct config *config, int *param_x, char *param_y)
+ * {
+ *         uint32_t sn;
+ *
+ *         do {
+ *                 sn = rte_seqlock_read_begin(&config->lock);
+ *
+ *                 // Loads may be atomic or non-atomic, as in this example.
+ *                 *param_x = config->param_x;
+ *                 strcpy(param_y, config->param_y);
+ *                 // An alternative to an immediate retry is to abort and
+ *                 // try again at some later time, assuming progress is
+ *                 // possible without the data.
+ *         } while (rte_seqlock_read_retry(&config->lock, sn));
+ * }
+ *
+ * // Accessor function for writing config fields.
+ * void
+ * config_update(struct config *config, int param_x, const char *param_y)
+ * {
+ *         rte_seqlock_write_lock(&config->lock);
+ *         // Stores may be atomic or non-atomic, as in this example.
+ *         config->param_x = param_x;
+ *         strcpy(config->param_y, param_y);
+ *         rte_seqlock_write_unlock(&config->lock);
+ * }
+ * @endcode
+ *
+ * In case there is only a single writer, or writer-writer
+ * serialization is provided by other means, the use of sequence lock
+ * (i.e., rte_seqlock_t) can be replaced with the use of the "raw"
+ * rte_seqcount_t type instead.
+ *
+ * @see
+ * https://en.wikipedia.org/wiki/Seqlock.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_compat.h>
+#include <rte_seqcount.h>
+#include <rte_spinlock.h>
+
+/**
+ * The RTE seqlock type.
+ */
+typedef struct {
+       rte_seqcount_t count; /**< Sequence count for the protected data. */
+       rte_spinlock_t lock; /**< Spinlock used to serialize writers. */
+} rte_seqlock_t;
+
+/**
+ * A static seqlock initializer.
+ */
+#define RTE_SEQLOCK_INITIALIZER \
+       {                                                       \
+               .count = RTE_SEQCOUNT_INITIALIZER,              \
+               .lock = RTE_SPINLOCK_INITIALIZER                \
+       }
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Initialize the seqlock.
+ *
+ * This function initializes the seqlock, and leaves the writer-side
+ * spinlock unlocked.
+ *
+ * @param seqlock
+ *   A pointer to the seqlock.
+ */
+__rte_experimental
+static inline void
+rte_seqlock_init(rte_seqlock_t *seqlock)
+{
+       rte_seqcount_init(&seqlock->count);
+       rte_spinlock_init(&seqlock->lock);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Begin a read-side critical section.
+ *
+ * See rte_seqcount_read_retry() for details.
+ *
+ * @param seqlock
+ *   A pointer to the seqlock.
+ * @return
+ *   The seqlock sequence number for this critical section, to
+ *   later be passed to rte_seqlock_read_retry().
+ *
+ * @see rte_seqlock_read_retry()
+ * @see rte_seqcount_read_retry()
+ */
+
+__rte_experimental
+static inline uint32_t
+rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
+{
+       return rte_seqcount_read_begin(&seqlock->count);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * End a read-side critical section.
+ *
+ * See rte_seqcount_read_retry() for details.
+ *
+ * @param seqlock
+ *   A pointer to the seqlock.
+ * @param begin_sn
+ *   The seqlock sequence number returned by rte_seqlock_read_begin().
+ * @return
+ *   true or false, if the just-read seqlock-protected data was
+ *   inconsistent or consistent, respectively, at the time it was
+ *   read.
+ *
+ * @see rte_seqlock_read_begin()
+ */
+__rte_experimental
+static inline bool
+rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint32_t begin_sn)
+{
+       return rte_seqcount_read_retry(&seqlock->count, begin_sn);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Begin a write-side critical section.
+ *
+ * A call to this function acquires the write lock associated with
+ * @p seqlock, and marks the beginning of a write-side critical section.
+ *
+ * After having called this function, the caller may go on to modify
+ * (both read and write) the protected data, in an atomic or
+ * non-atomic manner.
+ *
+ * After the necessary updates have been performed, the application
+ * calls rte_seqlock_write_unlock().
+ *
+ * This function is not preemption-safe in the sense that preemption
+ * of the calling thread may block reader progress until the writer
+ * thread is rescheduled.
+ *
+ * Unlike rte_seqlock_read_begin(), each call made to
+ * rte_seqlock_write_lock() must be matched with an unlock call.
+ *
+ * @param seqlock
+ *   A pointer to the seqlock.
+ *
+ * @see rte_seqlock_write_unlock()
+ */
+__rte_experimental
+static inline void
+rte_seqlock_write_lock(rte_seqlock_t *seqlock)
+{
+       /* To synchronize with other writers. */
+       rte_spinlock_lock(&seqlock->lock);
+
+       rte_seqcount_write_begin(&seqlock->count);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * End a write-side critical section.
+ *
+ * A call to this function marks the end of the write-side critical
+ * section, for @p seqlock. After this call has been made, the protected
+ * data may no longer be modified.
+ *
+ * @param seqlock
+ *   A pointer to the seqlock.
+ *
+ * @see rte_seqlock_write_lock()
+ */
+__rte_experimental
+static inline void
+rte_seqlock_write_unlock(rte_seqlock_t *seqlock)
+{
+       rte_seqcount_write_end(&seqlock->count);
+
+       rte_spinlock_unlock(&seqlock->lock);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SEQLOCK_H_ */