Convert rte_atomic usages to compiler atomic built-ins for lcore
synchronization in the spinlock test cases.
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
static rte_spinlock_recursive_t slr;
static unsigned count = 0;
static rte_spinlock_recursive_t slr;
static unsigned count = 0;
-static rte_atomic32_t synchro;
+static uint32_t synchro;
static int
test_spinlock_per_core(__rte_unused void *arg)
static int
test_spinlock_per_core(__rte_unused void *arg)
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- while (rte_atomic32_read(&synchro) == 0);
+ rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- rte_atomic32_set(&synchro, 0);
+ __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- rte_atomic32_set(&synchro, 1);
+ __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();