/**
* Stress test for ring enqueue/dequeue operations.
- * Performs the following pattern on each slave worker:
+ * Performs the following pattern on each worker:
* dequeue/read-write data from the dequeued objects/enqueue.
* Serves as both functional and performance test of ring
* enqueue/dequeue operations under high contention
WRK_CMD_RUN,
};
-static volatile uint32_t wrk_cmd __rte_cache_aligned;
+static uint32_t wrk_cmd __rte_cache_aligned = WRK_CMD_STOP;
/* test run-time in seconds */
static const uint32_t run_time = 60;
"offending object: %p\n",
__func__, rte_lcore_id(), num, i, elm[i]);
rte_memdump(stdout, "expected", check, sizeof(*check));
- rte_memdump(stdout, "result", elm[i], sizeof(elm[i]));
+ rte_memdump(stdout, "result", elm[i], sizeof(*elm[i]));
rte_spinlock_unlock(&dump_lock);
return -EINVAL;
}
fill_ring_elm(&def_elm, UINT32_MAX);
fill_ring_elm(&loc_elm, lc);
- while (wrk_cmd != WRK_CMD_RUN) {
- rte_smp_rmb();
+ /* Acquire ordering is not required here: the main lcore does not
+ * release any data to the workers through 'wrk_cmd'; it is used
+ * purely as a run/stop flag.
+ */
+ while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) != WRK_CMD_RUN)
rte_pause();
- }
cl = rte_rdtsc_precise();
lcore_stat_update(&la->stats, 1, num, tm0 + tm1, prcs);
- } while (wrk_cmd == WRK_CMD_RUN);
+ } while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) == WRK_CMD_RUN);
cl = rte_rdtsc_precise() - cl;
if (prcs == 0)
memset(arg, 0, sizeof(arg));
- /* launch on all slaves */
- RTE_LCORE_FOREACH_SLAVE(lc) {
+ /* launch on all workers */
+ RTE_LCORE_FOREACH_WORKER(lc) {
arg[lc].rng = r;
arg[lc].stats = init_stat;
rte_eal_remote_launch(test, &arg[lc], lc);
}
/* signal workers to start the test */
- wrk_cmd = WRK_CMD_RUN;
- rte_smp_wmb();
+ __atomic_store_n(&wrk_cmd, WRK_CMD_RUN, __ATOMIC_RELEASE);
- usleep(run_time * US_PER_S);
+ rte_delay_us(run_time * US_PER_S);
/* signal workers to stop the test */
- wrk_cmd = WRK_CMD_STOP;
- rte_smp_wmb();
+ __atomic_store_n(&wrk_cmd, WRK_CMD_STOP, __ATOMIC_RELEASE);
- /* wait for slaves and collect stats. */
+ /* wait for workers and collect stats. */
mc = rte_lcore_id();
arg[mc].stats = init_stat;
rc = 0;
- RTE_LCORE_FOREACH_SLAVE(lc) {
+ RTE_LCORE_FOREACH_WORKER(lc) {
rc |= rte_eal_wait_lcore(lc);
lcore_stat_aggr(&arg[mc].stats, &arg[lc].stats);
if (verbose != 0)