#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
-#include <rte_atomic.h>
#include <rte_rwlock.h>
#include <rte_eal.h>
#include <rte_lcore.h>
static rte_rwlock_t sl;
static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
-static rte_atomic32_t synchro;
+static uint32_t synchro;
enum {
LC_TYPE_RDLOCK,
static struct {
rte_rwlock_t lock;
uint64_t tick;
+
volatile union {
uint8_t u8[RTE_CACHE_LINE_SIZE];
uint64_t u64[RTE_CACHE_LINE_SIZE / sizeof(uint64_t)];
uint64_t lcount = 0;
const unsigned int lcore = rte_lcore_id();
- /* wait synchro for slaves */
- if (lcore != rte_get_master_lcore())
- while (rte_atomic32_read(&synchro) == 0)
- ;
+ /* worker lcores wait for synchro to be set by main */
+ if (lcore != rte_get_main_lcore())
+ rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
- /* clear synchro and start slaves */
- rte_atomic32_set(&synchro, 0);
- if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0)
+ /* clear synchro and start workers */
+ __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
return -1;
- /* start synchro and launch test on master */
- rte_atomic32_set(&synchro, 1);
+ /* start synchro and launch test on main */
+ __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
load_loop_fn(NULL);
rte_eal_mp_wait_lcore();
* - There is a global rwlock and a table of rwlocks (one per lcore).
*
* - The test function takes all of these locks and launches the
- * ``test_rwlock_per_core()`` function on each core (except the master).
+ * ``test_rwlock_per_core()`` function on each core (except the main lcore).
*
* - The function takes the global write lock, display something,
* then releases the global lock.
int i;
rte_rwlock_init(&sl);
- for (i=0; i<RTE_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rwlock_init(&sl_tab[i]);
rte_rwlock_write_lock(&sl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_rwlock_write_lock(&sl_tab[i]);
rte_eal_remote_launch(test_rwlock_per_core, NULL, i);
}
rte_rwlock_write_unlock(&sl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_rwlock_write_unlock(&sl_tab[i]);
rte_delay_ms(100);
}
rte_rwlock_write_lock(&sl);
/* this message should be the last message of test */
- printf("Global write lock taken on master core %u\n", rte_lcore_id());
+ printf("Global write lock taken on main core %u\n", rte_lcore_id());
rte_rwlock_write_unlock(&sl);
rte_eal_mp_wait_lcore();
if (rc != 0)
return rc;
- /* update by bytes in reverese order */
+ /* update by bytes in reverse order */
for (i = RTE_DIM(try_rwlock_data.data.u8); i-- != 0; ) {
/* race condition occurred, lock doesn't work properly */
try_rwlock_data.data.u8[i] = v;
}
- /* restore by bytes in reverese order */
+ /* restore by bytes in reverse order */
for (i = RTE_DIM(try_rwlock_data.data.u8); i-- != 0; ) {
/* race condition occurred, lock doesn't work properly */
{
try_test_reset();
- /* start read test on all avaialble lcores */
- rte_eal_mp_remote_launch(try_read_lcore, NULL, CALL_MASTER);
+ /* start read test on all available lcores */
+ rte_eal_mp_remote_launch(try_read_lcore, NULL, CALL_MAIN);
rte_eal_mp_wait_lcore();
return process_try_lcore_stats();
}
-/* all slave lcores grab RDLOCK, master one grabs WRLOCK */
+/* all worker lcores grab RDLOCK, main one grabs WRLOCK */
static int
try_rwlock_test_rds_wrm(void)
{
try_test_reset();
- rte_eal_mp_remote_launch(try_read_lcore, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(try_read_lcore, NULL, SKIP_MAIN);
try_write_lcore(NULL);
rte_eal_mp_wait_lcore();
return process_try_lcore_stats();
}
-/* master and even slave lcores grab RDLOCK, odd lcores grab WRLOCK */
+/* main and even worker lcores grab RDLOCK, odd lcores grab WRLOCK */
static int
try_rwlock_test_rde_wro(void)
{
try_test_reset();
- mlc = rte_get_master_lcore();
+ mlc = rte_get_main_lcore();
RTE_LCORE_FOREACH(lc) {
if (lc != mlc) {