rte_exit(EXIT_FAILURE, "failed to wait\n");
}
- /* master core */
+ /* main core */
while (!quit_signal)
;
}
if (config.trace_file != NULL)
tracef_init();
- RTE_LCORE_FOREACH_SLAVE(lcore)
+ RTE_LCORE_FOREACH_WORKER(lcore)
rte_eal_remote_launch(search_ip5tuples, NULL, lcore);
search_ip5tuples(NULL);
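The hunks above and below all follow one launch/collect idiom; a minimal sketch of it, with work_fn as a hypothetical worker function (not from the source):

    unsigned int lcore;

    RTE_LCORE_FOREACH_WORKER(lcore)          /* formerly RTE_LCORE_FOREACH_SLAVE */
        rte_eal_remote_launch(work_fn, NULL, lcore);
    work_fn(NULL);                           /* the main lcore runs the work too */
    RTE_LCORE_FOREACH_WORKER(lcore)
        if (rte_eal_wait_lcore(lcore) < 0)
            return -1;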
rte_atomic16_set(&op_params->sync, SYNC_WAIT);
- /* Master core is set at first entry */
+ /* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
t_params[0].lcore_id = rte_lcore_id();
t_params[0].op_params = op_params;
t_params[0].queue_id = ad->queue_ids[used_cores++];
t_params[0].iter_count = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (used_cores >= num_lcores)
break;
rte_atomic16_set(&op_params->sync, SYNC_START);
ret = bler_function(&t_params[0]);
- /* Master core is always used */
+ /* Main core is always used */
for (used_cores = 1; used_cores < num_lcores; used_cores++)
ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);
rte_atomic16_set(&op_params->sync, SYNC_WAIT);
- /* Master core is set at first entry */
+ /* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
t_params[0].lcore_id = rte_lcore_id();
t_params[0].op_params = op_params;
t_params[0].queue_id = ad->queue_ids[used_cores++];
t_params[0].iter_count = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (used_cores >= num_lcores)
break;
rte_atomic16_set(&op_params->sync, SYNC_START);
ret = throughput_function(&t_params[0]);
- /* Master core is always used */
+ /* Main core is always used */
for (used_cores = 1; used_cores < num_lcores; used_cores++)
ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);
/* In interrupt TC we need to wait for the interrupt callback to dequeue
* all pending operations. Skip waiting for queues which reported an
* error using processing_status variable.
- * Wait for master lcore operations.
+ * Wait for main lcore operations.
*/
tp = &t_params[0];
while ((rte_atomic16_read(&tp->nb_dequeued) <
tp->mbps /= TEST_REPETITIONS;
ret |= (int)rte_atomic16_read(&tp->processing_status);
- /* Wait for slave lcores operations */
+ /* Wait for worker lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
tp = &t_params[used_cores];
i = 0;
uint8_t qp_id = 0, cdev_index = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
while (test_data->level <= test_data->level_lst.max) {
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
i++;
}
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
case ST_DURING_TEST:
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
i = 0;
uint8_t qp_id = 0, cdev_index = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
distribution_total[buffer_size_count - 1];
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
i++;
}
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
while (opts.test_buffer_size <= opts.max_buffer_size) {
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
i++;
}
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
}
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
err:
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (i == total_nb_qps)
break;
evt_dump("verbose_level", "%d", opt->verbose_level);
evt_dump("socket_id", "%d", opt->socket_id);
evt_dump("pool_sz", "%d", opt->pool_sz);
- evt_dump("master lcore", "%d", rte_get_master_lcore());
+ evt_dump("main lcore", "%d", rte_get_main_lcore());
evt_dump("nb_pkts", "%"PRIu64, opt->nb_pkts);
evt_dump("nb_timers", "%"PRIu64, opt->nb_timers);
evt_dump_begin("available lcores");
return -1;
}
- /* 1 producer + N workers + 1 master */
+ /* 1 producer + N workers + main */
if (rte_lcore_count() < 3) {
evt_err("test need minimum 3 lcores");
return -1;
}
/* Validate worker lcores */
- if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
- evt_err("worker lcores overlaps with master lcore");
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+ evt_err("worker lcores overlaps with main lcore");
return -1;
}
}
/* Validate producer lcore */
- if (plcore == (int)rte_get_master_lcore()) {
- evt_err("producer lcore and master lcore should be different");
+ if (plcore == (int)rte_get_main_lcore()) {
+ evt_err("producer lcore and main lcore should be different");
return -1;
}
if (!rte_lcore_is_enabled(plcore)) {
int wkr_idx = 0;
/* launch workers */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (!(opt->wlcores[lcore_id]))
continue;
int port_idx = 0;
/* launch workers */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (!(opt->wlcores[lcore_id]))
continue;
}
/* launch producers */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (!(opt->plcores[lcore_id]))
continue;
{
unsigned int lcores;
- /* N producer + N worker + 1 master when producer cores are used
- * Else N worker + 1 master when Rx adapter is used
+ /* N producer + N worker + main when producer cores are used
+ * Else N worker + main when Rx adapter is used
*/
lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;
}
/* Validate worker lcores */
- if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
- evt_err("worker lcores overlaps with master lcore");
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+ evt_err("worker lcores overlaps with main lcore");
return -1;
}
if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
/* Validate producer lcores */
if (evt_lcores_has_overlap(opt->plcores,
- rte_get_master_lcore())) {
- evt_err("producer lcores overlaps with master lcore");
+ rte_get_main_lcore())) {
+ evt_err("producer lcores overlaps with main lcore");
return -1;
}
if (evt_has_disabled_lcore(opt->plcores)) {
int port_idx = 0;
/* launch workers */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (!(opt->wlcores[lcore_id]))
continue;
pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
unsigned int lcores;
- /*
- * N worker + 1 master
- */
+
+ /* N worker + main */
lcores = 2;
if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
}
/* Validate worker lcores */
- if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
- evt_err("worker lcores overlaps with master lcore");
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+ evt_err("worker lcores overlaps with main lcore");
return -1;
}
if (evt_has_disabled_lcore(opt->wlcores)) {
if (enable_fwd) {
init_lcore_info();
- rte_eal_mp_remote_launch(start_forwarding, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(start_forwarding, NULL, CALL_MAIN);
}
RTE_ETH_FOREACH_DEV(port) {
app_init();
/* Launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore) {
+ rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore) {
if (rte_eal_wait_lcore(lcore) < 0)
return -1;
}
printf("Read CLI commands from %s\n", filename);
}
-/* prompt function, called from main on MASTER lcore */
+/* prompt function, called from main on MAIN lcore */
void
prompt(void)
{
printf("lcore %u not enabled\n", lcore_cpuid);
return -1;
}
- if (lcore_cpuid == rte_get_master_lcore()) {
+ if (lcore_cpuid == rte_get_main_lcore()) {
printf("lcore %u cannot be masked on for running "
- "packet forwarding, which is the master lcore "
+ "packet forwarding, which is the main lcore "
"and reserved for command line parsing only\n",
lcore_cpuid);
return -1;
printf(" --nb-ports=N: set the number of forwarding ports "
"(1 <= N <= %d).\n", nb_ports);
printf(" --coremask=COREMASK: hexadecimal bitmask of cores running "
- "the packet forwarding test. The master lcore is reserved for "
+ "the packet forwarding test. The main lcore is reserved for "
"command line parsing only, and cannot be masked on for "
"packet forwarding.\n");
printf(" --portmask=PORTMASK: hexadecimal bitmask of ports used "
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */
-/* use master core for command line ? */
+/* use main core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
}
socket_ids[num_sockets++] = sock_num;
}
- if (i == rte_get_master_lcore())
+ if (i == rte_get_main_lcore())
continue;
fwd_lcores_cpuids[nb_lc++] = i;
}
add_rules(sad, 10);
if (config.parallel_lookup)
- rte_eal_mp_remote_launch(lookup, sad, SKIP_MASTER);
+ rte_eal_mp_remote_launch(lookup, sad, SKIP_MAIN);
lookup(sad);
if (config.parallel_lookup)
- RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ RTE_LCORE_FOREACH_WORKER(lcore_id)
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
index = child.expect(["Test OK",
"Test Failed",
"Hello from core ([0-9]*) !",
- "Global write lock taken on master "
+ "Global write lock taken on main "
"core ([0-9]*)",
pexpect.TIMEOUT], timeout=10)
# ok
['cycles_autotest', true],
['debug_autotest', true],
['eal_flags_c_opt_autotest', false],
- ['eal_flags_master_opt_autotest', false],
+ ['eal_flags_main_opt_autotest', false],
['eal_flags_n_opt_autotest', false],
['eal_flags_hpet_autotest', false],
['eal_flags_no_huge_autotest', false],
#endif
#endif
{ "test_missing_c_flag", no_action },
- { "test_master_lcore_flag", no_action },
+ { "test_main_lcore_flag", no_action },
{ "test_invalid_n_flag", no_action },
{ "test_no_hpet_flag", no_action },
{ "test_whitelist_flag", no_action },
printf("usual inc/dec/add/sub functions\n");
- rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MAIN);
rte_atomic32_set(&synchro, 1);
rte_eal_mp_wait_lcore();
rte_atomic32_set(&synchro, 0);
rte_atomic32_set(&a32, 0);
rte_atomic16_set(&a16, 0);
rte_atomic64_set(&count, 0);
- rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MAIN);
rte_atomic32_set(&synchro, 1);
rte_eal_mp_wait_lcore();
rte_atomic32_set(&synchro, 0);
rte_atomic16_set(&a16, 0);
rte_atomic64_set(&count, 0);
rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL,
- SKIP_MASTER);
+ SKIP_MAIN);
rte_atomic32_set(&synchro, 1);
rte_eal_mp_wait_lcore();
rte_atomic32_set(&synchro, 0);
}
/*
- * Set a64, a32 and a16 with the same value of minus "number of slave
- * lcores", launch all slave lcores to atomically increase by one and
+ * Set a64, a32 and a16 to the same value: minus the number of worker
+ * lcores. Launch all worker lcores to atomically increase each by one and
* test them respectively.
* Each lcore should have only one chance to increase a64 by one and
* then check if it is equal to 0, but there should be only one lcore
* Then a variable of "count", initialized to zero, is increased by
* one if a64, a32 or a16 is 0 after being increased and tested
* atomically.
- * We can check if "count" is finally equal to 3 to see if all slave
+ * We can check if "count" is finally equal to 3 to see if all worker
* lcores performed "atomic inc and test" right.
*/
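A minimal sketch of the per-worker routine this comment describes, assuming the test's synchro, a64 and count globals (the real test_atomic_inc_and_test also covers a32 and a16):

    static int
    test_atomic_inc_and_test_sketch(__rte_unused void *arg)
    {
        /* spin until the main lcore releases all workers at once */
        while (rte_atomic32_read(&synchro) == 0)
            rte_pause();

        /* each lcore increments a64 exactly once; only one increment
         * can be the one that brings the counter to zero */
        if (rte_atomic64_inc_and_test(&a64))
            rte_atomic64_inc(&count);
        return 0;
    }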
printf("inc and test\n");
rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));
rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));
rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count()));
- rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MAIN);
rte_atomic32_set(&synchro, 1);
rte_eal_mp_wait_lcore();
rte_atomic32_clear(&synchro);
}
/*
- * Same as above, but this time we set the values to "number of slave
+ * Same as above, but this time we set the values to "number of worker
* lcores", and decrement instead of increment.
*/
printf("dec and test\n");
rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));
rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));
rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1));
- rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MAIN);
rte_atomic32_set(&synchro, 1);
rte_eal_mp_wait_lcore();
rte_atomic32_clear(&synchro);
/*
* This case tests the functionality of rte_atomic128_cmp_exchange
* API. It calls rte_atomic128_cmp_exchange with four kinds of memory
- * models successively on each slave core. Once each 128-bit atomic
+ * models successively on each worker core. Once each 128-bit atomic
* compare and swap operation is successful, it updates the global
* 128-bit counter by 2 for the first 64-bit and 1 for the second
- * 64-bit. Each slave core iterates this test N times.
+ * 64-bit. Each worker core iterates this test N times.
* At the end of the test, verify that the first 64 bits of the 128-bit
* counter and the second 64 bits differ by the total number of
* iterations. If they do, the test passes.
count128.val[1] = 0;
rte_eal_mp_remote_launch(test_atomic128_cmp_exchange, NULL,
- SKIP_MASTER);
+ SKIP_MAIN);
rte_atomic32_set(&synchro, 1);
rte_eal_mp_wait_lcore();
rte_atomic32_clear(&synchro);
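For reference, the retry loop each worker runs around rte_atomic128_cmp_exchange amounts to the following sketch, assuming the test's count128 global (mm stands for whichever memory model the iteration uses, e.g. __ATOMIC_RELAXED):

    static void
    update_count128_once(int mm)
    {
        rte_int128_t desired;
        rte_int128_t expected = count128;   /* a stale read is fine here */

        do {    /* on failure, "expected" is refreshed from count128 */
            desired.val[0] = expected.val[0] + 2;
            desired.val[1] = expected.val[1] + 1;
        } while (rte_atomic128_cmp_exchange(&count128, &expected, &desired,
                1, mm, mm) == 0);
    }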
token64 = ((uint64_t)get_crc8(&t.u8[0], sizeof(token64) - 1) << 56)
| (t.u64 & 0x00ffffffffffffff);
- rte_eal_mp_remote_launch(test_atomic_exchange, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(test_atomic_exchange, NULL, SKIP_MAIN);
rte_atomic32_set(&synchro, 1);
rte_eal_mp_wait_lcore();
rte_atomic32_clear(&synchro);
/* test phase - start and wait for completion on each active lcore */
- rte_eal_mp_remote_launch(plock_test1_lcore, lpt, CALL_MASTER);
+ rte_eal_mp_remote_launch(plock_test1_lcore, lpt, CALL_MAIN);
rte_eal_mp_wait_lcore();
/* validation phase - make sure that shared and local data match */
" servitude: I will no longer endure it, though yet I\n"
" know no wise remedy how to avoid it.\n"
"\n"
- "ADAM Yonder comes my master, your brother.\n"
+ "ADAM Yonder comes my main, your brother.\n"
"\n"
"ORLANDO Go apart, Adam, and thou shalt hear how he will\n";
/* Identify the Worker Cores
* Use 2 worker cores for the device args
*/
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
if (worker_core_count > 1)
break;
snprintf(vdev_args, sizeof(vdev_args),
sizeof(worker_params.name));
rte_eal_mp_remote_launch(handle_work,
- &worker_params, SKIP_MASTER);
+ &worker_params, SKIP_MAIN);
if (sanity_test(&worker_params, p) < 0)
goto err;
quit_workers(&worker_params, p);
rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
- &worker_params, SKIP_MASTER);
+ &worker_params, SKIP_MAIN);
if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
goto err;
quit_workers(&worker_params, p);
if (rte_lcore_count() > 2) {
rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
&worker_params,
- SKIP_MASTER);
+ SKIP_MAIN);
if (sanity_test_with_worker_shutdown(&worker_params,
p) < 0)
goto err;
rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
&worker_params,
- SKIP_MASTER);
+ SKIP_MAIN);
if (test_flush_with_worker_shutdown(&worker_params,
p) < 0)
goto err;
quit_workers(&worker_params, p);
rte_eal_mp_remote_launch(handle_and_mark_work,
- &worker_params, SKIP_MASTER);
+ &worker_params, SKIP_MAIN);
if (sanity_mark_test(&worker_params, p) < 0)
goto err;
quit_workers(&worker_params, p);
/* allocate a full cache line for data; we use only its first byte */
uint64_t data[RTE_CACHE_LINE_SIZE*3 / sizeof(uint64_t)];
- unsigned i, slaveid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
+ unsigned int i, workerid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
volatile uint64_t *pdata = &data[0];
*pdata = 1;
- rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], slaveid);
+ rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], workerid);
while (*pdata)
rte_pause();
while (*pdata)
rte_pause();
*pdata = 2;
- rte_eal_wait_lcore(slaveid);
+ rte_eal_wait_lcore(workerid);
printf("==== Cache line switch test ===\n");
printf("Time for %u iterations = %"PRIu64" ticks\n", (1<<ITER_POWER_CL),
end_time-start_time);
}
printf("=== Performance test of distributor (single mode) ===\n");
- rte_eal_mp_remote_launch(handle_work, ds, SKIP_MASTER);
+ rte_eal_mp_remote_launch(handle_work, ds, SKIP_MAIN);
if (perf_test(ds, p) < 0)
return -1;
quit_workers(ds, p);
printf("=== Performance test of distributor (burst mode) ===\n");
- rte_eal_mp_remote_launch(handle_work, db, SKIP_MASTER);
+ rte_eal_mp_remote_launch(handle_work, db, SKIP_MAIN);
if (perf_test(db, p) < 0)
return -1;
quit_workers(db, p);
}
/*
- * Test --master-lcore option with matching coremask
+ * Test --main-lcore option with matching coremask
*/
static int
-test_master_lcore_flag(void)
+test_main_lcore_flag(void)
{
#ifdef RTE_EXEC_ENV_FREEBSD
/* BSD target doesn't support prefixes at this point */
if (!rte_lcore_is_enabled(0) || !rte_lcore_is_enabled(1))
return TEST_SKIPPED;
- /* --master-lcore flag but no value */
+ /* --main-lcore flag but no value */
const char *argv1[] = { prgname, prefix, mp_flag,
- "-c", "3", "--master-lcore"};
- /* --master-lcore flag with invalid value */
+ "-c", "3", "--main-lcore"};
+ /* --main-lcore flag with invalid value */
const char *argv2[] = { prgname, prefix, mp_flag,
- "-c", "3", "--master-lcore", "-1"};
+ "-c", "3", "--main-lcore", "-1"};
const char *argv3[] = { prgname, prefix, mp_flag,
- "-c", "3", "--master-lcore", "X"};
- /* master lcore not in coremask */
+ "-c", "3", "--main-lcore", "X"};
+ /* main lcore not in coremask */
const char *argv4[] = { prgname, prefix, mp_flag,
- "-c", "3", "--master-lcore", "2"};
+ "-c", "3", "--main-lcore", "2"};
/* valid value */
const char *argv5[] = { prgname, prefix, mp_flag,
- "-c", "3", "--master-lcore", "1"};
+ "-c", "3", "--main-lcore", "1"};
/* valid value set before coremask */
const char *argv6[] = { prgname, prefix, mp_flag,
- "--master-lcore", "1", "-c", "3"};
+ "--main-lcore", "1", "-c", "3"};
if (launch_proc(argv1) == 0
|| launch_proc(argv2) == 0
|| launch_proc(argv3) == 0
|| launch_proc(argv4) == 0) {
- printf("Error - process ran without error with wrong --master-lcore\n");
+ printf("Error - process ran without error with wrong --main-lcore\n");
return -1;
}
if (launch_proc(argv5) != 0
|| launch_proc(argv6) != 0) {
- printf("Error - process did not run ok with valid --master-lcore\n");
+ printf("Error - process did not run ok with valid --main-lcore\n");
return -1;
}
return 0;
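What the renamed option controls, sketched outside the harness (hypothetical invocation; simplified error handling): after EAL init, the chosen core is the one rte_get_main_lcore() reports.

    /* e.g. started as: ./app -c 3 --main-lcore 1 */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "EAL init failed\n");
    printf("main lcore = %u\n", rte_get_main_lcore());  /* prints 1 */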
return ret;
}
- ret = test_master_lcore_flag();
+ ret = test_main_lcore_flag();
if (ret < 0) {
- printf("Error in test_master_lcore_flag()\n");
+ printf("Error in test_main_lcore_flag()\n");
return ret;
}
/* subtests used in meson for CI */
REGISTER_TEST_COMMAND(eal_flags_c_opt_autotest, test_missing_c_flag);
-REGISTER_TEST_COMMAND(eal_flags_master_opt_autotest, test_master_lcore_flag);
+REGISTER_TEST_COMMAND(eal_flags_main_opt_autotest, test_main_lcore_flag);
REGISTER_TEST_COMMAND(eal_flags_n_opt_autotest, test_invalid_n_flag);
REGISTER_TEST_COMMAND(eal_flags_hpet_autotest, test_no_hpet_flag);
REGISTER_TEST_COMMAND(eal_flags_no_huge_autotest, test_no_huge_flag);
{
uint8_t all_cpu_sockets_bitmask = 0;
unsigned int i;
- unsigned int next_lcore = rte_get_master_lcore();
+ unsigned int next_lcore = rte_get_main_lcore();
const int val_true = 1, val_false = 0;
for (i = 0; i < rte_lcore_count(); i++) {
all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
{
uint8_t all_cpu_sockets_bitmask = 0;
unsigned int i;
- unsigned int next_lcore = rte_get_master_lcore();
+ unsigned int next_lcore = rte_get_main_lcore();
const int val_true = 1, val_false = 0;
for (i = 0; i < rte_lcore_count(); i++) {
all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
-#define WAIT_SYNCHRO_FOR_SLAVES() do{ \
- if (lcore_self != rte_get_master_lcore()) \
+#define WAIT_SYNCHRO_FOR_WORKERS() do { \
+ if (lcore_self != rte_get_main_lcore()) \
while (rte_atomic32_read(&synchro) == 0); \
} while(0)
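How the two sides of this macro pair up, sketched with a hypothetical test_fn: the main lcore launches the workers, flips synchro to release them, then runs the same body itself (skipping the wait).

    rte_eal_mp_remote_launch(test_fn, NULL, SKIP_MAIN);
    rte_atomic32_set(&synchro, 1);   /* workers leave WAIT_SYNCHRO_FOR_WORKERS() */
    test_fn(NULL);
    rte_eal_mp_wait_lcore();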
{
unsigned lcore_self = rte_lcore_id();
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
rte_atomic32_set(&obj_count, 1); /* silence the check in the caller */
if (rte_eal_init(0, NULL) != -1)
char ring_name[MAX_STRING_SIZE];
int i;
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same ring simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
char mempool_name[MAX_STRING_SIZE];
int i;
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same mempool simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
.socket_id = 0,
};
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same hash simultaneously on all threads */
hash_params.name = "fr_test_once";
.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
};
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same fbk hash table simultaneously on all threads */
fbk_params.name = "fr_test_once";
char lpm_name[MAX_STRING_SIZE];
int i;
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same lpm simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
rte_atomic32_set(&obj_count, 0);
rte_atomic32_set(&synchro, 0);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (cores == 1)
break;
cores--;
ret = -1;
cores = cores_save;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (cores == 1)
break;
cores--;
/*
* Calculate offset for entries based on the position of the
- * logical core, from the master core (not counting not enabled cores)
+ * logical core, from the main core (not counting disabled cores)
*/
offset = pos_core * tbl_multiwriter_test_params.nb_tsx_insertion;
/* Fire all threads. */
rte_eal_mp_remote_launch(test_hash_multiwriter_worker,
- enabled_core_ids, CALL_MASTER);
+ enabled_core_ids, CALL_MAIN);
rte_eal_mp_wait_lcore();
count = rte_hash_count(handle);
#define NUM_TEST 3
unsigned int core_cnt[NUM_TEST] = {2, 4, 8};
-unsigned int slave_core_ids[RTE_MAX_LCORE];
+unsigned int worker_core_ids[RTE_MAX_LCORE];
struct perf {
uint32_t single_read;
uint32_t single_write;
ret = rte_malloc(NULL, sizeof(int) *
tbl_rw_test_param.num_insert, 0);
for (i = 0; i < rte_lcore_count(); i++) {
- if (slave_core_ids[i] == lcore_id)
+ if (worker_core_ids[i] == lcore_id)
break;
}
offset = tbl_rw_test_param.num_insert * i;
uint32_t duplicated_keys = 0;
uint32_t lost_keys = 0;
int use_jhash = 1;
- int slave_cnt = rte_lcore_count() - 1;
+ int worker_cnt = rte_lcore_count() - 1;
uint32_t tot_insert = 0;
rte_atomic64_init(&gcycles);
tot_insert = TOTAL_INSERT;
tbl_rw_test_param.num_insert =
- tot_insert / slave_cnt;
+ tot_insert / worker_cnt;
tbl_rw_test_param.rounded_tot_insert =
- tbl_rw_test_param.num_insert
- * slave_cnt;
+ tbl_rw_test_param.num_insert * worker_cnt;
printf("\nHTM = %d, RW-LF = %d, EXT-Table = %d\n",
use_htm, use_rw_lf, use_ext);
/* Fire all threads. */
rte_eal_mp_remote_launch(test_hash_readwrite_worker,
- NULL, SKIP_MASTER);
+ NULL, SKIP_MAIN);
rte_eal_mp_wait_lcore();
while (rte_hash_iterate(tbl_rw_test_param.h, &next_key,
uint64_t offset;
for (i = 0; i < rte_lcore_count(); i++) {
- if (slave_core_ids[i] == lcore_id)
+ if (worker_core_ids[i] == lcore_id)
break;
}
perf_results->single_read = end / i;
for (n = 0; n < NUM_TEST; n++) {
- unsigned int tot_slave_lcore = rte_lcore_count() - 1;
- if (tot_slave_lcore < core_cnt[n] * 2)
+ unsigned int tot_worker_lcore = rte_lcore_count() - 1;
+ if (tot_worker_lcore < core_cnt[n] * 2)
goto finish;
rte_atomic64_clear(&greads);
for (i = 0; i < core_cnt[n]; i++)
rte_eal_remote_launch(test_rw_reader,
(void *)(uintptr_t)read_cnt,
- slave_core_ids[i]);
+ worker_core_ids[i]);
rte_eal_mp_wait_lcore();
for (; i < core_cnt[n] * 2; i++)
rte_eal_remote_launch(test_rw_writer,
(void *)((uintptr_t)start_coreid),
- slave_core_ids[i]);
+ worker_core_ids[i]);
rte_eal_mp_wait_lcore();
for (i = core_cnt[n]; i < core_cnt[n] * 2; i++)
rte_eal_remote_launch(test_rw_writer,
(void *)((uintptr_t)start_coreid),
- slave_core_ids[i]);
+ worker_core_ids[i]);
for (i = 0; i < core_cnt[n]; i++)
rte_eal_remote_launch(test_rw_reader,
(void *)(uintptr_t)read_cnt,
- slave_core_ids[i]);
+ worker_core_ids[i]);
} else {
for (i = 0; i < core_cnt[n]; i++)
rte_eal_remote_launch(test_rw_reader,
(void *)(uintptr_t)read_cnt,
- slave_core_ids[i]);
+ worker_core_ids[i]);
for (; i < core_cnt[n] * 2; i++)
rte_eal_remote_launch(test_rw_writer,
(void *)((uintptr_t)start_coreid),
- slave_core_ids[i]);
+ worker_core_ids[i]);
}
rte_eal_mp_wait_lcore();
return TEST_SKIPPED;
}
- RTE_LCORE_FOREACH_SLAVE(core_id) {
- slave_core_ids[i] = core_id;
+ RTE_LCORE_FOREACH_WORKER(core_id) {
+ worker_core_ids[i] = core_id;
i++;
}
return TEST_SKIPPED;
}
- RTE_LCORE_FOREACH_SLAVE(core_id) {
- slave_core_ids[i] = core_id;
+ RTE_LCORE_FOREACH_WORKER(core_id) {
+ worker_core_ids[i] = core_id;
i++;
}
.config_promiscusity = NULL,
};
-static unsigned lcore_master, lcore_ingress, lcore_egress;
+static unsigned int lcore_main, lcore_ingress, lcore_egress;
static struct rte_kni *test_kni_ctx;
static struct test_kni_stats stats;
* supported by the KNI kernel module. The ingress lcore will allocate mbufs and
* transmit them to kernel space, while the egress lcore will receive the mbufs
* from kernel space and free them.
- * On the master lcore, several commands will be run to check handling the
+ * On the main lcore, several commands will be run to check handling the
* kernel requests. And it will finally set the flag to exit the KNI
* transmitting/receiving to/from the kernel space.
*
const unsigned lcore_id = rte_lcore_id();
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
- if (lcore_id == lcore_master) {
+ if (lcore_id == lcore_main) {
rte_delay_ms(KNI_TIMEOUT_MS);
/* tests of handling kernel request */
if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
{
unsigned i, count = 0;
- lcore_master = rte_get_master_lcore();
- printf("master lcore: %u\n", lcore_master);
+ lcore_main = rte_get_main_lcore();
+ printf("main lcore: %u\n", lcore_main);
for (i = 0; i < RTE_MAX_LCORE; i++) {
if (count >= 2)
break;
- if (rte_lcore_is_enabled(i) && i != lcore_master) {
+ if (rte_lcore_is_enabled(i) && i != lcore_main) {
count++;
if (count == 1)
lcore_ingress = i;
if (ret != 0)
goto fail_kni;
- rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(i) {
if (rte_eal_wait_lcore(i) < 0) {
ret = -1;
goto fail_kni;
}
num_cores = 0;
- RTE_LCORE_FOREACH_SLAVE(core_id) {
+ RTE_LCORE_FOREACH_WORKER(core_id) {
enabled_core_ids[num_cores] = core_id;
num_cores++;
}
}
num_cores = 0;
- RTE_LCORE_FOREACH_SLAVE(core_id) {
+ RTE_LCORE_FOREACH_WORKER(core_id) {
enabled_core_ids[num_cores] = core_id;
num_cores++;
}
else printf("test_realloc() passed\n");
/*----------------------------*/
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(test_align_overlap_per_lcore, NULL, lcore_id);
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
ret = -1;
}
else printf("test_align_overlap_per_lcore() passed\n");
/*----------------------------*/
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(test_reordered_free_per_lcore, NULL, lcore_id);
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
ret = -1;
}
else printf("test_reordered_free_per_lcore() passed\n");
/*----------------------------*/
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(test_random_alloc_free, NULL, lcore_id);
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
ret = -1;
}
#ifdef RTE_MBUF_REFCNT_ATOMIC
-static volatile uint32_t refcnt_stop_slaves;
+static volatile uint32_t refcnt_stop_workers;
static unsigned refcnt_lcore[RTE_MAX_LCORE];
#endif
#ifdef RTE_MBUF_REFCNT_ATOMIC
static int
-test_refcnt_slave(void *arg)
+test_refcnt_worker(void *arg)
{
unsigned lcore, free;
void *mp = 0;
printf("%s started at lcore %u\n", __func__, lcore);
free = 0;
- while (refcnt_stop_slaves == 0) {
+ while (refcnt_stop_workers == 0) {
if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
free++;
rte_pktmbuf_free(mp);
/* For each mbuf in the pool:
* - allocate mbuf,
* - increment its reference count up to N+1,
- * - enqueue it N times into the ring for slave cores to free.
+ * - enqueue it N times into the ring for worker cores to free.
*/
for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
rte_panic("(lcore=%u, iter=%u): was able to allocate only "
"%u from %u mbufs\n", lcore, iter, i, n);
- /* wait till slave lcores will consume all mbufs */
+ /* wait until worker lcores have consumed all mbufs */
while (!rte_ring_empty(refcnt_mbuf_ring))
;
}
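A hedged sketch of the per-mbuf body inside that loop: take N extra references and publish one ring entry per reference for the workers to free (REFCNT_MAX_REF and the single-object enqueue are assumptions, not necessarily the test's exact calls):

    uint32_t k, ref = (rte_rand() % REFCNT_MAX_REF) + 1;

    rte_mbuf_refcnt_update(m, ref);              /* refcnt becomes ref + 1 */
    for (k = 0; k != ref; k++)
        rte_ring_enqueue(refcnt_mbuf_ring, m);   /* one free per reference */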
static int
-test_refcnt_master(struct rte_mempool *refcnt_pool,
+test_refcnt_main(struct rte_mempool *refcnt_pool,
struct rte_ring *refcnt_mbuf_ring)
{
unsigned i, lcore;
for (i = 0; i != REFCNT_MAX_ITER; i++)
test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
- refcnt_stop_slaves = 1;
+ refcnt_stop_workers = 1;
rte_wmb();
printf("%s finished at lcore %u\n", __func__, lcore);
test_refcnt_mbuf(void)
{
#ifdef RTE_MBUF_REFCNT_ATOMIC
- unsigned int master, slave, tref;
+ unsigned int main_lcore, worker, tref;
int ret = -1;
struct rte_mempool *refcnt_pool = NULL;
struct rte_ring *refcnt_mbuf_ring = NULL;
SOCKET_ID_ANY);
if (refcnt_pool == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
- __func__);
+ __func__);
return -1;
}
refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
- rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
- RING_F_SP_ENQ);
+ rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
+ RING_F_SP_ENQ);
if (refcnt_mbuf_ring == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
- "\n", __func__);
+ "\n", __func__);
goto err;
}
- refcnt_stop_slaves = 0;
+ refcnt_stop_workers = 0;
memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
- rte_eal_mp_remote_launch(test_refcnt_slave, refcnt_mbuf_ring,
- SKIP_MASTER);
+ rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);
- test_refcnt_master(refcnt_pool, refcnt_mbuf_ring);
+ test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);
rte_eal_mp_wait_lcore();
/* check that we processed all references */
tref = 0;
- master = rte_get_master_lcore();
+ main_lcore = rte_get_main_lcore();
- RTE_LCORE_FOREACH_SLAVE(slave)
- tref += refcnt_lcore[slave];
+ RTE_LCORE_FOREACH_WORKER(worker)
+ tref += refcnt_lcore[worker];
- if (tref != refcnt_lcore[master])
+ if (tref != refcnt_lcore[main_lcore])
rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
- tref, refcnt_lcore[master]);
+ tref, refcnt_lcore[main_lcore]);
rte_mempool_dump(stdout, refcnt_pool);
rte_ring_dump(stdout, refcnt_mbuf_ring);
* These tests are derived from spin lock test cases.
*
* - The functional test takes all of these locks and launches the
- * ''test_mcslock_per_core()'' function on each core (except the master).
+ * ''test_mcslock_per_core()'' function on each core (except the main).
*
* - The function takes the global lock, display something, then releases
* the global lock on each core.
printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
rte_atomic32_set(&synchro, 0);
- rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+ rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
- /* start synchro and launch test on master */
+ /* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
load_loop_fn(&lock);
rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
- /* Locked ml_try in the master lcore, so it should fail
- * when trying to lock it in the slave lcore.
+ /* Locked ml_try in the main lcore, so it should fail
+ * when trying to lock it in the worker lcore.
*/
if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0) {
rte_mcslock_lock(&p_ml, &ml_me);
* Test mcs lock & unlock on each core
*/
- /* slave cores should be waiting: print it */
- RTE_LCORE_FOREACH_SLAVE(i) {
+ /* worker cores should be waiting: print it */
+ RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
rte_mcslock_lock(&p_ml, &ml_me);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_mcslock_per_core, NULL, i);
}
- /* slave cores should be busy: print it */
- RTE_LCORE_FOREACH_SLAVE(i) {
+ /* worker cores should be busy: print it */
+ RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
/*
* Test if it could return immediately from try-locking a locked object.
* Here it will lock the mcs lock object first, then launch all the
- * slave lcores to trylock the same mcs lock object.
- * All the slave lcores should give up try-locking a locked object and
+ * worker lcores to trylock the same mcs lock object.
+ * All the worker lcores should give up try-locking a locked object and
* return immediately, and then each increases the "count" (initialized
* to zero) by one.
* We can check if the "count" is finally equal to the number of all
- * slave lcores to see if the behavior of try-locking a locked
+ * worker lcores to see if the behavior of try-locking a locked
* mcslock object is correct.
*/
if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0)
return -1;
count = 0;
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_mcslock_try, NULL, i);
}
rte_eal_mp_wait_lcore();
stats[lcore_id].enq_count = 0;
- /* wait synchro for slaves */
- if (lcore_id != rte_get_master_lcore())
+ /* wait synchro for workers */
+ if (lcore_id != rte_get_main_lcore())
while (rte_atomic32_read(&synchro) == 0);
start_cycles = rte_get_timer_cycles();
return -1;
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (cores == 1)
break;
cores--;
mp, lcore_id);
}
- /* start synchro and launch test on master */
+ /* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
ret = per_lcore_mempool_test(mp);
cores = cores_save;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (cores == 1)
break;
cores--;
#endif
snprintf(coremask, sizeof(coremask), "%x", \
- (1 << rte_get_master_lcore()));
+ (1 << rte_get_main_lcore()));
ret |= launch_proc(argv1);
printf("### Testing rte_mp_disable() reject:\n");
};
snprintf(coremask, sizeof(coremask), "%x",
- (1 << rte_get_master_lcore()));
+ (1 << rte_get_main_lcore()));
ret = test_pdump_init();
ret |= launch_p(argv1);
unsigned lcore_id;
int ret;
- rte_eal_mp_remote_launch(assign_vars, NULL, SKIP_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(assign_vars, NULL, SKIP_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
- rte_eal_mp_remote_launch(display_vars, NULL, SKIP_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(display_vars, NULL, SKIP_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
/* test if it could do remote launch twice at the same time or not */
- ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER);
+ ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MAIN);
if (ret < 0) {
printf("It fails to do remote launch but it should able to do\n");
return -1;
}
/* it should not be able to launch an lcore which is running */
- ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER);
+ ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MAIN);
if (ret == 0) {
printf("It does remote launch successfully but it should not at this time\n");
return -1;
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (LCORE_AVAIL != lcore_conf[lcore_id].status ||
lcore_conf[lcore_id].socketid != socketid ||
- lcore_id == rte_get_master_lcore())
+ lcore_id == rte_get_main_lcore())
continue;
lcore_conf[lcore_id].status = LCORE_USED;
lcore_conf[lcore_id].nb_ports = 0;
static int
test_pmd_perf(void)
{
- uint16_t nb_ports, num, nb_lcores, slave_id = (uint16_t)-1;
+ uint16_t nb_ports, num, nb_lcores, worker_id = (uint16_t)-1;
uint16_t nb_rxd = MAX_TRAFFIC_BURST;
uint16_t nb_txd = MAX_TRAFFIC_BURST;
uint16_t portid;
RTE_ETH_FOREACH_DEV(portid) {
if (socketid == -1) {
socketid = rte_eth_dev_socket_id(portid);
- slave_id = alloc_lcore(socketid);
- if (slave_id == (uint16_t)-1) {
+ worker_id = alloc_lcore(socketid);
+ if (worker_id == (uint16_t)-1) {
printf("No avail lcore to run test\n");
return -1;
}
printf("Performance test runs on lcore %u socket %u\n",
- slave_id, socketid);
+ worker_id, socketid);
}
if (socketid != rte_eth_dev_socket_id(portid)) {
"rte_eth_promiscuous_enable: err=%s, port=%d\n",
rte_strerror(-ret), portid);
- lcore_conf[slave_id].portlist[num++] = portid;
- lcore_conf[slave_id].nb_ports++;
+ lcore_conf[worker_id].portlist[num++] = portid;
+ lcore_conf[worker_id].nb_ports++;
}
check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
if (NULL == do_measure)
do_measure = measure_rxtx;
- rte_eal_remote_launch(main_loop, NULL, slave_id);
+ rte_eal_remote_launch(main_loop, NULL, worker_id);
- if (rte_eal_wait_lcore(slave_id) < 0)
+ if (rte_eal_wait_lcore(worker_id) < 0)
return -1;
} else if (sc_flag == SC_BURST_POLL_FIRST ||
sc_flag == SC_BURST_XMIT_FIRST)
- if (exec_burst(sc_flag, slave_id) < 0)
+ if (exec_burst(sc_flag, worker_id) < 0)
return -1;
/* port tear down */
}
num_cores = 0;
- RTE_LCORE_FOREACH_SLAVE(core_id) {
+ RTE_LCORE_FOREACH_WORKER(core_id) {
enabled_core_ids[num_cores] = core_id;
num_cores++;
}
rte_atomic64_init(&check_cycles);
num_cores = 0;
- RTE_LCORE_FOREACH_SLAVE(core_id) {
+ RTE_LCORE_FOREACH_WORKER(core_id) {
enabled_core_ids[num_cores] = core_id;
num_cores++;
}
lcore_count = 0;
param1.size = param2.size = bulk_sizes[i];
param1.r = param2.r = r;
- if (cores->c1 == rte_get_master_lcore()) {
+ if (cores->c1 == rte_get_main_lcore()) {
rte_eal_remote_launch(f2, ¶m2, cores->c2);
f1(¶m1);
rte_eal_wait_lcore(cores->c2);
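When neither core of the pair is the main lcore, both functions must be remote-launched; a hedged sketch of that branch, reusing the same names:

    rte_eal_remote_launch(f1, &param1, cores->c1);
    rte_eal_remote_launch(f2, &param2, cores->c2);
    rte_eal_wait_lcore(cores->c1);
    rte_eal_wait_lcore(cores->c2);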
if (burst == NULL)
return -1;
- /* wait synchro for slaves */
- if (lcore != rte_get_master_lcore())
+ /* wait synchro for workers */
+ if (lcore != rte_get_main_lcore())
while (rte_atomic32_read(&synchro) == 0)
rte_pause();
param.size = bulk_sizes[i];
param.r = r;
- /* clear synchro and start slaves */
+ /* clear synchro and start workers */
rte_atomic32_set(&synchro, 0);
- if (rte_eal_mp_remote_launch(lcore_f, ¶m, SKIP_MASTER) < 0)
+ if (rte_eal_mp_remote_launch(lcore_f, ¶m, SKIP_MAIN) < 0)
return -1;
- /* start synchro and launch test on master */
+ /* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
lcore_f(¶m);
goto test_fail;
}
- printf("\n### Testing using all slave nodes ###\n");
+ printf("\n### Testing using all worker nodes ###\n");
if (run_on_all_cores(r, esize) < 0)
goto test_fail;
/**
* Stress test for ring enqueue/dequeue operations.
- * Performs the following pattern on each slave worker:
+ * Performs the following pattern on each worker:
* dequeue/read-write data from the dequeued objects/enqueue.
* Serves as both functional and performance test of ring
* enqueue/dequeue operations under high contention
memset(arg, 0, sizeof(arg));
- /* launch on all slaves */
- RTE_LCORE_FOREACH_SLAVE(lc) {
+ /* launch on all workers */
+ RTE_LCORE_FOREACH_WORKER(lc) {
arg[lc].rng = r;
arg[lc].stats = init_stat;
rte_eal_remote_launch(test, &arg[lc], lc);
wrk_cmd = WRK_CMD_STOP;
rte_smp_wmb();
- /* wait for slaves and collect stats. */
+ /* wait for workers and collect stats. */
mc = rte_lcore_id();
arg[mc].stats = init_stat;
rc = 0;
- RTE_LCORE_FOREACH_SLAVE(lc) {
+ RTE_LCORE_FOREACH_WORKER(lc) {
rc |= rte_eal_wait_lcore(lc);
lcore_stat_aggr(&arg[mc].stats, &arg[lc].stats);
if (verbose != 0)
uint64_t lcount = 0;
const unsigned int lcore = rte_lcore_id();
- /* wait synchro for slaves */
- if (lcore != rte_get_master_lcore())
+ /* wait synchro for workers */
+ if (lcore != rte_get_main_lcore())
while (rte_atomic32_read(&synchro) == 0)
;
printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
- /* clear synchro and start slaves */
+ /* clear synchro and start workers */
rte_atomic32_set(&synchro, 0);
- if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0)
+ if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
return -1;
- /* start synchro and launch test on master */
+ /* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
load_loop_fn(NULL);
* - There is a global rwlock and a table of rwlocks (one per lcore).
*
* - The test function takes all of these locks and launches the
- * ``test_rwlock_per_core()`` function on each core (except the master).
+ * ``test_rwlock_per_core()`` function on each core (except the main).
*
* - The function takes the global write lock, display something,
* then releases the global lock.
rte_rwlock_write_lock(&sl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_rwlock_write_lock(&sl_tab[i]);
rte_eal_remote_launch(test_rwlock_per_core, NULL, i);
}
rte_rwlock_write_unlock(&sl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_rwlock_write_unlock(&sl_tab[i]);
rte_delay_ms(100);
}
rte_rwlock_write_lock(&sl);
/* this message should be the last message of test */
- printf("Global write lock taken on master core %u\n", rte_lcore_id());
+ printf("Global write lock taken on main core %u\n", rte_lcore_id());
rte_rwlock_write_unlock(&sl);
rte_eal_mp_wait_lcore();
try_test_reset();
/* start read test on all available lcores */
- rte_eal_mp_remote_launch(try_read_lcore, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(try_read_lcore, NULL, CALL_MAIN);
rte_eal_mp_wait_lcore();
return process_try_lcore_stats();
}
-/* all slave lcores grab RDLOCK, master one grabs WRLOCK */
+/* all worker lcores grab RDLOCK, main one grabs WRLOCK */
static int
try_rwlock_test_rds_wrm(void)
{
try_test_reset();
- rte_eal_mp_remote_launch(try_read_lcore, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(try_read_lcore, NULL, SKIP_MAIN);
try_write_lcore(NULL);
rte_eal_mp_wait_lcore();
return process_try_lcore_stats();
}
-/* master and even slave lcores grab RDLOCK, odd lcores grab WRLOCK */
+/* main and even worker lcores grab RDLOCK, odd lcores grab WRLOCK */
static int
try_rwlock_test_rde_wro(void)
{
try_test_reset();
- mlc = rte_get_master_lcore();
+ mlc = rte_get_main_lcore();
RTE_LCORE_FOREACH(lc) {
if (lc != mlc) {
testsuite_setup(void)
{
slcore_id = rte_get_next_lcore(/* start core */ -1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
return TEST_SUCCESS;
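The second argument of rte_get_next_lcore() is the renamed "skip main" flag; iterating with it is roughly what RTE_LCORE_FOREACH_WORKER expands to:

    unsigned int id;

    for (id = rte_get_next_lcore(-1, 1, 0);      /* skip main, no wrap */
            id < RTE_MAX_LCORE;
            id = rte_get_next_lcore(id, 1, 0))
        printf("worker lcore %u\n", id);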
TEST_ASSERT_EQUAL(1, rte_service_lcore_count(),
"Service core count not equal to one");
uint32_t slcore_1 = rte_get_next_lcore(/* start core */ -1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_1),
"Service core add did not return zero");
uint32_t slcore_2 = rte_get_next_lcore(/* start core */ slcore_1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_2),
"Service core add did not return zero");
/* add next 2 cores */
uint32_t slcore_1 = rte_get_next_lcore(/* start core */ -1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_1),
"mt safe lcore add fail");
uint32_t slcore_2 = rte_get_next_lcore(/* start core */ slcore_1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_2),
"mt safe lcore add fail");
int i;
uint32_t lcore = rte_get_next_lcore(/* start core */ -1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
uint32_t slcore = rte_get_next_lcore(/* start core */ lcore,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
/* start the service on the second available lcore */
* - There is a global spinlock and a table of spinlocks (one per lcore).
*
* - The test function takes all of these locks and launches the
- * ``test_spinlock_per_core()`` function on each core (except the master).
+ * ``test_spinlock_per_core()`` function on each core (except the main).
*
* - The function takes the global lock, display something, then releases
* the global lock.
const int use_lock = *(int*)func_param;
const unsigned lcore = rte_lcore_id();
- /* wait synchro for slaves */
- if (lcore != rte_get_master_lcore())
+ /* wait synchro for workers */
+ if (lcore != rte_get_main_lcore())
while (rte_atomic32_read(&synchro) == 0);
begin = rte_get_timer_cycles();
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
- /* Clear synchro and start slaves */
+ /* Clear synchro and start workers */
rte_atomic32_set(&synchro, 0);
- rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+ rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
- /* start synchro and launch test on master */
+ /* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
load_loop_fn(&lock);
int ret = 0;
int i;
- /* slave cores should be waiting: print it */
- RTE_LCORE_FOREACH_SLAVE(i) {
+ /* worker cores should be waiting: print it */
+ RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
rte_spinlock_lock(&sl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_spinlock_lock(&sl_tab[i]);
rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
}
- /* slave cores should be busy: print it */
- RTE_LCORE_FOREACH_SLAVE(i) {
+ /* worker cores should be busy: print it */
+ RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
rte_spinlock_unlock(&sl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_spinlock_unlock(&sl_tab[i]);
rte_delay_ms(10);
}
} else
rte_spinlock_recursive_unlock(&slr);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
}
rte_spinlock_recursive_unlock(&slr);
/*
* Test if it could return immediately from try-locking a locked object.
- * Here it will lock the spinlock object first, then launch all the slave
+ * Here it will lock the spinlock object first, then launch all the worker
* lcores to trylock the same spinlock object.
- * All the slave lcores should give up try-locking a locked object and
+ * All the worker lcores should give up try-locking a locked object and
* return immediately, and then each increases the "count" (initialized
* to zero) by one.
- * We can check if the "count" is finally equal to the number of all slave
+ * We can check if the "count" is finally equal to the number of all worker
* lcores to see if the behavior of try-locking a locked spinlock object
* is correct.
*/
return -1;
}
count = 0;
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_spinlock_try, NULL, i);
}
rte_eal_mp_wait_lcore();
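The worker routine launched above is not shown in this hunk; a hedged sketch of its shape, assuming the test's sl, sl_try and count globals:

    static int
    test_spinlock_try_sketch(__rte_unused void *arg)
    {
        /* the main lcore holds sl_try, so trylock must fail (return 0) */
        if (rte_spinlock_trylock(&sl_try) == 0) {
            rte_spinlock_lock(&sl);
            count++;    /* one tick per worker lcore */
            rte_spinlock_unlock(&sl);
        }
        return 0;
    }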
thread_test_args.s = s;
- if (rte_eal_mp_remote_launch(stack_thread_push_pop, NULL, CALL_MASTER))
+ if (rte_eal_mp_remote_launch(stack_thread_push_pop, NULL, CALL_MAIN))
rte_panic("Failed to launch tests\n");
RTE_LCORE_FOREACH(lcore_id) {
args[0].sz = args[1].sz = bulk_sizes[i];
args[0].s = args[1].s = s;
- if (cores->c1 == rte_get_master_lcore()) {
+ if (cores->c1 == rte_get_main_lcore()) {
rte_eal_remote_launch(fn, &args[1], cores->c2);
fn(&args[0]);
rte_eal_wait_lcore(cores->c2);
rte_atomic32_set(&lcore_barrier, n);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
break;
avg = args[rte_lcore_id()].avg;
cnt = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
break;
avg += args[lcore_id].avg;
* - There is a global ticketlock and a table of ticketlocks (one per lcore).
*
* - The test function takes all of these locks and launches the
- * ``test_ticketlock_per_core()`` function on each core (except the master).
+ * ``test_ticketlock_per_core()`` function on each core (except the main).
*
* - The function takes the global lock, display something, then releases
* the global lock.
const int use_lock = *(int *)func_param;
const unsigned int lcore = rte_lcore_id();
- /* wait synchro for slaves */
- if (lcore != rte_get_master_lcore())
+ /* wait synchro for workers */
+ if (lcore != rte_get_main_lcore())
while (rte_atomic32_read(&synchro) == 0)
;
lcount = 0;
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
- /* Clear synchro and start slaves */
+ /* Clear synchro and start workers */
rte_atomic32_set(&synchro, 0);
- rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+ rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
- /* start synchro and launch test on master */
+ /* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
load_loop_fn(&lock);
int ret = 0;
int i;
- /* slave cores should be waiting: print it */
- RTE_LCORE_FOREACH_SLAVE(i) {
+ /* worker cores should be waiting: print it */
+ RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
rte_ticketlock_init(&tl);
rte_ticketlock_init(&tl_try);
rte_ticketlock_recursive_init(&tlr);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_ticketlock_init(&tl_tab[i]);
}
rte_ticketlock_lock(&tl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_ticketlock_lock(&tl_tab[i]);
rte_eal_remote_launch(test_ticketlock_per_core, NULL, i);
}
- /* slave cores should be busy: print it */
- RTE_LCORE_FOREACH_SLAVE(i) {
+ /* worker cores should be busy: print it */
+ RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
rte_ticketlock_unlock(&tl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_ticketlock_unlock(&tl_tab[i]);
rte_delay_ms(10);
}
} else
rte_ticketlock_recursive_unlock(&tlr);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_ticketlock_recursive_per_core,
NULL, i);
}
/*
* Test if it could return immediately from try-locking a locked object.
* Here it will lock the ticketlock object first, then launch all the
- * slave lcores to trylock the same ticketlock object.
- * All the slave lcores should give up try-locking a locked object and
+ * worker lcores to trylock the same ticketlock object.
+ * All the worker lcores should give up try-locking a locked object and
* return immediately, and then each increases the "count" (initialized
* to zero) by one.
* We can check if the "count" is finally equal to the number of all
- * slave lcores to see if the behavior of try-locking a locked
+ * worker lcores to see if the behavior of try-locking a locked
* ticketlock object is correct.
*/
if (rte_ticketlock_trylock(&tl_try) == 0)
return -1;
count = 0;
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_ticketlock_try, NULL, i);
}
rte_eal_mp_wait_lcore();
* - All cores are then set to schedule all the timers at the same
* time, so conflicts should occur.
* - Then there is a delay while we wait for the timers to expire
- * - Then the master lcore calls timer_manage() and we check that all
+ * - Then the main lcore calls timer_manage() and we check that all
* timers have had their callbacks called exactly once - no more no less.
* - Then we repeat the process, except after setting up the timers, we have
* all cores randomly reschedule them.
*
* - timer0
*
- * - At initialization, timer0 is loaded by the master core, on master core
+ * - At initialization, timer0 is loaded by the main core, on the main core
* in "single" mode (time = 1 second).
* - In the first 19 callbacks, timer0 is reloaded on the same core,
* then, it is explicitly stopped at the 20th call.
*
* - timer1
*
- * - At initialization, timer1 is loaded by the master core, on the
- * master core in "single" mode (time = 2 seconds).
+ * - At initialization, timer1 is loaded by the main core, on the
+ * main core in "single" mode (time = 2 seconds).
* - In the first 9 callbacks, timer1 is reloaded on another
* core. After the 10th callback, timer1 is not reloaded anymore.
*
* - timer2
*
- * - At initialization, timer2 is loaded by the master core, on the
- * master core in "periodical" mode (time = 1 second).
+ * - At initialization, timer2 is loaded by the main core, on the
+ * main core in "periodical" mode (time = 1 second).
* - In the callback, when t=25s, it stops timer3 and reloads timer0
* on the current core.
*
* - timer3
*
- * - At initialization, timer3 is loaded by the master core, on
+ * - At initialization, timer3 is loaded by the main core, on
* another core in "periodical" mode (time = 1 second).
* - It is stopped at t=25s by timer2.
*/
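All four timers exercise the same arm/serve API; a minimal sketch of that pattern, with my_cb and the done flag as assumptions:

    struct rte_timer tim;
    uint64_t hz = rte_get_timer_hz();

    rte_timer_init(&tim);
    /* one-shot, ~1 second, serviced by this lcore's rte_timer_manage() */
    rte_timer_reset(&tim, hz, SINGLE, rte_lcore_id(), my_cb, NULL);
    while (!done)
        rte_timer_manage();   /* runs expired timer callbacks on this lcore */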
return 0;
}
-/* Need to synchronize slave lcores through multiple steps. */
-enum { SLAVE_WAITING = 1, SLAVE_RUN_SIGNAL, SLAVE_RUNNING, SLAVE_FINISHED };
-static rte_atomic16_t slave_state[RTE_MAX_LCORE];
+/* Need to synchronize worker lcores through multiple steps. */
+enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
+static rte_atomic16_t lcore_state[RTE_MAX_LCORE];
static void
-master_init_slaves(void)
+main_init_workers(void)
{
unsigned i;
- RTE_LCORE_FOREACH_SLAVE(i) {
- rte_atomic16_set(&slave_state[i], SLAVE_WAITING);
+ RTE_LCORE_FOREACH_WORKER(i) {
+ rte_atomic16_set(&lcore_state[i], WORKER_WAITING);
}
}
static void
-master_start_slaves(void)
+main_start_workers(void)
{
unsigned i;
- RTE_LCORE_FOREACH_SLAVE(i) {
- rte_atomic16_set(&slave_state[i], SLAVE_RUN_SIGNAL);
+ RTE_LCORE_FOREACH_WORKER(i) {
+ rte_atomic16_set(&lcore_state[i], WORKER_RUN_SIGNAL);
}
- RTE_LCORE_FOREACH_SLAVE(i) {
- while (rte_atomic16_read(&slave_state[i]) != SLAVE_RUNNING)
+ RTE_LCORE_FOREACH_WORKER(i) {
+ while (rte_atomic16_read(&lcore_state[i]) != WORKER_RUNNING)
rte_pause();
}
}
static void
-master_wait_for_slaves(void)
+main_wait_for_workers(void)
{
unsigned i;
- RTE_LCORE_FOREACH_SLAVE(i) {
- while (rte_atomic16_read(&slave_state[i]) != SLAVE_FINISHED)
+ RTE_LCORE_FOREACH_WORKER(i) {
+ while (rte_atomic16_read(&lcore_state[i]) != WORKER_FINISHED)
rte_pause();
}
}
static void
-slave_wait_to_start(void)
+worker_wait_to_start(void)
{
unsigned lcore_id = rte_lcore_id();
- while (rte_atomic16_read(&slave_state[lcore_id]) != SLAVE_RUN_SIGNAL)
+ while (rte_atomic16_read(&lcore_state[lcore_id]) != WORKER_RUN_SIGNAL)
rte_pause();
- rte_atomic16_set(&slave_state[lcore_id], SLAVE_RUNNING);
+ rte_atomic16_set(&lcore_state[lcore_id], WORKER_RUNNING);
}
static void
-slave_finish(void)
+worker_finish(void)
{
unsigned lcore_id = rte_lcore_id();
- rte_atomic16_set(&slave_state[lcore_id], SLAVE_FINISHED);
+ rte_atomic16_set(&lcore_state[lcore_id], WORKER_FINISHED);
}
static volatile int cb_count = 0;
/* callback for second stress test. will only be called
- * on master lcore */
+ * on main lcore
+ */
static void
timer_stress2_cb(struct rte_timer *tim __rte_unused, void *arg __rte_unused)
{
static struct rte_timer *timers;
int i, ret;
uint64_t delay = rte_get_timer_hz() / 20;
- unsigned lcore_id = rte_lcore_id();
- unsigned master = rte_get_master_lcore();
+ unsigned int lcore_id = rte_lcore_id();
+ unsigned int main_lcore = rte_get_main_lcore();
int32_t my_collisions = 0;
static rte_atomic32_t collisions;
- if (lcore_id == master) {
+ if (lcore_id == main_lcore) {
cb_count = 0;
test_failed = 0;
rte_atomic32_set(&collisions, 0);
- master_init_slaves();
+ main_init_workers();
timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
if (timers == NULL) {
printf("Test Failed\n");
printf("- Cannot allocate memory for timers\n" );
test_failed = 1;
- master_start_slaves();
+ main_start_workers();
goto cleanup;
}
for (i = 0; i < NB_STRESS2_TIMERS; i++)
rte_timer_init(&timers[i]);
- master_start_slaves();
+ main_start_workers();
} else {
- slave_wait_to_start();
+ worker_wait_to_start();
if (test_failed)
goto cleanup;
}
- /* have all cores schedule all timers on master lcore */
+ /* have all cores schedule all timers on main lcore */
for (i = 0; i < NB_STRESS2_TIMERS; i++) {
- ret = rte_timer_reset(&timers[i], delay, SINGLE, master,
+ ret = rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
timer_stress2_cb, NULL);
/* there will be collisions when multiple cores simultaneously
* configure the same timers */
rte_delay_ms(100);
/* all cores rendezvous */
- if (lcore_id == master) {
- master_wait_for_slaves();
+ if (lcore_id == main_lcore) {
+ main_wait_for_workers();
} else {
- slave_finish();
+ worker_finish();
}
/* now check that we get the right number of callbacks */
- if (lcore_id == master) {
+ if (lcore_id == main_lcore) {
my_collisions = rte_atomic32_read(&collisions);
if (my_collisions != 0)
printf("- %d timer reset collisions (OK)\n", my_collisions);
printf("- Expected %d callbacks, got %d\n", NB_STRESS2_TIMERS,
cb_count);
test_failed = 1;
- master_start_slaves();
+ main_start_workers();
goto cleanup;
}
cb_count = 0;
/* proceed */
- master_start_slaves();
+ main_start_workers();
} else {
/* proceed */
- slave_wait_to_start();
+ worker_wait_to_start();
if (test_failed)
goto cleanup;
}
/* now test again, just stop and restart timers at random after init*/
for (i = 0; i < NB_STRESS2_TIMERS; i++)
- rte_timer_reset(&timers[i], delay, SINGLE, master,
+ rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
timer_stress2_cb, NULL);
/* pick random timer to reset, stopping them first half the time */
int r = rand() % NB_STRESS2_TIMERS;
if (i % 2)
rte_timer_stop(&timers[r]);
- rte_timer_reset(&timers[r], delay, SINGLE, master,
+ rte_timer_reset(&timers[r], delay, SINGLE, main_lcore,
timer_stress2_cb, NULL);
}
rte_delay_ms(100);
/* now check that we get the right number of callbacks */
- if (lcore_id == master) {
- master_wait_for_slaves();
+ if (lcore_id == main_lcore) {
+ main_wait_for_workers();
rte_timer_manage();
if (cb_count != NB_STRESS2_TIMERS) {
}
cleanup:
- if (lcore_id == master) {
- master_wait_for_slaves();
+ if (lcore_id == main_lcore) {
+ main_wait_for_workers();
if (timers != NULL) {
rte_free(timers);
timers = NULL;
}
} else {
- slave_finish();
+ worker_finish();
}
return 0;
int64_t diff = 0;
/* launch all timers on core 0 */
- if (lcore_id == rte_get_master_lcore()) {
+ if (lcore_id == rte_get_main_lcore()) {
mytimer_reset(&mytiminfo[0], hz/4, SINGLE, lcore_id,
timer_basic_cb);
mytimer_reset(&mytiminfo[1], hz/2, SINGLE, lcore_id,
/* start other cores */
printf("Start timer stress tests\n");
- rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MAIN);
rte_eal_mp_wait_lcore();
/* stop timer 0 used for stress test */
/* run a second, slightly different set of stress tests */
printf("\nStart timer stress tests 2\n");
test_failed = 0;
- rte_eal_mp_remote_launch(timer_stress2_main_loop, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(timer_stress2_main_loop, NULL, CALL_MAIN);
rte_eal_mp_wait_lcore();
if (test_failed)
return TEST_FAILED;
/* start other cores */
printf("\nStart timer basic tests\n");
- rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MAIN);
rte_eal_mp_wait_lcore();
/* stop all timers */
#define N_TIMERS 50
static struct rte_timer timer[N_TIMERS];
-static unsigned timer_lcore_id[N_TIMERS];
+static unsigned int timer_lcore_id[N_TIMERS];
-static unsigned master;
-static volatile unsigned stop_slaves;
+static unsigned int main_lcore;
+static volatile unsigned int stop_workers;
static int reload_timer(struct rte_timer *tim);
(tim - timer);
int ret;
- ret = rte_timer_reset(tim, ticks, PERIODICAL, master, timer_cb, NULL);
+ ret = rte_timer_reset(tim, ticks, PERIODICAL, main_lcore, timer_cb, NULL);
if (ret != 0) {
rte_log(RTE_LOG_DEBUG, timer_logtype_test,
"- core %u failed to reset timer %" PRIuPTR " (OK)\n",
}
static int
-slave_main_loop(__rte_unused void *arg)
+worker_main_loop(__rte_unused void *arg)
{
unsigned lcore_id = rte_lcore_id();
unsigned i;
printf("Starting main loop on core %u\n", lcore_id);
- while (!stop_slaves) {
+ while (!stop_workers) {
/* Wait until the timer manager is running.
* We know it's running when we see timer[0] NOT pending.
*/
unsigned lcore_id;
unsigned i;
- master = lcore_id = rte_lcore_id();
+ main_lcore = lcore_id = rte_lcore_id();
hz = rte_get_timer_hz();
/* init and start timers */
ret = reload_timer(&timer[i]);
TEST_ASSERT(ret == 0, "reload_timer failed");
- /* Distribute timers to slaves.
- * Note that we assign timer[0] to the master.
+ /* Distribute timers to workers.
+ * Note that we assign timer[0] to the main lcore.
*/
timer_lcore_id[i] = lcore_id;
lcore_id = rte_get_next_lcore(lcore_id, 1, 1);
cur_time = rte_get_timer_cycles();
end_time = cur_time + (hz * TEST_DURATION_S);
- /* start slave cores */
- stop_slaves = 0;
+ /* start worker cores */
+ stop_workers = 0;
printf("Start timer manage race condition test (%u seconds)\n",
TEST_DURATION_S);
- rte_eal_mp_remote_launch(slave_main_loop, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(worker_main_loop, NULL, SKIP_MAIN);
while (diff >= 0) {
/* run the timers */
diff = end_time - cur_time;
}
- /* stop slave cores */
+ /* stop worker cores */
printf("Stopping timer manage race condition test\n");
- stop_slaves = 1;
+ stop_workers = 1;
rte_eal_mp_wait_lcore();
/* stop timers */
#define launch_proc(ARGV) process_dup(ARGV, RTE_DIM(ARGV), __func__)
struct test_info {
- unsigned int mstr_lcore;
+ unsigned int main_lcore;
unsigned int mgr_lcore;
unsigned int sec_lcore;
uint32_t timer_data_id;
TEST_ASSERT_SUCCESS(ret, "Failed to allocate timer data "
"instance");
- unsigned int *mstr_lcorep = &test_info->mstr_lcore;
+ unsigned int *main_lcorep = &test_info->main_lcore;
unsigned int *mgr_lcorep = &test_info->mgr_lcore;
unsigned int *sec_lcorep = &test_info->sec_lcore;
- *mstr_lcorep = rte_get_master_lcore();
- *mgr_lcorep = rte_get_next_lcore(*mstr_lcorep, 1, 1);
+ *main_lcorep = rte_get_main_lcore();
+ *mgr_lcorep = rte_get_next_lcore(*main_lcorep, 1, 1);
*sec_lcorep = rte_get_next_lcore(*mgr_lcorep, 1, 1);
ret = rte_eal_remote_launch(timer_manage_loop,
memset(data, 0, sz);
data->nb_workers = rte_lcore_count() - 1;
- RTE_LCORE_FOREACH_SLAVE(id)
+ RTE_LCORE_FOREACH_WORKER(id)
rte_eal_remote_launch(f, &data->ldata[worker++], id);
wait_till_workers_are_ready(data);
measure_perf(str, data);
signal_workers_to_finish(data);
- RTE_LCORE_FOREACH_SLAVE(id)
+ RTE_LCORE_FOREACH_WORKER(id)
rte_eal_wait_lcore(id);
}
typedef int (lcore_function_t)(void *);
/* launch a function of lcore_function_t type */
- int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned slave_id);
+ int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned worker_id);
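As an illustration, a minimal launch/wait sequence built on this prototype might look as follows (hello() is a hypothetical worker function; error handling omitted):
    #include <stdio.h>
    #include <rte_launch.h>
    #include <rte_lcore.h>
    static int
    hello(void *arg __rte_unused)
    {
        printf("hello from lcore %u\n", rte_lcore_id());
        return 0;
    }
    /* on the main lcore, after rte_eal_init() */
    unsigned int worker_id = rte_get_next_lcore(-1, 1, 0);
    rte_eal_remote_launch(hello, NULL, worker_id);
    rte_eal_wait_lcore(worker_id);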
C Indentation
If your system has a lot (>1 GB size) of hugepage memory, not all of it will be allocated.
Due to hugepages typically being allocated on a local NUMA node, the hugepages allocation the application gets during the initialization depends on which
NUMA node it is running on (the EAL does not affinitize cores until much later in the initialization process).
-Sometimes, the Linux OS runs the DPDK application on a core that is located on a different NUMA node from DPDK master core and
+Sometimes, the Linux OS runs the DPDK application on a core that is located on a different NUMA node from DPDK main core and
therefore all the hugepages are allocated on the wrong socket.
To avoid this scenario, either lower the amount of hugepage memory available to 1 GB size (or less), or run the application with taskset
-affinitizing the application to a would-be master core.
+affinitizing the application to a would-be main core.
-For example, if your EAL coremask is 0xff0, the master core will usually be the first core in the coremask (0x10); this is what you have to supply to taskset::
+For example, if your EAL coremask is 0xff0, the main core will usually be the first core in the coremask (0x10); this is what you have to supply to taskset::
taskset 0x10 ./l2fwd -l 4-11 -n 2
cores.
* For high-performance execution logic ensure running it on correct NUMA
- and non-master core.
+ and worker core.
* Analyze run logic with ``rte_dump_stack`` and
``rte_memdump`` for more insights.
At a given instance only one core option ``--lcores``, ``-l`` or ``-c`` can
be used.
-* ``--master-lcore <core ID>``
+* ``--main-lcore <core ID>``
- Core ID that is used as master.
+ Core ID that is used as main.
* ``-s <service core mask>``
EAL: coremask set to 1
EAL: Detected lcore 0 on socket 0
...
- EAL: Master core 0 is ready (tid=1b2ad720)
+ EAL: Main core 0 is ready (tid=1b2ad720)
RTE>>
Applications
EAL: Virtual area found at 0x7f0a5c000000 (size = 0x200000)
EAL: Requesting 1024 pages of size 2MB from socket 0
EAL: Requesting 1024 pages of size 2MB from socket 1
- EAL: Master core 0 is ready (tid=de25b700)
+ EAL: Main core 0 is ready (tid=de25b700)
EAL: Core 1 is ready (tid=5b7fe700)
EAL: Core 3 is ready (tid=5a7fc700)
EAL: Core 2 is ready (tid=5affd700)
.. code-block:: console
-  testpmd -l 1,3,5 --master-lcore 1 --txq=2 –rxq=2 --nb-cores=2
+  testpmd -l 1,3,5 --main-lcore 1 --txq=2 --rxq=2 --nb-cores=2
**TSS**
.. note::
Initialization of objects, such as memory zones, rings, memory pools, lpm tables and hash tables,
- should be done as part of the overall application initialization on the master lcore.
+ should be done as part of the overall application initialization on the main lcore.
The creation and initialization functions for these objects are not multi-thread safe.
However, once initialized, the objects themselves can safely be used in multiple threads simultaneously.
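A minimal sketch of that pattern, assuming an arbitrary pool name and sizes: the pool is created once on the main lcore during startup, then used concurrently by the workers.
.. code-block:: c

    static struct rte_mempool *pkt_pool;

    static int
    worker(void *arg __rte_unused)
    {
        /* Safe after creation: mempool get/put are multi-thread safe. */
        struct rte_mbuf *m = rte_pktmbuf_alloc(pkt_pool);

        if (m != NULL)
            rte_pktmbuf_free(m);
        return 0;
    }

    /* main lcore, during overall application initialization */
    unsigned int lcore_id;

    pkt_pool = rte_pktmbuf_pool_create("pkt_pool", 8192, 256, 0,
            RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (pkt_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

    RTE_LCORE_FOREACH_WORKER(lcore_id)
        rte_eal_remote_launch(worker, NULL, lcore_id);
    rte_eal_mp_wait_lcore();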
Additional restrictions are present when running in 32-bit mode. In dynamic
memory mode, by default maximum of 2 gigabytes of VA space will be preallocated,
-and all of it will be on master lcore NUMA node unless ``--socket-mem`` flag is
+and all of it will be on main lcore NUMA node unless ``--socket-mem`` flag is
used.
In legacy mode, VA space will only be preallocated for segments that were
- with affinity restricted to 2-4, the Control Threads will end up on
CPU 4.
- with affinity restricted to 2-3, the Control Threads will end up on
- CPU 2 (master lcore, which is the default when no CPU is available).
+ CPU 2 (main lcore, which is the default when no CPU is available).
.. _known_issue_label:
received on a polled Rx queue. The interrupt thread is affinitized to the same
CPUs as the lcores of the Rx adapter service function, if the Rx adapter
service function has not been mapped to any lcores, the interrupt thread
-is mapped to the master lcore.
+is mapped to the main lcore.
Rx Callback for SW Rx Adapter
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LPM
Longest Prefix Match
-master lcore
+main lcore
The execution unit that executes the main() function and that launches
other lcores.
+master lcore
+ Deprecated name for *main lcore*. No longer used.
+
mbuf
An mbuf is a data structure used internally to carry messages (mainly
network packets). The name is derived from BSD stacks. To understand the
Reception
Slave lcore
- Any *lcore* that is not the *master lcore*.
+ Deprecated name for *worker lcore*. No longer used.
Socket
A physical CPU, that includes several *cores*.
Wr
Write
+Worker lcore
+ Any *lcore* that is not the *main lcore*.
+
WRED
Weighted Random Early Detection
sodipodi:role="line"
id="tspan3165"
x="114.71806"
- y="46.6479">Master lcore</tspan></text>
+ y="46.6479">Main lcore</tspan></text>
<text
xml:space="preserve"
style="font-size:20px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
* kvargs: The function ``rte_kvargs_process`` will get a new parameter
for returning key match count. It will ease handling of no-match case.
-* eal: To be more inclusive in choice of naming, the DPDK project
- will replace uses of master/slave in the API's and command line arguments.
-
- References to master/slave in relation to lcore will be renamed
- to initial/worker. The function ``rte_get_master_lcore()``
- will be renamed to ``rte_get_initial_lcore()``.
- For the 20.11 release, both names will be present and the
- old function will be marked with the deprecated tag.
- The old function will be removed in a future version.
-
- The iterator for worker lcores will also change:
- ``RTE_LCORE_FOREACH_SLAVE`` will be replaced with
- ``RTE_LCORE_FOREACH_WORKER``.
-
- The ``master-lcore`` argument to testpmd will be replaced
- with ``initial-lcore``. The old ``master-lcore`` argument
- will produce a runtime notification in 20.11 release, and
- be removed completely in a future release.
-
* eal: The terms blacklist and whitelist to describe devices used
by DPDK will be replaced in the 20.11 release.
This will apply to command line arguments as well as macros.
The information provided by these macros is available through standard
compiler macros.
+* eal: Replaced the function ``rte_get_master_lcore()`` with
+ ``rte_get_main_lcore()``. The old function is deprecated.
+
+ The iterator for worker lcores is also changed:
+ ``RTE_LCORE_FOREACH_SLAVE`` is replaced with
+ ``RTE_LCORE_FOREACH_WORKER``.
+
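A minimal sketch of the new spelling (``fn`` stands in for any application ``lcore_function_t``):

.. code-block:: c

    unsigned int lcore_id;

    printf("main lcore: %u\n", rte_get_main_lcore());

    RTE_LCORE_FOREACH_WORKER(lcore_id)
        rte_eal_remote_launch(fn, NULL, lcore_id);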
* eal: The ``rte_logs`` struct and global symbol was made private
and is no longer part of the API.
the parameter -w.
3 cores are allocated to the application, and assigned as:
- - core 3 is the master and used to print the stats live on screen,
+ - core 3 is the main lcore and is used to print the stats live on screen,
- core 4 is the encoding lcore performing Rx and Turbo Encode operations
-----------
The sample program has two parts: A background `packet reflector`_
-that runs on a slave core, and a foreground `Ethtool Shell`_ that
-runs on the master core. These are described below.
+that runs on a worker core, and a foreground `Ethtool Shell`_ that
+runs on the main core. These are described below.
Packet Reflector
~~~~~~~~~~~~~~~~
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2010-2014 Intel Corporation.
Hello World Sample Application
.. code-block:: c
- /* call lcore_hello() on every slave lcore */
+ /* call lcore_hello() on every worker lcore */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(lcore_hello, NULL, lcore_id);
}
- /* call it on master lcore too */
+ /* call it on main lcore too */
lcore_hello(NULL);
.. code-block:: c
- rte_eal_mp_remote_launch(lcore_hello, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(lcore_hello, NULL, CALL_MAIN);
Refer to the *DPDK API Reference* for detailed information on the rte_eal_mp_remote_launch() function.
incoming traffic and makes a copy of each packet. The second lcore then
updates MAC address and sends the copy. If one lcore per port is used,
both operations are done sequentially. For each configuration an additional
-lcore is needed since the master lcore does not handle traffic but is
+lcore is needed since the main lcore does not handle traffic but is
responsible for configuration, statistics printing and safe shutdown of
all ports and devices.
The application can use a maximum of 8 ports.
-To run the application in a Linux environment with 3 lcores (the master lcore,
+To run the application in a Linux environment with 3 lcores (the main lcore,
plus two forwarding cores), a single port (port 0), software copying and MAC
updating issue the command:
$ ./build/ioatfwd -l 0-2 -n 2 -- -p 0x1 --mac-updating -c sw
-To run the application in a Linux environment with 2 lcores (the master lcore,
+To run the application in a Linux environment with 2 lcores (the main lcore,
plus one forwarding core), 2 ports (ports 0 and 1), hardware copying and no MAC
updating issue the command:
cfg.nb_lcores = rte_lcore_count() - 1;
if (cfg.nb_lcores < 1)
rte_exit(EXIT_FAILURE,
- "There should be at least one slave lcore.\n");
+ "There should be at least one worker lcore.\n");
ret = 0;
statistics is allocated.
Finally ``main()`` function starts all packet handling lcores and starts
-printing stats in a loop on the master lcore. The application can be
-interrupted and closed using ``Ctrl-C``. The master lcore waits for
-all slave processes to finish, deallocates resources and exits.
+printing stats in a loop on the main lcore. The application can be
+interrupted and closed using ``Ctrl-C``. The main lcore waits for
+all worker lcores to finish, deallocates resources and exits.
The processing lcores launching function are described below.
Run-time
~~~~~~~~
-The master thread is creating and managing all the application objects based on CLI input.
+The main thread creates and manages all the application objects based on CLI input.
Each data plane thread runs one or several pipelines previously assigned to it in round-robin order. Each data plane thread
executes two tasks in time-sharing mode:
1. *Packet processing task*: Process bursts of input packets read from the pipeline input ports.
2. *Message handling task*: Periodically, the data plane thread pauses the packet processing task and polls for request
- messages send by the master thread. Examples: add/remove pipeline to/from current data plane thread, add/delete rules
+ messages sent by the main thread. Examples: add/remove pipeline to/from current data plane thread, add/delete rules
to/from given table of a specific pipeline owned by the current data plane thread, read statistics, etc.
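That request/response path can be pictured with a short sketch; the message layouts and helper name below are illustrative, not the application's actual structures. The main thread enqueues a request on one ring and polls a second ring for the data plane thread's response.

.. code-block:: c

    struct thread_msg_req { int type; };      /* illustrative layout */
    struct thread_msg_rsp { int status; };

    /* main thread side: post a request, then poll for the response */
    static struct thread_msg_rsp *
    msg_send_recv(struct rte_ring *msgq_req, struct rte_ring *msgq_rsp,
        struct thread_msg_req *req)
    {
        struct thread_msg_rsp *rsp;

        while (rte_ring_sp_enqueue(msgq_req, req) != 0)
            rte_pause();

        /* the data plane thread periodically dequeues msgq_req and
         * enqueues its response on msgq_rsp */
        while (rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp) != 0)
            rte_pause();

        return rsp;
    }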
Examples
--------
The application demonstrates how to protect against 'silent outages'
-on packet processing cores. A Keep Alive Monitor Agent Core (master)
+on packet processing cores. A Keep Alive Monitor Agent Core (main)
monitors the state of packet processing cores (worker cores) by
dispatching pings at a regular time interval (default is 5ms) and
monitoring the state of the cores. Cores states are: Alive, MIA, Dead
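A condensed sketch of that split, assuming the keepalive context and the main core's timer are set up elsewhere (``ka``, ``hb_timer`` and ``quit`` are illustrative names):

.. code-block:: c

    static struct rte_keepalive *ka;   /* from rte_keepalive_create() */
    static volatile int quit;

    static int
    worker_loop(void *arg __rte_unused)
    {
        while (!quit) {
            rte_keepalive_mark_alive(ka);
            /* ... packet processing ... */
        }
        return 0;
    }

    /* main core: workers were added with rte_keepalive_register_core();
     * dispatch pings every 5 ms from a periodic timer */
    rte_timer_reset(&hb_timer, rte_get_timer_hz() / 200, PERIODICAL,
            rte_lcore_id(),
            (rte_timer_cb_t)rte_keepalive_dispatch_pings, ka);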
/* if timer has reached its timeout */
if (unlikely(timer_tsc >= timer_period)) {
- /* do this only on master core */
- if (lcore_id == rte_get_master_lcore()) {
+ /* do this only on main core */
+ if (lcore_id == rte_get_main_lcore()) {
print_stats();
/* reset the timer */
timer_tsc = 0;
/* if timer has reached its timeout */
if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
- /* do this only on master core */
-
- if (lcore_id == rte_get_master_lcore()) {
+ /* do this only on main core */
+ if (lcore_id == rte_get_main_lcore()) {
print_stats();
/* reset the timer */
Difference is that forwarding logic starting from Rx, followed by LPM lookup,
TTL update and finally Tx is implemented inside graph nodes. These nodes are
interconnected in graph framework. Application main loop needs to walk over
-graph using ``rte_graph_walk()`` with graph objects created one per slave lcore.
+graph using ``rte_graph_walk()`` with graph objects created one per worker lcore.
The lookup method is as per implementation of ``ip4_lookup`` graph node.
The ID of the output interface for the input packet is the next hop returned by
Since currently ``ip4_lookup`` and ``ip4_rewrite`` nodes don't support
lock-less mechanisms (RCU, etc.) to add run-time forwarding data like route and
rewrite data, forwarding data is added before packet processing loop is
- launched on slave lcore.
+ launched on worker lcore.
.. code-block:: c
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Now that all the device configurations are done, graph creations are done and
-forwarding data is updated with nodes, slave lcores will be launched with graph
+forwarding data is updated with nodes, worker lcores will be launched with graph
main loop. Graph main loop is very simple in the sense that it needs to
continuously call a non-blocking API ``rte_graph_walk()`` with its lcore
specific graph object that was already created.
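A per-lcore loop in that style could be sketched as below; ``graphs[]`` and ``force_quit`` stand in for the application's lcore-to-graph mapping and exit flag.

.. code-block:: c

    static int
    graph_main_loop(void *arg __rte_unused)
    {
        struct rte_graph *graph = graphs[rte_lcore_id()];

        while (!force_quit)
            rte_graph_walk(graph);   /* non-blocking; returns after one walk */
        return 0;
    }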
``l3fwd-power`` does simple l3fwding along with calculating empty polls, full polls,
and busy percentage for each forwarding core. The aggregation of these
values of all cores is reported as application level telemetry to metric
-library for every 500ms from the master core.
+library every 500ms from the main core.
The busy percentage is calculated by recording the poll_count
and when the count reaches a defined value the total
/* if timer has reached its timeout */
if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
- /* do this only on master core */
-
- if (lcore_id == rte_get_master_lcore()) {
+ /* do this only on main core */
+ if (lcore_id == rte_get_main_lcore()) {
print_stats();
/* reset the timer */
EAL: Virtual area found at 0x7ff200000000 (size = 0x40000000)
...
- EAL: Master core 0 is ready (tid=54e41820)
+ EAL: Main core 0 is ready (tid=54e41820)
EAL: Core 1 is ready (tid=53b32700)
Starting core 1
.. code-block:: console
- EAL: Master core 10 is ready (tid=b5f89820) EAL: Master core 8 is ready (tid=864a3820)
+ EAL: Main core 10 is ready (tid=b5f89820) EAL: Main core 8 is ready (tid=864a3820)
EAL: Core 11 is ready (tid=84ffe700) EAL: Core 9 is ready (tid=85995700)
Starting core 11 Starting core 9
simple_mp > send hello_secondary simple_mp > core 9: Received 'hello_secondary'
.. note::
- In the server process, a single thread, the master thread, that is, the lowest numbered lcore in the coremask/corelist, performs all packet I/O.
+ In the server process, a single thread, the main thread, that is, the lowest numbered lcore in the coremask/corelist, performs all packet I/O.
If a coremask/corelist is specified with more than a single lcore bit set in it,
an additional lcore will be used for a thread to periodically print packet count statistics.
The application uses at least three CPU cores:
-* RX core (maser core) receives traffic from the NIC ports and feeds Worker
+* RX core (main core) receives traffic from the NIC ports and feeds Worker
cores with traffic through SW queues.
-* Worker core (slave core) basically do some light work on the packet.
+* Worker core (worker core) does some light work on the packet.
Currently it modifies the output port of the packet for configurations with
more than one port enabled.
-* TX Core (slave core) receives traffic from Worker cores through software queues,
+* TX Core (worker core) receives traffic from Worker cores through software queues,
inserts out-of-order packets into reorder buffer, extracts ordered packets
from the reorder buffer and sends them to the NIC ports for transmission.
./packet_ordering [EAL options] -- -p PORTMASK [--disable-reorder] [--insight-worker]
The -c EAL CPU_COREMASK option has to contain at least 3 CPU cores.
-The first CPU core in the core mask is the master core and would be assigned to
+The first CPU core in the core mask is the main core and would be assigned to
RX core, the last to TX core and the rest to Worker cores.
The PORTMASK parameter must contain either 1 or even enabled port numbers.
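For example (illustrative values), four lcores and a single port, so that core 0 does Rx, core 3 does Tx, and cores 1-2 are workers:

.. code-block:: console

    ./packet_ordering -l 0-3 -n 4 -- -p 0x1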
interconnected via software rings.
On initialization an L-thread scheduler is started on every EAL thread. On all
-but the master EAL thread only a dummy L-thread is initially started.
-The L-thread started on the master EAL thread then spawns other L-threads on
+but the main EAL thread only a dummy L-thread is initially started.
+The L-thread started on the main EAL thread then spawns other L-threads on
different L-thread schedulers according to the command line parameters.
The RX threads poll the network interface queues and post received packets
queue usage, and these statistics can be displayed by calling the function
``lthread_diag_stats_display()``. This function also performs a consistency
check on the caches and queues. The function should only be called from the
-master EAL thread after all slave threads have stopped and returned to the C
+main EAL thread after all worker threads have stopped and returned to the C
main program, otherwise the consistency check will fail.
a PTP client using the DPDK IEEE1588 API.
In order to keep the application simple the following assumptions are made:
* The first discovered master is the master for the session.
* Only L2 PTP packets are supported.
* Only the PTP v2 protocol is supported.
* Only the slave clock is implemented.
In this mode, the application shows a command line that can be used for obtaining statistics while
scheduling is taking place (see interactive mode below for more information).
-* --mst n: Master core index (the default value is 1).
+* --mnc n: Main core index (the default value is 1).
* --rsz "A, B, C": Ring sizes:
Note that independent cores for the packet flow configurations for each of the RX, WT and TX thread are also supported,
providing flexibility to balance the work.
-The EAL coremask/corelist is constrained to contain the default mastercore 1 and the RX, WT and TX cores only.
+The EAL coremask/corelist is constrained to contain the default main core 1 and the RX, WT and TX cores only.
Explanation
-----------
rte_timer_subsystem_init();
-After timer creation (see the next paragraph),
-the main loop is executed on each slave lcore using the well-known rte_eal_remote_launch() and also on the master.
+After timer creation (see the next paragraph), the main loop is
+executed on each worker lcore using the well-known
+rte_eal_remote_launch() and also on the main lcore.
.. code-block:: c
- /* call lcore_mainloop() on every slave lcore */
-
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ /* call lcore_mainloop() on every worker lcore */
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);
}
- /* call it on master lcore too */
+ /* call it on main lcore too */
(void) lcore_mainloop(NULL);
Then, the two timers are configured:
-* The first timer (timer0) is loaded on the master lcore and expires every second.
+* The first timer (timer0) is loaded on the main lcore and expires every second.
Since the PERIODICAL flag is provided, the timer is reloaded automatically by the timer subsystem.
The callback function is timer0_cb().
.. code-block:: c
- /* load timer0, every second, on master lcore, reloaded automatically */
+ /* load timer0, every second, on main lcore, reloaded automatically */
hz = rte_get_hpet_hz();
* ``--coremask=0xXX``
Set the hexadecimal bitmask of the cores running the packet forwarding test.
- The master lcore is reserved for command line parsing only and cannot be masked on for packet forwarding.
+ The main lcore is reserved for command line parsing only and cannot be masked on for packet forwarding.
* ``--portmask=0xXX``
.. note::
- The master lcore is reserved for command line parsing only and cannot be masked on for packet forwarding.
+ The main lcore is reserved for command line parsing only and cannot be masked on for packet forwarding.
set portmask
~~~~~~~~~~~~
BUS_INIT_FUNC_TRACE();
if ((size_t)arg == 1 || lcore == LCORE_ID_ANY)
- lcore = rte_get_master_lcore();
+ lcore = rte_get_main_lcore();
else
if (lcore >= RTE_MAX_LCORE)
return -1;
static int
-launch_workers_and_wait(int (*master_worker)(void *),
- int (*slave_workers)(void *), uint32_t total_events,
+launch_workers_and_wait(int (*main_worker)(void *),
+ int (*workers)(void *), uint32_t total_events,
uint8_t nb_workers, uint8_t sched_type)
{
uint8_t port = 0;
w_lcore = rte_get_next_lcore(
/* start core */ -1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
- rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+ rte_eal_remote_launch(main_worker, &param[0], w_lcore);
for (port = 1; port < nb_workers; port++) {
param[port].total_events = &atomic_total_events;
param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
rte_smp_wmb();
w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
- rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+ rte_eal_remote_launch(workers, &param[port], w_lcore);
}
ret = wait_workers_to_join(w_lcore, &atomic_total_events);
static inline int
-launch_workers_and_wait(int (*master_worker)(void *),
- int (*slave_workers)(void *), uint32_t total_events,
+launch_workers_and_wait(int (*main_worker)(void *),
+ int (*worker)(void *), uint32_t total_events,
uint8_t nb_workers, uint8_t sched_type)
{
uint8_t port = 0;
w_lcore = rte_get_next_lcore(
/* start core */ -1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
- rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+ rte_eal_remote_launch(main_worker, &param[0], w_lcore);
for (port = 1; port < nb_workers; port++) {
param[port].total_events = &atomic_total_events;
param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
rte_smp_wmb();
w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
- rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+ rte_eal_remote_launch(worker, &param[port], w_lcore);
}
ret = wait_workers_to_join(w_lcore, &atomic_total_events);
}
static inline int
-launch_workers_and_wait(int (*master_worker)(void *),
- int (*slave_workers)(void *), uint32_t total_events,
+launch_workers_and_wait(int (*main_thread)(void *),
+ int (*worker_thread)(void *), uint32_t total_events,
uint8_t nb_workers, uint8_t sched_type)
{
rte_atomic32_t atomic_total_events;
w_lcore = rte_get_next_lcore(
/* start core */ -1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
- rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+ rte_eal_remote_launch(main_thread, &param[0], w_lcore);
for (port = 1; port < nb_workers; port++) {
param[port].total_events = &atomic_total_events;
param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
rte_smp_wmb();
w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
- rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+ rte_eal_remote_launch(worker_thread, &param[port], w_lcore);
}
rte_smp_wmb();
p_lcore = rte_get_next_lcore(
/* start core */ -1,
- /* skip master */ 1,
+ /* skip main */ 1,
/* wrap */ 0);
w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
return 0;
- socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+ socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
nqr = rte_zmalloc_socket("nqr",
sizeof(struct bnxt_cp_ring_info),
if (BNXT_NUM_ASYNC_CPR(bp) == 0)
return 0;
- socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+ socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
cpr = rte_zmalloc_socket("cpr",
sizeof(struct bnxt_cp_ring_info),
unsigned int core_id = rte_lcore_id();
if (core_id == LCORE_ID_ANY)
- core_id = rte_get_master_lcore();
+ core_id = rte_get_main_lcore();
hif = mrvl_get_hif(priv, core_id);
core_id = rte_lcore_id();
if (core_id == LCORE_ID_ANY)
- core_id = rte_get_master_lcore();
+ core_id = rte_get_main_lcore();
hif = mrvl_get_hif(rxq->priv, core_id);
if (!hif)
unsigned int core_id = rte_lcore_id();
if (core_id == LCORE_ID_ANY)
- core_id = rte_get_master_lcore();
+ core_id = rte_get_main_lcore();
if (!q)
return;
snprintf(mz_name, sizeof(mz_name), "%lx",
(unsigned long)rte_get_timer_cycles());
if (core_id == (unsigned int)LCORE_ID_ANY)
- core_id = rte_get_master_lcore();
+ core_id = rte_get_main_lcore();
socket_id = rte_lcore_to_socket_id(core_id);
mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
snprintf(mz_name, sizeof(mz_name), "%lx",
(unsigned long)rte_get_timer_cycles());
if (core_id == (unsigned int)LCORE_ID_ANY)
- core_id = rte_get_master_lcore();
+ core_id = rte_get_main_lcore();
socket_id = rte_lcore_to_socket_id(core_id);
mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
RTE_MEMZONE_IOVA_CONTIG, align);
#endif
/**
- * Master thead: data plane thread context
+ * Main thread: data plane thread context
*/
struct softnic_thread {
struct rte_ring *msgq_req;
#include "rte_eth_softnic_internals.h"
/**
- * Master thread: data plane thread init
+ * Main thread: data plane thread init
*/
void
softnic_thread_free(struct pmd_internals *softnic)
{
uint32_t i;
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
struct softnic_thread *t = &softnic->thread[i];
/* MSGQs */
return -1;
}
- /* Master thread records */
+ /* Main thread records */
t->msgq_req = msgq_req;
t->msgq_rsp = msgq_rsp;
t->service_id = UINT32_MAX;
static inline int
thread_is_valid(struct pmd_internals *softnic, uint32_t thread_id)
{
- if (thread_id == rte_get_master_lcore())
+ if (thread_id == rte_get_main_lcore())
return 0; /* FALSE */
if (softnic->params.sc && rte_lcore_has_role(thread_id, ROLE_SERVICE))
}
/**
- * Master thread & data plane threads: message passing
+ * Main thread & data plane threads: message passing
*/
enum thread_req_type {
THREAD_REQ_PIPELINE_ENABLE = 0,
};
/**
- * Master thread
+ * Main thread
*/
static struct thread_msg_req *
thread_msg_alloc(void)
}
/**
- * Master thread & data plane threads: message passing
+ * Main thread & data plane threads: message passing
*/
enum pipeline_req_type {
/* Port IN */
};
/**
- * Master thread
+ * Main thread
*/
static struct pipeline_msg_req *
pipeline_msg_alloc(void)
struct stats_lcore_params stats_lcore;
struct rte_ring *enc_to_dec_ring;
bool stats_thread_started = false;
- unsigned int master_lcore_id = rte_get_master_lcore();
+ unsigned int main_lcore_id = rte_get_main_lcore();
rte_atomic16_init(&global_exit_flag);
stats_lcore.app_params = &app_params;
stats_lcore.lconf = lcore_conf;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (lcore_conf[lcore_id].core_type != 0)
- /* launch per-lcore processing loop on slave lcores */
+ /* launch per-lcore processing loop on worker lcores */
rte_eal_remote_launch(processing_loop,
&lcore_conf[lcore_id], lcore_id);
else if (!stats_thread_started) {
}
if (!stats_thread_started &&
- lcore_conf[master_lcore_id].core_type != 0)
+ lcore_conf[main_lcore_id].core_type != 0)
rte_exit(EXIT_FAILURE,
"Not enough lcores to run the statistics printing loop!");
- else if (lcore_conf[master_lcore_id].core_type != 0)
- processing_loop(&lcore_conf[master_lcore_id]);
+ else if (lcore_conf[main_lcore_id].core_type != 0)
+ processing_loop(&lcore_conf[main_lcore_id]);
else if (!stats_thread_started)
stats_loop(&stats_lcore);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
ret |= rte_eal_wait_lcore(lcore_id);
}
struct cmdline *cl,
__rte_unused void *data)
{
- int slave_core_id = rte_lcore_id();
+ int worker_core_id = rte_lcore_id();
rte_spinlock_trylock(&global_flag_stru_p->lock);
if (global_flag_stru_p->LcoreMainIsRunning == 0) {
return;
}
- /* start lcore main on core != master_core - ARP response thread */
- slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
- if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
+ /* start lcore main on core != main_core - ARP response thread */
+ worker_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
+ if ((worker_core_id >= RTE_MAX_LCORE) || (worker_core_id == 0))
return;
rte_spinlock_trylock(&global_flag_stru_p->lock);
cmdline_printf(cl,
"Starting lcore_main on core %d:%d "
"Our IP:%d.%d.%d.%d\n",
- slave_core_id,
- rte_eal_remote_launch(lcore_main, NULL, slave_core_id),
+ worker_core_id,
+ rte_eal_remote_launch(lcore_main, NULL, worker_core_id),
BOND_IP_1,
BOND_IP_2,
BOND_IP_3,
NULL,
};
-/* prompt function, called from main on MASTER lcore */
+/* prompt function, called from main on MAIN lcore */
static void prompt(__rte_unused void *arg1)
{
struct cmdline *cl;
int
main(int argc, char *argv[])
{
- int ret, slave_core_id;
+ int ret, worker_core_id;
uint16_t nb_ports, i;
/* init EAL */
rte_spinlock_init(&global_flag_stru_p->lock);
/* check state of lcores */
- RTE_LCORE_FOREACH_SLAVE(slave_core_id) {
- if (rte_eal_get_lcore_state(slave_core_id) != WAIT)
+ RTE_LCORE_FOREACH_WORKER(worker_core_id) {
+ if (rte_eal_get_lcore_state(worker_core_id) != WAIT)
return -EBUSY;
}
- /* start lcore main on core != master_core - ARP response thread */
- slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
- if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
+ /* start lcore main on core != main_core - ARP response thread */
+ worker_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
+ if ((worker_core_id >= RTE_MAX_LCORE) || (worker_core_id == 0))
return -EPERM;
global_flag_stru_p->LcoreMainIsRunning = 1;
- global_flag_stru_p->LcoreMainCore = slave_core_id;
+ global_flag_stru_p->LcoreMainCore = worker_core_id;
printf("Starting lcore_main on core %d:%d Our IP:%d.%d.%d.%d\n",
- slave_core_id,
+ worker_core_id,
rte_eal_remote_launch((lcore_function_t *)lcore_main,
NULL,
- slave_core_id),
+ worker_core_id),
BOND_IP_1,
BOND_IP_2,
BOND_IP_3,
init_power_library(void)
{
int ret = 0, lcore_id;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
/* init power management library */
ret = rte_power_init(lcore_id);
if (ret) {
* available, the higher frequency cores will go to the
* distributor first, then rx, then tx.
*/
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_power_get_capabilities(lcore_id, &lcore_cap);
* after the high performing core assignment above, pre-assign
* them here.
*/
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (lcore_id == (unsigned int)distr_core_id ||
lcore_id == (unsigned int)rx_core_id ||
lcore_id == (unsigned int)tx_core_id)
* Kick off all the worker threads first, avoiding the pre-assigned
* lcore_ids for tx, rx and distributor workloads.
*/
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (lcore_id == (unsigned int)distr_core_id ||
lcore_id == (unsigned int)rx_core_id ||
lcore_id == (unsigned int)tx_core_id)
usleep(1000);
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
rte_ether_addr_copy(&ptr_port->mac_addr, &ptr_mac_hdr->s_addr);
}
-static int slave_main(__rte_unused void *ptr_data)
+static int worker_main(__rte_unused void *ptr_data)
{
struct app_port *ptr_port;
struct rte_mbuf *ptr_frame;
app_cfg.cnt_ports = cnt_ports;
if (rte_lcore_count() < 2)
- rte_exit(EXIT_FAILURE, "No available slave core!\n");
- /* Assume there is an available slave.. */
+ rte_exit(EXIT_FAILURE, "No available worker core!\n");
+
+ /* Assume there is an available worker.. */
id_core = rte_lcore_id();
id_core = rte_get_next_lcore(id_core, 1, 1);
- rte_eal_remote_launch(slave_main, NULL, id_core);
+ rte_eal_remote_launch(worker_main, NULL, id_core);
ethapp_main();
app_cfg.exit_now = 1;
- RTE_LCORE_FOREACH_SLAVE(id_core) {
+ RTE_LCORE_FOREACH_WORKER(id_core) {
if (rte_eal_wait_lcore(id_core) < 0)
return -1;
}
}
int worker_idx = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (lcore_id >= MAX_NUM_CORE)
break;
rte_exit(EXIT_FAILURE, "Failed to add rules\n");
}
- /* Call lcore_main on the master core only. */
+ /* Call lcore_main on the main core only. */
lcore_main(cls_app);
return 0;
if (ret < 0)
rte_panic("Cannot init EAL\n");
- /* call lcore_hello() on every slave lcore */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ /* call lcore_hello() on every worker lcore */
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(lcore_hello, NULL, lcore_id);
}
- /* call it on master lcore too */
+ /* call it on main lcore too */
lcore_hello(NULL);
rte_eal_mp_wait_lcore();
ioat_tx_port(&cfg.ports[i]);
}
-/* Main rx and tx loop if only one slave lcore available */
+/* Main rx and tx loop if only one worker lcore available */
static void
rxtx_main_loop(void)
{
cfg.nb_lcores = rte_lcore_count() - 1;
if (cfg.nb_lcores < 1)
rte_exit(EXIT_FAILURE,
- "There should be at least one slave lcore.\n");
+ "There should be at least one worker lcore.\n");
if (copy_mode == COPY_MODE_IOAT_NUM)
assign_rawdevs();
assign_rings();
start_forwarding_cores();
- /* master core prints stats while other cores forward */
+ /* main core prints stats while other cores forward */
print_stats(argv[0]);
/* force_quit is true when we get here */
check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
rte_eal_mp_remote_launch(
thread_main,
NULL,
- SKIP_MASTER);
+ SKIP_MAIN);
/* Script */
if (app.script_name)
#endif
/**
- * Master thead: data plane thread context
+ * Main thread: data plane thread context
*/
struct thread {
struct rte_ring *msgq_req;
static struct thread_data thread_data[RTE_MAX_LCORE];
/**
- * Master thread: data plane thread init
+ * Main thread: data plane thread init
*/
static void
thread_free(void)
{
uint32_t i;
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
char name[NAME_MAX];
struct rte_ring *msgq_req, *msgq_rsp;
struct thread *t = &thread[i];
return -1;
}
- /* Master thread records */
+ /* Main thread records */
t->msgq_req = msgq_req;
t->msgq_rsp = msgq_rsp;
t->enabled = 1;
}
/**
- * Master thread & data plane threads: message passing
+ * Main thread & data plane threads: message passing
*/
enum thread_req_type {
THREAD_REQ_PIPELINE_ENABLE = 0,
};
/**
- * Master thread
+ * Main thread
*/
static struct thread_msg_req *
thread_msg_alloc(void)
}
/**
- * Master thread & data plane threads: message passing
+ * Main thread & data plane threads: message passing
*/
enum pipeline_req_type {
/* Port IN */
};
/**
- * Master thread
+ * Main thread
*/
static struct pipeline_msg_req *
pipeline_msg_alloc(void)
signal(SIGINT, signal_handler);
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
/* Set two cores as eth cores for Rx & Tx */
- /* Use first core other than master core as Rx core */
+ /* Use first core other than main core as Rx core */
eth_core_id = rte_get_next_lcore(0, /* curr core */
- 1, /* skip master core */
+ 1, /* skip main core */
0 /* wrap */);
rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
/* Use next core as Tx core */
eth_core_id = rte_get_next_lcore(eth_core_id, /* curr core */
- 1, /* skip master core */
+ 1, /* skip main core */
0 /* wrap */);
rte_bitmap_set(em_conf->eth_core_mask, eth_core_id);
#endif /* STATS_INTERVAL */
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
rte_exit(EXIT_FAILURE, "Cannot build the multicast hash\n");
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
conf.mbuf_size = MAX_PACKET_SZ;
/*
* The first KNI device associated to a port
- * is the master, for multiple kernel thread
+ * is the main, for multiple kernel thread
* environment.
*/
if (i == 0) {
"Could not create link status thread!\n");
/* Launch per-lcore function on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(i) {
if (rte_eal_wait_lcore(i) < 0)
return -1;
}
if (rte_lcore_count() > 1)
printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
- /* Call lcore_main on the master core only. */
+ /* Call lcore_main on the main core only. */
lcore_main();
return 0;
if (unlikely(timer_tsc >=
(uint64_t)timer_period)) {
- /* do this only on master core */
- if (lcore_id == rte_get_master_lcore()
+ /* do this only on main core */
+ if (lcore_id == rte_get_main_lcore()
&& options->refresh_period) {
print_stats();
timer_tsc = 0;
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)&options,
- CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
event_d_conf.nb_event_port_enqueue_depth =
dev_info.max_event_port_enqueue_depth;
- /* Ignore Master core and service cores. */
+ /* Ignore Main core and service cores. */
num_workers = rte_lcore_count() - 1 - rte_service_lcore_count();
if (dev_info.max_event_ports < num_workers)
num_workers = dev_info.max_event_ports;
event_d_conf.nb_event_port_enqueue_depth =
dev_info.max_event_port_enqueue_depth;
- /* Ignore Master core. */
+ /* Ignore Main core. */
num_workers = rte_lcore_count() - 1;
if (dev_info.max_event_ports < num_workers)
num_workers = dev_info.max_event_ports;
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
- rx_lcore_id == rte_get_master_lcore() ||
+ rx_lcore_id == rte_get_main_lcore() ||
poll_rsrc->lcore_queue_conf[rx_lcore_id].n_rx_port ==
rsrc->rx_queue_per_lcore) {
rx_lcore_id++;
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, rsrc,
- SKIP_MASTER);
+ SKIP_MAIN);
l2fwd_event_print_stats(rsrc);
if (rsrc->event_mode) {
struct l2fwd_event_resources *evt_rsrc =
RTE_LOG(INFO, L2FWD, "Stats display disabled\n");
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
) != 0 )
rte_exit(EXIT_FAILURE, "Stats setup failure.\n");
}
- /* launch per-lcore init on every slave lcore */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ /* launch per-lcore init on every worker lcore */
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
if (qconf->n_rx_port == 0)
rte_delay_ms(5);
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
/* if timer has reached its timeout */
if (unlikely(timer_tsc >= timer_period)) {
- /* do this only on master core */
- if (lcore_id == rte_get_master_lcore()) {
+ /* do this only on main core */
+ if (lcore_id == rte_get_main_lcore()) {
print_stats();
/* reset the timer */
timer_tsc = 0;
ret = 0;
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0) {
ret = -1;
break;
check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
return -1;
}
- if (lcore == rte_get_master_lcore()) {
- printf("Error: lcore %u is master lcore\n", lcore);
+ if (lcore == rte_get_main_lcore()) {
+ printf("Error: lcore %u is main lcore\n", lcore);
return -1;
}
socketid = rte_lcore_to_socket_id(lcore);
route_str, i);
}
- /* Launch per-lcore init on every slave lcore */
- rte_eal_mp_remote_launch(graph_main_loop, NULL, SKIP_MASTER);
+ /* Launch per-lcore init on every worker lcore */
+ rte_eal_mp_remote_launch(graph_main_loop, NULL, SKIP_MAIN);
- /* Accumulate and print stats on master until exit */
+ /* Accumulate and print stats on main until exit */
if (rte_graph_has_stats_feature())
print_stats();
- /* Wait for slave cores to exit */
+ /* Wait for worker cores to exit */
ret = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
ret = rte_eal_wait_lcore(lcore_id);
/* Destroy graph */
if (ret < 0 || rte_graph_destroy(
"off\n", lcore, socketid);
}
if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
- printf("cannot enable master core %d in config for telemetry mode\n",
+ printf("cannot enable main core %d in config for telemetry mode\n",
rte_lcore_id());
return -1;
}
uint64_t app_eps = 0, app_fps = 0, app_br = 0;
uint64_t count = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
qconf = &lcore_conf[lcore_id];
if (qconf->n_rx_queue == 0)
continue;
RTE_SET_USED(lcore_id);
- if (rte_get_master_lcore() != lcore_id) {
- rte_panic("timer on lcore:%d which is not master core:%d\n",
+ if (rte_get_main_lcore() != lcore_id) {
+ rte_panic("timer on lcore:%d which is not main core:%d\n",
lcore_id,
- rte_get_master_lcore());
+ rte_get_main_lcore());
}
RTE_LOG(INFO, POWER, "Bring up the Timer\n");
/* launch per-lcore init on every lcore */
if (app_mode == APP_MODE_LEGACY) {
- rte_eal_mp_remote_launch(main_legacy_loop, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(main_legacy_loop, NULL, CALL_MAIN);
} else if (app_mode == APP_MODE_EMPTY_POLL) {
empty_poll_stop = false;
rte_eal_mp_remote_launch(main_empty_poll_loop, NULL,
- SKIP_MASTER);
+ SKIP_MAIN);
} else if (app_mode == APP_MODE_TELEMETRY) {
unsigned int i;
else
rte_exit(EXIT_FAILURE, "failed to register metrics names");
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_spinlock_init(&stats[lcore_id].telemetry_lock);
}
rte_timer_init(&telemetry_timer);
handle_app_stats,
"Returns global power stats. Parameters: None");
rte_eal_mp_remote_launch(main_telemetry_loop, NULL,
- SKIP_MASTER);
+ SKIP_MAIN);
} else if (app_mode == APP_MODE_INTERRUPT) {
- rte_eal_mp_remote_launch(main_intr_loop, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(main_intr_loop, NULL, CALL_MAIN);
}
if (app_mode == APP_MODE_EMPTY_POLL || app_mode == APP_MODE_TELEMETRY)
launch_timer(rte_lcore_id());
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
ret = 0;
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(l3fwd_lkp.main_loop, NULL, CALL_MAIN);
if (evt_rsrc->enabled) {
for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
rte_event_eth_rx_adapter_stop(
/* if timer has reached its timeout */
if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
- /* do this only on master core */
- if (lcore_id == rte_get_master_lcore()) {
+ /* do this only on main core */
+ if (lcore_id == rte_get_main_lcore()) {
print_stats();
/* reset the timer */
timer_tsc = 0;
check_all_ports_link_status(nb_ports, lsi_enabled_port_mask);
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(lsi_launch_one_lcore, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(lsi_launch_one_lcore, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
/*
* This function displays the recorded statistics for each port
* and for each client. It uses ANSI terminal codes to clear
- * screen when called. It is called from a single non-master
+ * screen when called. It is called from a single worker
* thread in the server process, when the process is run with more
* than one lcore enabled.
*/
}
/*
- * The function called from each non-master lcore used by the process.
+ * The function called from each worker lcore used by the process.
* The test_and_set function is used to randomly pick a single lcore on which
* the code to display the statistics will run. Otherwise, the code just
* repeatedly sleeps.
}
/*
- * Function called by the master lcore of the DPDK process.
+ * Function called by the main lcore of the DPDK process.
*/
static void
do_packet_forwarding(void)
/* clear statistics */
clear_stats();
- /* put all other cores to sleep bar master */
- rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MASTER);
+ /* put all other cores to sleep except main */
+ rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MAIN);
do_packet_forwarding();
return 0;
RTE_LOG(INFO, APP, "Finished Process Init.\n");
- /* call lcore_recv() on every slave lcore */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ /* call lcore_recv() on every worker lcore */
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(lcore_recv, NULL, lcore_id);
}
- /* call cmd prompt on master lcore */
+ /* call cmd prompt on main lcore */
struct cmdline *cl = cmdline_stdin_new(simple_mp_ctx, "\nsimple_mp > ");
if (cl == NULL)
rte_exit(EXIT_FAILURE, "Cannot create cmdline instance\n");
RTE_LOG(INFO, APP, "Finished Process Init.\n");
- rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MAIN);
return 0;
}
uint32_t lcore_id;
/* Stop transmission first. */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
conf = &fwd_lcore_conf[lcore_id];
if (!conf->nb_stream)
uint8_t lcore_num, nb_extra;
lcore_num = rte_lcore_count();
- /* Exclude master core */
+ /* Exclude main core */
lcore_num--;
nb_streams = (fwd_mode == IOFWD) ? num_queues * 2 : num_queues;
sm_id = 0;
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
conf = &fwd_lcore_conf[lcore_id];
if (i < nb_extra) {
}
/* Print packet forwading config. */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
conf = &fwd_lcore_conf[lcore_id];
if (!conf->nb_stream)
assign_stream_to_lcores();
in_test = 1;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
conf = &fwd_lcore_conf[lcore_id];
if (!conf->nb_stream)
struct ntb_fwd_lcore_conf *conf;
uint32_t lcore_id;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
conf = &fwd_lcore_conf[lcore_id];
if (!conf->nb_stream)
NULL,
};
-/* prompt function, called from main on MASTER lcore */
+/* prompt function, called from main on MAIN lcore */
static void
prompt(void)
{
{
uint16_t i;
struct rte_eth_stats eth_stats;
- unsigned int lcore_id, last_lcore_id, master_lcore_id, end_w_lcore_id;
+ unsigned int lcore_id, last_lcore_id, main_lcore_id, end_w_lcore_id;
last_lcore_id = get_last_lcore_id();
- master_lcore_id = rte_get_master_lcore();
+ main_lcore_id = rte_get_main_lcore();
end_w_lcore_id = get_previous_lcore_id(last_lcore_id);
printf("\nRX thread stats:\n");
for (lcore_id = 0; lcore_id <= end_w_lcore_id; lcore_id++) {
if (insight_worker
&& rte_lcore_is_enabled(lcore_id)
- && lcore_id != master_lcore_id) {
+ && lcore_id != main_lcore_id) {
printf("\nWorker thread stats on core [%u]:\n",
lcore_id);
printf(" - Pkts deqd from workers ring: %"PRIu64"\n",
{
int ret;
unsigned nb_ports;
- unsigned int lcore_id, last_lcore_id, master_lcore_id;
+ unsigned int lcore_id, last_lcore_id, main_lcore_id;
uint16_t port_id;
uint16_t nb_ports_available;
struct worker_thread_args worker_args = {NULL, NULL};
}
last_lcore_id = get_last_lcore_id();
- master_lcore_id = rte_get_master_lcore();
+ main_lcore_id = rte_get_main_lcore();
worker_args.ring_in = rx_to_workers;
worker_args.ring_out = workers_to_tx;
- /* Start worker_thread() on all the available slave cores but the last 1 */
+ /* Start worker_thread() on all available worker cores except the last one */
for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id); lcore_id++)
- if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id)
+ if (rte_lcore_is_enabled(lcore_id) && lcore_id != main_lcore_id)
rte_eal_remote_launch(worker_thread, (void *)&worker_args,
lcore_id);
if (disable_reorder) {
- /* Start tx_thread() on the last slave core */
+ /* Start tx_thread() on the last worker core */
rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
last_lcore_id);
} else {
send_args.ring_in = workers_to_tx;
- /* Start send_thread() on the last slave core */
+ /* Start send_thread() on the last worker core */
rte_eal_remote_launch((lcore_function_t *)send_thread,
(void *)&send_args, last_lcore_id);
}
- /* Start rx_thread() on the master core */
+ /* Start rx_thread() on the main core */
rx_thread(rx_to_workers);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
/*
* Start scheduler with initial lthread on lcore
*
- * This lthread loop spawns all rx and tx lthreads on master lcore
+ * This lthread loop spawns all rx and tx lthreads on main lcore
*/
static void *
}
/*
- * Start master scheduler with initial lthread spawning rx and tx lthreads
- * (main_lthread_master).
+ * Start main scheduler with initial lthread spawning rx and tx lthreads
+ * (main_lthread_main).
*/
static int
-lthread_master_spawner(__rte_unused void *arg) {
+lthread_main_spawner(__rte_unused void *arg) {
struct lthread *lt;
int lcore_id = rte_lcore_id();
#endif
lthread_num_schedulers_set(nb_lcores);
- rte_eal_mp_remote_launch(sched_spawner, NULL, SKIP_MASTER);
- lthread_master_spawner(NULL);
+ rte_eal_mp_remote_launch(sched_spawner, NULL, SKIP_MAIN);
+ lthread_main_spawner(NULL);
} else {
printf("Starting P-Threading Model\n");
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(pthread_run, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(pthread_run, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
lthread_num_schedulers_set(num_sched);
/* launch all threads */
- rte_eal_mp_remote_launch(lthread_scheduler, (void *)NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(lthread_scheduler, (void *)NULL, CALL_MAIN);
/* wait for threads to stop */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_wait_lcore(lcore_id);
}
return 0;
rte_eal_mp_remote_launch(
thread_main,
NULL,
- SKIP_MASTER);
+ SKIP_MAIN);
/* Script */
if (app.script_name)
{
uint32_t i;
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
char name[NAME_MAX];
struct rte_ring *msgq_req, *msgq_rsp;
struct thread *t = &thread[i];
}
/*
 * Parse the PTP FOLLOWUP message and send DELAY_REQ to the master clock.
*/
static void
parse_fup(struct ptpv2_data_slave_ordinary *ptp_data)
if (rte_lcore_count() > 1)
printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
- /* Call lcore_main on the master core only. */
+ /* Call lcore_main on the main core only. */
lcore_main();
return 0;
rte_exit(EXIT_FAILURE, "Invalid configure flow table\n");
/* Launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
#define MAX_OPT_VALUES 8
#define SYS_CPU_DIR "/sys/devices/system/cpu/cpu%u/topology/"
-static uint32_t app_master_core = 1;
+static uint32_t app_main_core = 1;
static uint32_t app_numa_mask;
static uint64_t app_used_core_mask = 0;
static uint64_t app_used_port_mask = 0;
" \n"
"Application optional parameters: \n"
" --i : run in interactive mode (default value is %u) \n"
- " --mst I : master core index (default value is %u) \n"
+ " --mnc I : main core index (default value is %u) \n"
" --rsz \"A, B, C\" : Ring sizes \n"
" A = Size (in number of buffer descriptors) of each of the NIC RX \n"
" rings read by the I/O RX lcores (default value is %u) \n"
static void
app_usage(const char *prgname)
{
- printf(usage, prgname, APP_INTERACTIVE_DEFAULT, app_master_core,
+ printf(usage, prgname, APP_INTERACTIVE_DEFAULT, app_main_core,
APP_RX_DESC_DEFAULT, APP_RING_SIZE, APP_TX_DESC_DEFAULT,
MAX_PKT_RX_BURST, PKT_ENQUEUE, PKT_DEQUEUE,
MAX_PKT_TX_BURST, NB_MBUF,
cm |= (1ULL << i);
}
- cm |= (1ULL << rte_get_master_lcore());
+ cm |= (1ULL << rte_get_main_lcore());
return cm;
}
static struct option lgopts[] = {
{ "pfc", 1, 0, 0 },
- { "mst", 1, 0, 0 },
+ { "mnc", 1, 0, 0 },
{ "rsz", 1, 0, 0 },
{ "bsz", 1, 0, 0 },
{ "msz", 1, 0, 0 },
}
break;
}
- if (str_is(optname, "mst")) {
- app_master_core = (uint32_t)atoi(optarg);
+ if (str_is(optname, "mnc")) {
+ app_main_core = (uint32_t)atoi(optarg);
break;
}
if (str_is(optname, "rsz")) {
}
}
- /* check master core index validity */
- for(i = 0; i <= app_master_core; i++) {
- if (app_used_core_mask & (1u << app_master_core)) {
- RTE_LOG(ERR, APP, "Master core index is not configured properly\n");
+ /* check main core index validity */
+ for (i = 0; i <= app_main_core; i++) {
+ if (app_used_core_mask & (1u << app_main_core)) {
+ RTE_LOG(ERR, APP, "Main core index is not configured properly\n");
app_usage(prgname);
return -1;
}
}
- app_used_core_mask |= 1u << app_master_core;
+ app_used_core_mask |= 1u << app_main_core;
if ((app_used_core_mask != app_eal_core_mask()) ||
- (app_master_core != rte_get_master_lcore())) {
+ (app_main_core != rte_get_main_lcore())) {
RTE_LOG(ERR, APP, "EAL core mask not configured properly, must be %" PRIx64
" instead of %" PRIx64 "\n" , app_used_core_mask, app_eal_core_mask());
return -1;
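Note the constraint encoded above: the application-level main core must not collide with cores already claimed for packet flows, and it must match both the EAL core list and rte_get_main_lcore(). A hypothetical invocation that satisfies this (application name and core layout made up for illustration):

./app -l 1-4 --main-lcore 1 -- --mnc 1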
NULL,
};
-/* prompt function, called from main on MASTER lcore */
+/* prompt function, called from main on MAIN lcore */
void
prompt(void)
{
return -1;
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(app_main_loop, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(app_main_loop, NULL, SKIP_MAIN);
if (interactive) {
sleep(1);
printf("\nWARNING: Too much enabled lcores - "
"App uses only 1 lcore\n");
- /* call lcore_main on master core only */
+ /* call lcore_main on main core only */
lcore_main();
return 0;
}
/*
* This function displays the recorded statistics for each port
* and for each node. It uses ANSI terminal codes to clear
- * screen when called. It is called from a single non-master
+ * screen when called. It is called from a single worker
* thread in the server process, when the process is run with more
* than one lcore enabled.
*/
}
/*
- * The function called from each non-master lcore used by the process.
+ * The function called from each non-main lcore used by the process.
* The test_and_set function is used to randomly pick a single lcore on which
* the code to display the statistics will run. Otherwise, the code just
* repeatedly sleeps.
}
/*
- * Function called by the master lcore of the DPDK process.
+ * Function called by the main lcore of the DPDK process.
*/
static void
do_packet_forwarding(void)
/* clear statistics */
clear_stats();
- /* put all other cores to sleep bar master */
- rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MASTER);
+ /* put all other cores to sleep except main */
+ rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MAIN);
do_packet_forwarding();
return 0;
if (rte_lcore_count() > 1)
printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
- /* Call lcore_main on the master core only. */
+ /* Call lcore_main on the main core only. */
lcore_main();
return 0;
{
int lcore;
- RTE_LCORE_FOREACH_SLAVE(lcore) {
+ RTE_LCORE_FOREACH_WORKER(lcore) {
lcore_info[lcore].lcore_ll =
malloc(sizeof(struct lcore_ll_info));
if (lcore_info[lcore].lcore_ll == NULL) {
rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
/* Set the dev_removal_flag on each lcore. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
+ RTE_LCORE_FOREACH_WORKER(lcore) {
lcore_info[lcore].lcore_ll->dev_removal_flag =
REQUEST_DEV_REMOVAL;
}
* the device removed from the linked lists and that the devices
* are no longer in use.
*/
- RTE_LCORE_FOREACH_SLAVE(lcore) {
+ RTE_LCORE_FOREACH_WORKER(lcore) {
while (lcore_info[lcore].lcore_ll->dev_removal_flag
!= ACK_DEV_REMOVAL)
rte_pause();
vdev->remove = 0;
/* Find a suitable lcore to add the device. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
+ RTE_LCORE_FOREACH_WORKER(lcore) {
if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
device_num_min = lcore_info[lcore].lcore_ll->device_num;
core_add = lcore;
}
/* Launch all data cores. */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(switch_worker,
mbuf_pool, lcore_id);
}
"failed to start vhost driver.\n");
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ RTE_LCORE_FOREACH_WORKER(lcore_id)
rte_eal_wait_lcore(lcore_id);
return 0;
rte_timer_init(&timer0);
rte_timer_init(&timer1);
- /* load timer0, every second, on master lcore, reloaded automatically */
+ /* load timer0, every second, on main lcore, reloaded automatically */
hz = rte_get_timer_hz();
lcore_id = rte_lcore_id();
rte_timer_reset(&timer0, hz, PERIODICAL, lcore_id, timer0_cb, NULL);
lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
rte_timer_reset(&timer1, hz/3, SINGLE, lcore_id, timer1_cb, NULL);
- /* call lcore_mainloop() on every slave lcore */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ /* call lcore_mainloop() on every worker lcore */
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);
}
- /* call it on master lcore too */
+ /* call it on main lcore too */
(void) lcore_mainloop(NULL);
return 0;
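The loop body launched above is elided in this hunk; typically it just services the per-lcore timer list. A minimal sketch of such a loop, assuming a plain sleep-based poll rather than the example's actual pacing:

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_timer.h>

/* assumed body of lcore_mainloop(): drain expired timers forever */
static int
lcore_mainloop(__rte_unused void *arg)
{
	while (1) {
		/* run callbacks of timers scheduled on this lcore */
		rte_timer_manage();
		/* coarse pacing; real code often rate-limits with rte_rdtsc() */
		rte_delay_us_sleep(10);
	}
	return 0;
}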
/* Set the dev_removal_flag on each lcore. */
- RTE_LCORE_FOREACH_SLAVE(lcore)
+ RTE_LCORE_FOREACH_WORKER(lcore)
lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
/*
* we can be sure that they can no longer access the device removed
* from the linked lists and that the devices are no longer in use.
*/
- RTE_LCORE_FOREACH_SLAVE(lcore) {
+ RTE_LCORE_FOREACH_WORKER(lcore) {
while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
rte_pause();
}
vdev->remove = 0;
/* Find a suitable lcore to add the device. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
+ RTE_LCORE_FOREACH_WORKER(lcore) {
if (lcore_info[lcore].device_num < device_num_min) {
device_num_min = lcore_info[lcore].device_num;
core_add = lcore;
}
/* Launch all data cores. */
- RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ RTE_LCORE_FOREACH_WORKER(lcore_id)
rte_eal_remote_launch(switch_worker, NULL, lcore_id);
if (client_mode)
}
}
- RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ RTE_LCORE_FOREACH_WORKER(lcore_id)
rte_eal_wait_lcore(lcore_id);
return 0;
}
/* call lcore_main() on every lcore */
- rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
}
- /* call lcore_main() on every slave lcore */
+ /* call lcore_main() on every worker lcore */
i = 0;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id);
}
- /* call on master too */
+ /* call on main too */
(void) lcore_main((void*)i);
return 0;
total_size -= default_size;
}
#else
- /* in 32-bit mode, allocate all of the memory only on master
+ /* in 32-bit mode, allocate all of the memory only on main
* lcore socket
*/
total_size = internal_conf->memory;
for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
socket++) {
struct rte_config *cfg = rte_eal_get_configuration();
- unsigned int master_lcore_socket;
+ unsigned int main_lcore_socket;
- master_lcore_socket =
- rte_lcore_to_socket_id(cfg->master_lcore);
+ main_lcore_socket =
+ rte_lcore_to_socket_id(cfg->main_lcore);
- if (master_lcore_socket != socket)
+ if (main_lcore_socket != socket)
continue;
/* Update sizes */
* Wait until an lcore finishes its job.
*/
int
-rte_eal_wait_lcore(unsigned slave_id)
+rte_eal_wait_lcore(unsigned worker_id)
{
- if (lcore_config[slave_id].state == WAIT)
+ if (lcore_config[worker_id].state == WAIT)
return 0;
- while (lcore_config[slave_id].state != WAIT &&
- lcore_config[slave_id].state != FINISHED)
+ while (lcore_config[worker_id].state != WAIT &&
+ lcore_config[worker_id].state != FINISHED)
rte_pause();
rte_rmb();
/* we are in finished state, go to wait state */
- lcore_config[slave_id].state = WAIT;
- return lcore_config[slave_id].ret;
+ lcore_config[worker_id].state = WAIT;
+ return lcore_config[worker_id].ret;
}
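The return contract is worth spelling out: 0 means the lcore was already in WAIT; otherwise the caller receives whatever the launched function returned, and the lcore moves back to WAIT. A self-contained sketch of a caller relying on this (job() and the value 42 are invented):

#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>

static int
job(void *arg)
{
	(void)arg;
	return 42; /* handed back through rte_eal_wait_lcore() */
}

int
main(int argc, char **argv)
{
	unsigned int worker;
	int status;

	if (rte_eal_init(argc, argv) < 0)
		return -1;
	worker = rte_get_next_lcore(-1, 1, 0); /* first worker lcore */
	if (worker >= RTE_MAX_LCORE)
		return -1; /* no worker available */
	rte_eal_remote_launch(job, NULL, worker);
	status = rte_eal_wait_lcore(worker); /* blocks; returns 42 here */
	return status == 42 ? 0 : -1;
}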
/*
- * Check that every SLAVE lcores are in WAIT state, then call
- * rte_eal_remote_launch() for all of them. If call_master is true
- * (set to CALL_MASTER), also call the function on the master lcore.
+ * Check that each WORKER lcore is in a WAIT state, then call
+ * rte_eal_remote_launch() for all of them. If call_main is true
+ * (set to CALL_MAIN), also call the function on the main lcore.
*/
int
rte_eal_mp_remote_launch(int (*f)(void *), void *arg,
- enum rte_rmt_call_master_t call_master)
+ enum rte_rmt_call_main_t call_main)
{
int lcore_id;
- int master = rte_get_master_lcore();
+ int main_lcore = rte_get_main_lcore();
/* check state of lcores */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (lcore_config[lcore_id].state != WAIT)
return -EBUSY;
}
/* send messages to cores */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(f, arg, lcore_id);
}
- if (call_master == CALL_MASTER) {
- lcore_config[master].ret = f(arg);
- lcore_config[master].state = FINISHED;
+ if (call_main == CALL_MAIN) {
+ lcore_config[main_lcore].ret = f(arg);
+ lcore_config[main_lcore].state = FINISHED;
}
return 0;
}
/*
- * Return the state of the lcore identified by slave_id.
+ * Return the state of the lcore identified by worker_id.
*/
enum rte_lcore_state_t
rte_eal_get_lcore_state(unsigned lcore_id)
{
unsigned lcore_id;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_wait_lcore(lcore_id);
}
}
#include "eal_private.h"
#include "eal_thread.h"
-unsigned int rte_get_master_lcore(void)
+unsigned int rte_get_main_lcore(void)
{
- return rte_eal_get_configuration()->master_lcore;
+ return rte_eal_get_configuration()->main_lcore;
}
unsigned int rte_lcore_count(void)
return cfg->lcore_role[lcore_id] == ROLE_RTE;
}
-unsigned int rte_get_next_lcore(unsigned int i, int skip_master, int wrap)
+unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap)
{
i++;
if (wrap)
while (i < RTE_MAX_LCORE) {
if (!rte_lcore_is_enabled(i) ||
- (skip_master && (i == rte_get_master_lcore()))) {
+ (skip_main && (i == rte_get_main_lcore()))) {
i++;
if (wrap)
i %= RTE_MAX_LCORE;
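Both iteration macros are thin wrappers around this function (skip_main = 1 gives the worker-only walk), and it is useful on its own; a sketch of picking a peer lcore directly:

/* next enabled lcore after the caller, skipping the main lcore
 * (skip_main = 1) and wrapping past RTE_MAX_LCORE back to 0 (wrap = 1)
 */
static unsigned int
next_peer(void)
{
	return rte_get_next_lcore(rte_lcore_id(), 1, 1);
}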
{OPT_TRACE_BUF_SIZE, 1, NULL, OPT_TRACE_BUF_SIZE_NUM },
{OPT_TRACE_MODE, 1, NULL, OPT_TRACE_MODE_NUM },
{OPT_MASTER_LCORE, 1, NULL, OPT_MASTER_LCORE_NUM },
+ {OPT_MAIN_LCORE, 1, NULL, OPT_MAIN_LCORE_NUM },
{OPT_MBUF_POOL_OPS_NAME, 1, NULL, OPT_MBUF_POOL_OPS_NAME_NUM},
{OPT_NO_HPET, 0, NULL, OPT_NO_HPET_NUM },
{OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM },
static struct device_option_list devopt_list =
TAILQ_HEAD_INITIALIZER(devopt_list);
-static int master_lcore_parsed;
+static int main_lcore_parsed;
static int mem_parsed;
static int core_parsed;
for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE;
j++, idx++) {
if ((1 << j) & val) {
- /* handle master lcore already parsed */
+ /* handle main lcore already parsed */
uint32_t lcore = idx;
- if (master_lcore_parsed &&
- cfg->master_lcore == lcore) {
+ if (main_lcore_parsed &&
+ cfg->main_lcore == lcore) {
RTE_LOG(ERR, EAL,
- "lcore %u is master lcore, cannot use as service core\n",
+ "lcore %u is main lcore, cannot use as service core\n",
idx);
return -1;
}
min = idx;
for (idx = min; idx <= max; idx++) {
if (cfg->lcore_role[idx] != ROLE_SERVICE) {
- /* handle master lcore already parsed */
+ /* handle main lcore already parsed */
uint32_t lcore = idx;
- if (cfg->master_lcore == lcore &&
- master_lcore_parsed) {
+ if (cfg->main_lcore == lcore &&
+ main_lcore_parsed) {
RTE_LOG(ERR, EAL,
- "Error: lcore %u is master lcore, cannot use as service core\n",
+ "Error: lcore %u is main lcore, cannot use as service core\n",
idx);
return -1;
}
return 0;
}
-/* Changes the lcore id of the master thread */
+/* Changes the lcore id of the main thread */
static int
-eal_parse_master_lcore(const char *arg)
+eal_parse_main_lcore(const char *arg)
{
char *parsing_end;
struct rte_config *cfg = rte_eal_get_configuration();
errno = 0;
- cfg->master_lcore = (uint32_t) strtol(arg, &parsing_end, 0);
+ cfg->main_lcore = (uint32_t) strtol(arg, &parsing_end, 0);
if (errno || parsing_end[0] != 0)
return -1;
- if (cfg->master_lcore >= RTE_MAX_LCORE)
+ if (cfg->main_lcore >= RTE_MAX_LCORE)
return -1;
- master_lcore_parsed = 1;
+ main_lcore_parsed = 1;
- /* ensure master core is not used as service core */
- if (lcore_config[cfg->master_lcore].core_role == ROLE_SERVICE) {
+ /* ensure main core is not used as service core */
+ if (lcore_config[cfg->main_lcore].core_role == ROLE_SERVICE) {
RTE_LOG(ERR, EAL,
- "Error: Master lcore is used as a service core\n");
+ "Error: Main lcore is used as a service core\n");
return -1;
}
break;
case OPT_MASTER_LCORE_NUM:
- if (eal_parse_master_lcore(optarg) < 0) {
+ fprintf(stderr,
+ "Option --" OPT_MASTER_LCORE
+ " is deprecated use " OPT_MAIN_LCORE "\n");
+ /* fallthrough */
+ case OPT_MAIN_LCORE_NUM:
+ if (eal_parse_main_lcore(optarg) < 0) {
RTE_LOG(ERR, EAL, "invalid parameter for --"
- OPT_MASTER_LCORE "\n");
+ OPT_MAIN_LCORE "\n");
return -1;
}
break;
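Both spellings are accepted during the transition: the legacy option only prints the notice above and then falls through to the new handler. Hypothetical invocations (application name made up):

./app -l 0-3 --main-lcore 2      (new spelling)
./app -l 0-3 --master-lcore 2    (still parsed, after printing the deprecation notice)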
RTE_CPU_AND(cpuset, cpuset, &default_set);
- /* if no remaining cpu, use master lcore cpu affinity */
+ /* if no remaining cpu, use main lcore cpu affinity */
if (!CPU_COUNT(cpuset)) {
- memcpy(cpuset, &lcore_config[rte_get_master_lcore()].cpuset,
+ memcpy(cpuset, &lcore_config[rte_get_main_lcore()].cpuset,
sizeof(*cpuset));
}
}
if (internal_conf->process_type == RTE_PROC_AUTO)
internal_conf->process_type = eal_proc_type_detect();
- /* default master lcore is the first one */
- if (!master_lcore_parsed) {
- cfg->master_lcore = rte_get_next_lcore(-1, 0, 0);
- if (cfg->master_lcore >= RTE_MAX_LCORE)
+ /* default main lcore is the first one */
+ if (!main_lcore_parsed) {
+ cfg->main_lcore = rte_get_next_lcore(-1, 0, 0);
+ if (cfg->main_lcore >= RTE_MAX_LCORE)
return -1;
- lcore_config[cfg->master_lcore].core_role = ROLE_RTE;
+ lcore_config[cfg->main_lcore].core_role = ROLE_RTE;
}
compute_ctrl_threads_cpuset(internal_cfg);
const struct internal_config *internal_conf =
eal_get_internal_configuration();
- if (cfg->lcore_role[cfg->master_lcore] != ROLE_RTE) {
- RTE_LOG(ERR, EAL, "Master lcore is not enabled for DPDK\n");
+ if (cfg->lcore_role[cfg->main_lcore] != ROLE_RTE) {
+ RTE_LOG(ERR, EAL, "Main lcore is not enabled for DPDK\n");
return -1;
}
" '( )' can be omitted for single element group,\n"
" '@' can be omitted if cpus and lcores have the same value\n"
" -s SERVICE COREMASK Hexadecimal bitmask of cores to be used as service cores\n"
- " --"OPT_MASTER_LCORE" ID Core ID that is used as master\n"
+ " --"OPT_MAIN_LCORE" ID Core ID that is used as main\n"
" --"OPT_MBUF_POOL_OPS_NAME" Pool ops name for mbuf to use\n"
" -n CHANNELS Number of memory channels\n"
" -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n"
OPT_TRACE_BUF_SIZE_NUM,
#define OPT_TRACE_MODE "trace-mode"
OPT_TRACE_MODE_NUM,
+#define OPT_MAIN_LCORE "main-lcore"
+ OPT_MAIN_LCORE_NUM,
#define OPT_MASTER_LCORE "master-lcore"
OPT_MASTER_LCORE_NUM,
#define OPT_MBUF_POOL_OPS_NAME "mbuf-pool-ops-name"
*/
struct lcore_config {
pthread_t thread_id; /**< pthread identifier */
- int pipe_master2slave[2]; /**< communication pipe with master */
- int pipe_slave2master[2]; /**< communication pipe with master */
+ int pipe_main2worker[2]; /**< communication pipe with main */
+ int pipe_worker2main[2]; /**< communication pipe with main */
lcore_function_t * volatile f; /**< function to call */
void * volatile arg; /**< argument of function */
* The global RTE configuration structure.
*/
struct rte_config {
- uint32_t master_lcore; /**< Id of the master lcore */
+ uint32_t main_lcore; /**< Id of the main lcore */
uint32_t lcore_count; /**< Number of available logical cores. */
uint32_t numa_node_count; /**< Number of detected NUMA nodes. */
uint32_t numa_nodes[RTE_MAX_NUMA_NODES]; /**< List of detected NUMA nodes. */
lcore_id = rte_lcore_id();
if (unlikely(lcore_id == LCORE_ID_ANY))
- lcore_id = rte_get_master_lcore();
+ lcore_id = rte_get_main_lcore();
return &rand_states[lcore_id];
}
struct rte_config *cfg = rte_eal_get_configuration();
for (i = 0; i < RTE_MAX_LCORE; i++) {
if (lcore_config[i].core_role == ROLE_SERVICE) {
- if ((unsigned int)i == cfg->master_lcore)
+ if ((unsigned int)i == cfg->main_lcore)
continue;
rte_service_lcore_add(i);
count++;
int socket_id;
const struct rte_config *config = rte_eal_get_configuration();
- socket_id = rte_lcore_to_socket_id(config->master_lcore);
+ socket_id = rte_lcore_to_socket_id(config->main_lcore);
if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
- RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
+ RTE_LOG(WARNING, EAL, "WARNING: Main core has no memory on local socket!\n");
}
eal_check_mem_on_local_socket();
if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
- &lcore_config[config->master_lcore].cpuset) != 0) {
+ &lcore_config[config->main_lcore].cpuset) != 0) {
rte_eal_init_alert("Cannot set affinity");
rte_errno = EINVAL;
return -1;
}
- __rte_thread_init(config->master_lcore,
- &lcore_config[config->master_lcore].cpuset);
+ __rte_thread_init(config->main_lcore,
+ &lcore_config[config->main_lcore].cpuset);
ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
- RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
- config->master_lcore, thread_id, cpuset,
+ RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
+ config->main_lcore, thread_id, cpuset,
ret == 0 ? "" : "...");
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
/*
- * create communication pipes between master thread
+ * create communication pipes between main thread
* and children
*/
- if (pipe(lcore_config[i].pipe_master2slave) < 0)
+ if (pipe(lcore_config[i].pipe_main2worker) < 0)
rte_panic("Cannot create pipe\n");
- if (pipe(lcore_config[i].pipe_slave2master) < 0)
+ if (pipe(lcore_config[i].pipe_worker2main) < 0)
rte_panic("Cannot create pipe\n");
lcore_config[i].state = WAIT;
/* Set thread_name for aid in debugging. */
snprintf(thread_name, sizeof(thread_name),
- "lcore-slave-%d", i);
+ "lcore-worker-%d", i);
rte_thread_setname(lcore_config[i].thread_id, thread_name);
ret = pthread_setaffinity_np(lcore_config[i].thread_id,
}
/*
- * Launch a dummy function on all slave lcores, so that master lcore
+ * Launch a dummy function on all worker lcores, so that the main lcore
* knows they are all ready when this function returns.
*/
- rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
rte_eal_mp_wait_lcore();
/* initialize services so vdevs register service during bus_probe. */
#include "eal_thread.h"
/*
- * Send a message to a slave lcore identified by slave_id to call a
+ * Send a message to a worker lcore identified by worker_id to call a
* function f with argument arg. Once the execution is done, the
* remote lcore switches to the FINISHED state.
*/
int
-rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
+rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned worker_id)
{
int n;
char c = 0;
- int m2s = lcore_config[slave_id].pipe_master2slave[1];
- int s2m = lcore_config[slave_id].pipe_slave2master[0];
+ int m2w = lcore_config[worker_id].pipe_main2worker[1];
+ int w2m = lcore_config[worker_id].pipe_worker2main[0];
int rc = -EBUSY;
- if (lcore_config[slave_id].state != WAIT)
+ if (lcore_config[worker_id].state != WAIT)
goto finish;
- lcore_config[slave_id].f = f;
- lcore_config[slave_id].arg = arg;
+ lcore_config[worker_id].f = f;
+ lcore_config[worker_id].arg = arg;
/* send message */
n = 0;
while (n == 0 || (n < 0 && errno == EINTR))
- n = write(m2s, &c, 1);
+ n = write(m2w, &c, 1);
if (n < 0)
rte_panic("cannot write on configuration pipe\n");
/* wait ack */
do {
- n = read(s2m, &c, 1);
+ n = read(w2m, &c, 1);
} while (n < 0 && errno == EINTR);
if (n <= 0)
rc = 0;
finish:
- rte_eal_trace_thread_remote_launch(f, arg, slave_id, rc);
+ rte_eal_trace_thread_remote_launch(f, arg, worker_id, rc);
return rc;
}
int n, ret;
unsigned lcore_id;
pthread_t thread_id;
- int m2s, s2m;
+ int m2w, w2m;
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
thread_id = pthread_self();
/* retrieve our lcore_id from the configuration structure */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (thread_id == lcore_config[lcore_id].thread_id)
break;
}
if (lcore_id == RTE_MAX_LCORE)
rte_panic("cannot retrieve lcore id\n");
- m2s = lcore_config[lcore_id].pipe_master2slave[0];
- s2m = lcore_config[lcore_id].pipe_slave2master[1];
+ m2w = lcore_config[lcore_id].pipe_main2worker[0];
+ w2m = lcore_config[lcore_id].pipe_worker2main[1];
__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);
/* wait command */
do {
- n = read(m2s, &c, 1);
+ n = read(m2w, &c, 1);
} while (n < 0 && errno == EINTR);
if (n <= 0)
/* send ack */
n = 0;
while (n == 0 || (n < 0 && errno == EINTR))
- n = write(s2m, &c, 1);
+ n = write(w2m, &c, 1);
if (n < 0)
rte_panic("cannot write on configuration pipe\n");
/**
* Initialize the Environment Abstraction Layer (EAL).
*
- * This function is to be executed on the MASTER lcore only, as soon
+ * This function is to be executed on the MAIN lcore only, as soon
* as possible in the application's main() function.
*
* The function finishes the initialization process before main() is called.
- * It puts the SLAVE lcores in the WAIT state.
+ * It puts the WORKER lcores in the WAIT state.
*
* When the multi-partition feature is supported, depending on the
* configuration (if CONFIG_RTE_EAL_MAIN_PARTITION is disabled), this
RTE_TRACE_POINT(
rte_eal_trace_thread_remote_launch,
RTE_TRACE_POINT_ARGS(int (*f)(void *), void *arg,
- unsigned int slave_id, int rc),
+ unsigned int worker_id, int rc),
rte_trace_point_emit_ptr(f);
rte_trace_point_emit_ptr(arg);
- rte_trace_point_emit_u32(slave_id);
+ rte_trace_point_emit_u32(worker_id);
rte_trace_point_emit_int(rc);
)
RTE_TRACE_POINT(
/**
* Launch a function on another lcore.
*
- * To be executed on the MASTER lcore only.
+ * To be executed on the MAIN lcore only.
*
- * Sends a message to a slave lcore (identified by the slave_id) that
+ * Sends a message to a worker lcore (identified by the worker_id) that
* is in the WAIT state (this is true after the first call to
* rte_eal_init()). This can be checked by first calling
- * rte_eal_wait_lcore(slave_id).
+ * rte_eal_wait_lcore(worker_id).
*
* When the remote lcore receives the message, it switches to
* the RUNNING state, then calls the function f with argument arg. Once the
* execution is done, the remote lcore switches to a FINISHED state and
* the return value of f is stored in a local variable to be read using
* rte_eal_wait_lcore().
*
- * The MASTER lcore returns as soon as the message is sent and knows
+ * The MAIN lcore returns as soon as the message is sent and knows
* nothing about the completion of f.
*
* Note: This function is not designed to offer optimum
* The function to be called.
* @param arg
* The argument for the function.
- * @param slave_id
+ * @param worker_id
* The identifier of the lcore on which the function should be executed.
* @return
* - 0: Success. Execution of function f started on the remote lcore.
* - (-EBUSY): The remote lcore is not in a WAIT state.
*/
-int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned slave_id);
+int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned worker_id);
/**
- * This enum indicates whether the master core must execute the handler
+ * This enum indicates whether the main core must execute the handler
* launched on all logical cores.
*/
-enum rte_rmt_call_master_t {
- SKIP_MASTER = 0, /**< lcore handler not executed by master core. */
- CALL_MASTER, /**< lcore handler executed by master core. */
+enum rte_rmt_call_main_t {
+ SKIP_MAIN = 0, /**< lcore handler not executed by main core. */
+ CALL_MAIN, /**< lcore handler executed by main core. */
};
+/* These legacy definitions will be removed in future release */
+#define SKIP_MASTER RTE_DEPRECATED(SKIP_MASTER) SKIP_MAIN
+#define CALL_MASTER RTE_DEPRECATED(CALL_MASTER) CALL_MAIN
+
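A short sketch contrasting the two values; f() here is a placeholder lcore function, not part of the API:

#include <rte_launch.h>

static int
f(void *arg)		/* placeholder lcore function */
{
	(void)arg;
	return 0;
}

static void
run_both_ways(void)
{
	/* f runs on every worker lcore; the main lcore continues here */
	rte_eal_mp_remote_launch(f, NULL, SKIP_MAIN);
	/* ... work for the main lcore itself ... */
	rte_eal_mp_wait_lcore();

	/* f runs on the workers and on the main lcore too */
	rte_eal_mp_remote_launch(f, NULL, CALL_MAIN);
	rte_eal_mp_wait_lcore();
}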
/**
* Launch a function on all lcores.
*
- * Check that each SLAVE lcore is in a WAIT state, then call
+ * Check that each WORKER lcore is in a WAIT state, then call
* rte_eal_remote_launch() for each lcore.
*
* @param f
* The function to be called.
* @param arg
* The argument for the function.
- * @param call_master
- * If call_master set to SKIP_MASTER, the MASTER lcore does not call
- * the function. If call_master is set to CALL_MASTER, the function
- * is also called on master before returning. In any case, the master
+ * @param call_main
+ * If call_main is set to SKIP_MAIN, the MAIN lcore does not call
+ * the function. If call_main is set to CALL_MAIN, the function
+ * is also called on main before returning. In any case, the main
* lcore returns as soon as it finishes its job and knows nothing
* about the completion of f on the other lcores.
* @return
* case, no message is sent to any of the lcores.
*/
int rte_eal_mp_remote_launch(lcore_function_t *f, void *arg,
- enum rte_rmt_call_master_t call_master);
+ enum rte_rmt_call_main_t call_main);
/**
- * Get the state of the lcore identified by slave_id.
+ * Get the state of the lcore identified by worker_id.
*
- * To be executed on the MASTER lcore only.
+ * To be executed on the MAIN lcore only.
*
- * @param slave_id
+ * @param worker_id
* The identifier of the lcore.
* @return
* The state of the lcore.
*/
-enum rte_lcore_state_t rte_eal_get_lcore_state(unsigned slave_id);
+enum rte_lcore_state_t rte_eal_get_lcore_state(unsigned int worker_id);
/**
* Wait until an lcore finishes its job.
*
- * To be executed on the MASTER lcore only.
+ * To be executed on the MAIN lcore only.
*
- * If the slave lcore identified by the slave_id is in a FINISHED state,
+ * If the worker lcore identified by the worker_id is in a FINISHED state,
* switch to the WAIT state. If the lcore is in RUNNING state, wait until
* the lcore finishes its job and moves to the FINISHED state.
*
- * @param slave_id
+ * @param worker_id
* The identifier of the lcore.
* @return
- * - 0: If the lcore identified by the slave_id is in a WAIT state.
+ * - 0: If the lcore identified by the worker_id is in a WAIT state.
* - The value that was returned by the previous remote launch
- * function call if the lcore identified by the slave_id was in a
+ * function call if the lcore identified by the worker_id was in a
* FINISHED or RUNNING state. In this case, it changes the state
* of the lcore to WAIT.
*/
-int rte_eal_wait_lcore(unsigned slave_id);
+int rte_eal_wait_lcore(unsigned worker_id);
/**
* Wait until all lcores finish their jobs.
*
- * To be executed on the MASTER lcore only. Issue an
+ * To be executed on the MAIN lcore only. Issue an
* rte_eal_wait_lcore() for every lcore. The return values are
* ignored.
*
* After a call to rte_eal_mp_wait_lcore(), the caller can assume
- * that all slave lcores are in a WAIT state.
+ * that all worker lcores are in a WAIT state.
*/
void rte_eal_mp_wait_lcore(void);
}
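Because every worker is back in WAIT once rte_eal_mp_wait_lcore() returns, the launch/wait pair can be iterated without hitting -EBUSY; a sketch, reusing a placeholder f() as above:

static void
run_iterations(int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* all workers are guaranteed to be in WAIT here */
		rte_eal_mp_remote_launch(f, NULL, SKIP_MAIN);
		rte_eal_mp_wait_lcore();
	}
}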
/**
- * Get the id of the master lcore
+ * Get the id of the main lcore
*
* @return
- * the id of the master lcore
+ * the id of the main lcore
*/
-unsigned int rte_get_master_lcore(void);
+unsigned int rte_get_main_lcore(void);
+
+/**
+ * Deprecated function returning the id of the main lcore
+ *
+ * @return
+ * the id of the main lcore
+ */
+__rte_deprecated
+static inline unsigned int rte_get_master_lcore(void)
+{
+ return rte_get_main_lcore();
+}
/**
* Return the number of execution units (lcores) on the system.
*
* @param i
* The current lcore (reference).
- * @param skip_master
- * If true, do not return the ID of the master lcore.
+ * @param skip_main
+ * If true, do not return the ID of the main lcore.
* @param wrap
* If true, go back to 0 when RTE_MAX_LCORE is reached; otherwise,
* return RTE_MAX_LCORE.
* @return
* The next lcore_id or RTE_MAX_LCORE if not found.
*/
-unsigned int rte_get_next_lcore(unsigned int i, int skip_master, int wrap);
+unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap);
/**
* Macro to browse all running lcores.
*/
#define RTE_LCORE_FOREACH(i) \
for (i = rte_get_next_lcore(-1, 0, 0); \
- i<RTE_MAX_LCORE; \
+ i < RTE_MAX_LCORE; \
i = rte_get_next_lcore(i, 0, 0))
/**
- * Macro to browse all running lcores except the master lcore.
+ * Macro to browse all running lcores except the main lcore.
*/
-#define RTE_LCORE_FOREACH_SLAVE(i) \
+#define RTE_LCORE_FOREACH_WORKER(i) \
for (i = rte_get_next_lcore(-1, 1, 0); \
- i<RTE_MAX_LCORE; \
+ i < RTE_MAX_LCORE; \
i = rte_get_next_lcore(i, 1, 0))
+#define RTE_LCORE_FOREACH_SLAVE(l) \
+ RTE_DEPRECATED(RTE_LCORE_FOREACH_SLAVE) RTE_LCORE_FOREACH_WORKER(l)
+
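Old sources keep building during the transition: the legacy macro expands to the worker walk but triggers RTE_DEPRECATED's build-time warning. A sketch (do_stats() is a placeholder):

static void do_stats(unsigned int lcore); /* placeholder */

static void
walk_workers(void)
{
	unsigned int i;

	RTE_LCORE_FOREACH_WORKER(i)	/* new name */
		do_stats(i);

	RTE_LCORE_FOREACH_SLAVE(i)	/* legacy name: same expansion, plus a
					 * build-time deprecation warning */
		do_stats(i);
}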
/**
* Callback prototype for initializing lcores.
*
int socket_id;
const struct rte_config *config = rte_eal_get_configuration();
- socket_id = rte_lcore_to_socket_id(config->master_lcore);
+ socket_id = rte_lcore_to_socket_id(config->main_lcore);
if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
- RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
+ RTE_LOG(WARNING, EAL, "WARNING: Main core has no memory on local socket!\n");
}
static int
eal_check_mem_on_local_socket();
if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
- &lcore_config[config->master_lcore].cpuset) != 0) {
+ &lcore_config[config->main_lcore].cpuset) != 0) {
rte_eal_init_alert("Cannot set affinity");
rte_errno = EINVAL;
return -1;
}
- __rte_thread_init(config->master_lcore,
- &lcore_config[config->master_lcore].cpuset);
+ __rte_thread_init(config->main_lcore,
+ &lcore_config[config->main_lcore].cpuset);
ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
- RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
- config->master_lcore, (uintptr_t)thread_id, cpuset,
+ RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
+ config->main_lcore, (uintptr_t)thread_id, cpuset,
ret == 0 ? "" : "...");
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
/*
- * create communication pipes between master thread
+ * create communication pipes between main thread
* and children
*/
- if (pipe(lcore_config[i].pipe_master2slave) < 0)
+ if (pipe(lcore_config[i].pipe_main2worker) < 0)
rte_panic("Cannot create pipe\n");
- if (pipe(lcore_config[i].pipe_slave2master) < 0)
+ if (pipe(lcore_config[i].pipe_worker2main) < 0)
rte_panic("Cannot create pipe\n");
lcore_config[i].state = WAIT;
/* Set thread_name for aid in debugging. */
snprintf(thread_name, sizeof(thread_name),
- "lcore-slave-%d", i);
+ "lcore-worker-%d", i);
ret = rte_thread_setname(lcore_config[i].thread_id,
thread_name);
if (ret != 0)
}
/*
- * Launch a dummy function on all slave lcores, so that master lcore
+ * Launch a dummy function on all worker lcores, so that the main lcore
* knows they are all ready when this function returns.
*/
- rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
rte_eal_mp_wait_lcore();
/* initialize services so vdevs register service during bus_probe. */
/* the allocation logic is a little bit convoluted, but here's how it
* works, in a nutshell:
* - if user hasn't specified on which sockets to allocate memory via
- * --socket-mem, we allocate all of our memory on master core socket.
+ * --socket-mem, we allocate all of our memory on main core socket.
* - if user has specified sockets to allocate memory on, there may be
* some "unused" memory left (e.g. if user has specified --socket-mem
* such that not all memory adds up to 2 gigabytes), so add it to all
for (i = 0; i < rte_socket_count(); i++) {
int hp_sizes = (int) internal_conf->num_hugepage_sizes;
uint64_t max_socket_mem, cur_socket_mem;
- unsigned int master_lcore_socket;
+ unsigned int main_lcore_socket;
struct rte_config *cfg = rte_eal_get_configuration();
bool skip;
skip = active_sockets != 0 &&
internal_conf->socket_mem[socket_id] == 0;
/* ...or if we didn't specifically request memory on *any*
- * socket, and this is not master lcore
+ * socket, and this is not main lcore
*/
- master_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore);
- skip |= active_sockets == 0 && socket_id != master_lcore_socket;
+ main_lcore_socket = rte_lcore_to_socket_id(cfg->main_lcore);
+ skip |= active_sockets == 0 && socket_id != main_lcore_socket;
if (skip) {
RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
#include "eal_thread.h"
/*
- * Send a message to a slave lcore identified by slave_id to call a
+ * Send a message to a worker lcore identified by worker_id to call a
* function f with argument arg. Once the execution is done, the
* remote lcore switches to the FINISHED state.
*/
int
-rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
+rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned int worker_id)
{
int n;
char c = 0;
- int m2s = lcore_config[slave_id].pipe_master2slave[1];
- int s2m = lcore_config[slave_id].pipe_slave2master[0];
+ int m2w = lcore_config[worker_id].pipe_main2worker[1];
+ int w2m = lcore_config[worker_id].pipe_worker2main[0];
int rc = -EBUSY;
- if (lcore_config[slave_id].state != WAIT)
+ if (lcore_config[worker_id].state != WAIT)
goto finish;
- lcore_config[slave_id].f = f;
- lcore_config[slave_id].arg = arg;
+ lcore_config[worker_id].f = f;
+ lcore_config[worker_id].arg = arg;
/* send message */
n = 0;
while (n == 0 || (n < 0 && errno == EINTR))
- n = write(m2s, &c, 1);
+ n = write(m2w, &c, 1);
if (n < 0)
rte_panic("cannot write on configuration pipe\n");
/* wait ack */
do {
- n = read(s2m, &c, 1);
+ n = read(w2m, &c, 1);
} while (n < 0 && errno == EINTR);
if (n <= 0)
rc = 0;
finish:
- rte_eal_trace_thread_remote_launch(f, arg, slave_id, rc);
+ rte_eal_trace_thread_remote_launch(f, arg, worker_id, rc);
return rc;
}
int n, ret;
unsigned lcore_id;
pthread_t thread_id;
- int m2s, s2m;
+ int m2w, w2m;
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
thread_id = pthread_self();
/* retrieve our lcore_id from the configuration structure */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (thread_id == lcore_config[lcore_id].thread_id)
break;
}
if (lcore_id == RTE_MAX_LCORE)
rte_panic("cannot retrieve lcore id\n");
- m2s = lcore_config[lcore_id].pipe_master2slave[0];
- s2m = lcore_config[lcore_id].pipe_slave2master[1];
+ m2w = lcore_config[lcore_id].pipe_main2worker[0];
+ w2m = lcore_config[lcore_id].pipe_worker2main[1];
__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);
/* wait command */
do {
- n = read(m2s, &c, 1);
+ n = read(m2w, &c, 1);
} while (n < 0 && errno == EINTR);
if (n <= 0)
/* send ack */
n = 0;
while (n == 0 || (n < 0 && errno == EINTR))
- n = write(s2m, &c, 1);
+ n = write(w2m, &c, 1);
if (n < 0)
rte_panic("cannot write on configuration pipe\n");
rte_eal_wait_lcore
rte_exit
rte_free
- rte_get_master_lcore
+ rte_get_main_lcore
rte_get_next_lcore
rte_get_tsc_hz
rte_hexdump
rte_free;
rte_get_hpet_cycles;
rte_get_hpet_hz;
- rte_get_master_lcore;
+ rte_get_main_lcore;
rte_get_next_lcore;
rte_get_tsc_hz;
rte_hexdump;
return -1;
}
- __rte_thread_init(config->master_lcore,
- &lcore_config[config->master_lcore].cpuset);
+ __rte_thread_init(config->main_lcore,
+ &lcore_config[config->main_lcore].cpuset);
bscan = rte_bus_scan();
if (bscan < 0) {
return -1;
}
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
/*
- * create communication pipes between master thread
+ * create communication pipes between main thread
* and children
*/
- if (_pipe(lcore_config[i].pipe_master2slave,
+ if (_pipe(lcore_config[i].pipe_main2worker,
sizeof(char), _O_BINARY) < 0)
rte_panic("Cannot create pipe\n");
- if (_pipe(lcore_config[i].pipe_slave2master,
+ if (_pipe(lcore_config[i].pipe_worker2main,
sizeof(char), _O_BINARY) < 0)
rte_panic("Cannot create pipe\n");
}
/*
- * Launch a dummy function on all slave lcores, so that master lcore
+ * Launch a dummy function on all worker lcores, so that the main lcore
* knows they are all ready when this function returns.
*/
- rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
+ rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
rte_eal_mp_wait_lcore();
return fctret;
}
#include "eal_windows.h"
/*
- * Send a message to a slave lcore identified by slave_id to call a
+ * Send a message to a worker lcore identified by worker_id to call a
* function f with argument arg. Once the execution is done, the
* remote lcore switches to the FINISHED state.
*/
int
-rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned int slave_id)
+rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned int worker_id)
{
int n;
char c = 0;
- int m2s = lcore_config[slave_id].pipe_master2slave[1];
- int s2m = lcore_config[slave_id].pipe_slave2master[0];
+ int m2w = lcore_config[worker_id].pipe_main2worker[1];
+ int w2m = lcore_config[worker_id].pipe_worker2main[0];
- if (lcore_config[slave_id].state != WAIT)
+ if (lcore_config[worker_id].state != WAIT)
return -EBUSY;
- lcore_config[slave_id].f = f;
- lcore_config[slave_id].arg = arg;
+ lcore_config[worker_id].f = f;
+ lcore_config[worker_id].arg = arg;
/* send message */
n = 0;
while (n == 0 || (n < 0 && errno == EINTR))
- n = _write(m2s, &c, 1);
+ n = _write(m2w, &c, 1);
if (n < 0)
rte_panic("cannot write on configuration pipe\n");
/* wait ack */
do {
- n = _read(s2m, &c, 1);
+ n = _read(w2m, &c, 1);
} while (n < 0 && errno == EINTR);
if (n <= 0)
int n, ret;
unsigned int lcore_id;
pthread_t thread_id;
- int m2s, s2m;
+ int m2w, w2m;
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
thread_id = pthread_self();
/* retrieve our lcore_id from the configuration structure */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (thread_id == lcore_config[lcore_id].thread_id)
break;
}
if (lcore_id == RTE_MAX_LCORE)
rte_panic("cannot retrieve lcore id\n");
- m2s = lcore_config[lcore_id].pipe_master2slave[0];
- s2m = lcore_config[lcore_id].pipe_slave2master[1];
+ m2w = lcore_config[lcore_id].pipe_main2worker[0];
+ w2m = lcore_config[lcore_id].pipe_worker2main[1];
__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);
/* wait command */
do {
- n = _read(m2s, &c, 1);
+ n = _read(m2w, &c, 1);
} while (n < 0 && errno == EINTR);
if (n <= 0)
/* send ack */
n = 0;
while (n == 0 || (n < 0 && errno == EINTR))
- n = _write(s2m, &c, 1);
+ n = _write(w2m, &c, 1);
if (n < 0)
rte_panic("cannot write on configuration pipe\n");
/**
* Initialize and preallocate KNI subsystem
*
- * This function is to be executed on the MASTER lcore only, after EAL
+ * This function is to be executed on the main lcore only, after EAL
* initialization and before any KNI interface is attempted to be
* allocated
*
if (get_freq_index(LOW) > total_avail_freqs[i])
return -1;
- if (rte_get_master_lcore() != i) {
+ if (rte_get_main_lcore() != i) {
w->wrk_stats[i].lcore_id = i;
set_policy(&w->wrk_stats[i], policy);
}