struct thread_params *p)
{
int ret;
- const unsigned int iter_shift = 23;
+ const unsigned int iter_shift = 15;
const unsigned int iterations = 1 << iter_shift;
struct rte_ring *r = p->r;
unsigned int bsize = p->size;
f2 = dequeue_bulk_16B;
}
- for (i = 0; i < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); i++) {
+ for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
lcore_count = 0;
param1.size = param2.size = bulk_sizes[i];
param1.r = param2.r = r;
- if (cores->c1 == rte_get_master_lcore()) {
+ if (cores->c1 == rte_get_main_lcore()) {
rte_eal_remote_launch(f2, &param2, cores->c2);
f1(&param1);
rte_eal_wait_lcore(cores->c2);
if (burst == NULL)
return -1;
- /* wait synchro for slaves */
- if (lcore != rte_get_master_lcore())
+ /* wait synchro for workers */
+ if (lcore != rte_get_main_lcore())
while (rte_atomic32_read(&synchro) == 0)
rte_pause();
static int
run_on_all_cores(struct rte_ring *r, const int esize)
{
- uint64_t total = 0;
+ uint64_t total;
struct thread_params param;
lcore_function_t *lcore_f;
unsigned int i, c;
memset(&param, 0, sizeof(struct thread_params));
for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
+ total = 0;
printf("\nBulk enq/dequeue count on size %u\n", bulk_sizes[i]);
param.size = bulk_sizes[i];
param.r = r;
- /* clear synchro and start slaves */
+ /* clear synchro and start workers */
rte_atomic32_set(&synchro, 0);
- if (rte_eal_mp_remote_launch(lcore_f, ¶m, SKIP_MASTER) < 0)
+ if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
return -1;
- /* start synchro and launch test on master */
+ /* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
lcore_f(&param);
goto test_fail;
}
- printf("\n### Testing using all slave nodes ###\n");
+ printf("\n### Testing using all worker nodes ###\n");
if (run_on_all_cores(r, esize) < 0)
goto test_fail;