summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
bf7a24b)
This patch rectifies slave_id to point to valid core indices rather than
core ranks in the read-write lock-free concurrency test.
It also replaces a 'for' loop with RTE_LCORE_FOREACH API.
Fixes:
c7eb0972e74b ("test/hash: add lock-free r/w concurrency")
Cc: stable@dpdk.org
Signed-off-by: Dharmik Thakkar <dharmik.thakkar@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Yipeng Wang <yipeng1.wang@intel.com>
uint32_t i = 0;
uint16_t core_id;
uint32_t max_cores = rte_lcore_count();
uint32_t i = 0;
uint16_t core_id;
uint32_t max_cores = rte_lcore_count();
- for (core_id = 0; core_id < RTE_MAX_LCORE && i < max_cores; core_id++) {
- if (rte_lcore_is_enabled(core_id)) {
- enabled_core_ids[i] = core_id;
- i++;
- }
+ RTE_LCORE_FOREACH(core_id) {
+ enabled_core_ids[i] = core_id;
+ i++;
enabled_core_ids[i]);
for (i = 1; i <= rwc_core_cnt[n]; i++)
enabled_core_ids[i]);
for (i = 1; i <= rwc_core_cnt[n]; i++)
- if (rte_eal_wait_lcore(i) < 0)
+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
goto err;
unsigned long long cycles_per_lookup =
goto err;
unsigned long long cycles_per_lookup =
if (ret < 0)
goto err;
for (i = 1; i <= rwc_core_cnt[n]; i++)
if (ret < 0)
goto err;
for (i = 1; i <= rwc_core_cnt[n]; i++)
- if (rte_eal_wait_lcore(i) < 0)
+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
goto err;
unsigned long long cycles_per_lookup =
goto err;
unsigned long long cycles_per_lookup =
if (ret < 0)
goto err;
for (i = 1; i <= rwc_core_cnt[n]; i++)
if (ret < 0)
goto err;
for (i = 1; i <= rwc_core_cnt[n]; i++)
- if (rte_eal_wait_lcore(i) < 0)
+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
goto err;
unsigned long long cycles_per_lookup =
goto err;
unsigned long long cycles_per_lookup =
if (ret < 0)
goto err;
for (i = 1; i <= rwc_core_cnt[n]; i++)
if (ret < 0)
goto err;
for (i = 1; i <= rwc_core_cnt[n]; i++)
- if (rte_eal_wait_lcore(i) < 0)
+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
goto err;
unsigned long long cycles_per_lookup =
goto err;
unsigned long long cycles_per_lookup =
if (ret < 0)
goto err;
for (i = 1; i <= rwc_core_cnt[n]; i++)
if (ret < 0)
goto err;
for (i = 1; i <= rwc_core_cnt[n]; i++)
- if (rte_eal_wait_lcore(i) < 0)
+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
goto err;
unsigned long long cycles_per_lookup =
goto err;
unsigned long long cycles_per_lookup =
for (i = rwc_core_cnt[n] + 1;
i <= rwc_core_cnt[m] + rwc_core_cnt[n];
i++)
for (i = rwc_core_cnt[n] + 1;
i <= rwc_core_cnt[m] + rwc_core_cnt[n];
i++)
+ rte_eal_wait_lcore(enabled_core_ids[i]);
writer_done = 1;
for (i = 1; i <= rwc_core_cnt[n]; i++)
writer_done = 1;
for (i = 1; i <= rwc_core_cnt[n]; i++)
- if (rte_eal_wait_lcore(i) < 0)
+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
goto err;
unsigned long long cycles_per_lookup =
goto err;
unsigned long long cycles_per_lookup =
writer_done = 1;
for (i = 1; i <= rwc_core_cnt[n]; i++)
writer_done = 1;
for (i = 1; i <= rwc_core_cnt[n]; i++)
- if (rte_eal_wait_lcore(i) < 0)
+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
goto err;
unsigned long long cycles_per_lookup =
goto err;
unsigned long long cycles_per_lookup =