/* Make sure that this has the same value as __RTE_QSBR_CNT_INIT */
#define TEST_RCU_QSBR_CNT_INIT 1
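/*
 * Illustrative guard, not part of this change: the equality requested by the
 * comment above could be enforced at compile time, assuming a C11 toolchain
 * and that __RTE_QSBR_CNT_INIT is visible through rte_rcu_qsbr.h.
 */
_Static_assert(TEST_RCU_QSBR_CNT_INIT == __RTE_QSBR_CNT_INIT,
		"TEST_RCU_QSBR_CNT_INIT must match __RTE_QSBR_CNT_INIT");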
-#define TEST_RCU_MAX_LCORE 128
-uint16_t enabled_core_ids[TEST_RCU_MAX_LCORE];
+uint16_t enabled_core_ids[RTE_MAX_LCORE];
uint8_t num_cores;
static uint32_t *keys;
#define TOTAL_ENTRY (1024 * 8)
#define COUNTER_VALUE 4096
-static uint32_t *hash_data[TEST_RCU_MAX_LCORE][TOTAL_ENTRY];
+static uint32_t *hash_data[RTE_MAX_LCORE][TOTAL_ENTRY];
static uint8_t writer_done;
-static struct rte_rcu_qsbr *t[TEST_RCU_MAX_LCORE];
-struct rte_hash *h[TEST_RCU_MAX_LCORE];
-char hash_name[TEST_RCU_MAX_LCORE][8];
+static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
+struct rte_hash *h[RTE_MAX_LCORE];
+char hash_name[RTE_MAX_LCORE][8];
struct test_rcu_thread_info {
/* Index in RCU array */
/* lcore IDs registered on the RCU variable */
uint16_t r_core_ids[2];
};
-struct test_rcu_thread_info thread_info[TEST_RCU_MAX_LCORE/4];
-
-static inline int
-get_enabled_cores_mask(void)
-{
- uint16_t core_id;
- uint32_t max_cores = rte_lcore_count();
-
- if (max_cores > TEST_RCU_MAX_LCORE) {
- printf("Number of cores exceed %d\n", TEST_RCU_MAX_LCORE);
- return -1;
- }
-
- core_id = 0;
- num_cores = 0;
- RTE_LCORE_FOREACH_SLAVE(core_id) {
- enabled_core_ids[num_cores] = core_id;
- num_cores++;
- }
-
- return 0;
-}
+struct test_rcu_thread_info thread_info[RTE_MAX_LCORE/4];
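/*
 * Minimal sketch of the QSBR flow these tests exercise, illustrative only and
 * not part of this change: size, init, reader register/online, writer grace
 * period, reader quiescent report, writer check. The qsbr_flow_sketch name is
 * local to this sketch and error handling is mostly omitted.
 */
static __rte_unused int
qsbr_flow_sketch(void)
{
	struct rte_rcu_qsbr *v;
	uint64_t token;
	size_t sz;
	int ret;

	/* Size and initialize a QS variable for up to RTE_MAX_LCORE readers */
	sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
	v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (v == NULL)
		return -1;
	rte_rcu_qsbr_init(v, RTE_MAX_LCORE);

	/* Reader side: register a thread id, then go online */
	rte_rcu_qsbr_thread_register(v, 0);
	rte_rcu_qsbr_thread_online(v, 0);

	/* Writer side: start a grace period after removing an element */
	token = rte_rcu_qsbr_start(v);

	/* Reader side: report a quiescent state */
	rte_rcu_qsbr_quiescent(v, 0);

	/* Writer side: blocking check returns 1 once all online readers report */
	ret = rte_rcu_qsbr_check(v, token, true);

	rte_rcu_qsbr_thread_offline(v, 0);
	rte_rcu_qsbr_thread_unregister(v, 0);
	rte_free(v);

	return ret == 1 ? 0 : -1;
}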
static int
alloc_rcu(void)
int i;
uint32_t sz;
- sz = rte_rcu_qsbr_get_memsize(TEST_RCU_MAX_LCORE);
+ sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
t[i] = (struct rte_rcu_qsbr *)rte_zmalloc(NULL, sz,
RTE_CACHE_LINE_SIZE);
{
int i;
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
rte_free(t[i]);
return 0;
sz = rte_rcu_qsbr_get_memsize(0);
TEST_RCU_QSBR_RETURN_IF_ERROR((sz != 1), "Get Memsize for 0 threads");
- sz = rte_rcu_qsbr_get_memsize(TEST_RCU_MAX_LCORE);
+ sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
/* For 128 threads,
* for machines with cache line size of 64B - 8384
* for machines with cache line size of 128 - 16768
printf("\nTest rte_rcu_qsbr_init()\n");
- r = rte_rcu_qsbr_init(NULL, TEST_RCU_MAX_LCORE);
+ r = rte_rcu_qsbr_init(NULL, RTE_MAX_LCORE);
TEST_RCU_QSBR_RETURN_IF_ERROR((r != 1), "NULL variable");
return 0;
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
"NULL variable, invalid thread id");
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
/* Register valid thread id */
ret = rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
"Already registered thread id");
/* Register valid thread id - max allowed thread id */
- ret = rte_rcu_qsbr_thread_register(t[0], TEST_RCU_MAX_LCORE - 1);
+ ret = rte_rcu_qsbr_thread_register(t[0], RTE_MAX_LCORE - 1);
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1), "Max thread id");
ret = rte_rcu_qsbr_thread_register(t[0], 100000);
static int
test_rcu_qsbr_thread_unregister(void)
{
- int i, j, ret;
+ unsigned int num_threads[3] = {1, RTE_MAX_LCORE, 1};
+ unsigned int i, j;
uint64_t token;
- uint8_t num_threads[3] = {1, TEST_RCU_MAX_LCORE, 1};
+ int ret;
printf("\nTest rte_rcu_qsbr_thread_unregister()\n");
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
"NULL variable, invalid thread id");
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
"NULL variable, invalid thread id");
/* Find first disabled core */
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++) {
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
if (enabled_core_ids[i] == 0)
break;
}
/*
* Test with different thread_ids:
* 1 - thread_id = 0
- * 2 - All possible thread_ids, from 0 to TEST_RCU_MAX_LCORE
- * 3 - thread_id = TEST_RCU_MAX_LCORE - 1
+ * 2 - All possible thread_ids, from 0 to RTE_MAX_LCORE
+ * 3 - thread_id = RTE_MAX_LCORE - 1
*/
for (j = 0; j < 3; j++) {
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
for (i = 0; i < num_threads[j]; i++)
rte_rcu_qsbr_thread_register(t[0],
- (j == 2) ? (TEST_RCU_MAX_LCORE - 1) : i);
+ (j == 2) ? (RTE_MAX_LCORE - 1) : i);
token = rte_rcu_qsbr_start(t[0]);
TEST_RCU_QSBR_RETURN_IF_ERROR(
/* Update quiescent state counter */
for (i = 0; i < num_threads[j]; i++) {
/* Skip one update */
- if (i == (TEST_RCU_MAX_LCORE - 10))
+ if (i == (RTE_MAX_LCORE - 10))
continue;
rte_rcu_qsbr_quiescent(t[0],
- (j == 2) ? (TEST_RCU_MAX_LCORE - 1) : i);
+ (j == 2) ? (RTE_MAX_LCORE - 1) : i);
}
if (j == 1) {
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
"Non-blocking QSBR check");
/* Update the previously skipped thread */
- rte_rcu_qsbr_quiescent(t[0], TEST_RCU_MAX_LCORE - 10);
+ rte_rcu_qsbr_quiescent(t[0], RTE_MAX_LCORE - 10);
}
/* Validate the updates */
for (i = 0; i < num_threads[j]; i++)
rte_rcu_qsbr_thread_unregister(t[0],
- (j == 2) ? (TEST_RCU_MAX_LCORE - 1) : i);
+ (j == 2) ? (RTE_MAX_LCORE - 1) : i);
/* Check with no thread registered */
ret = rte_rcu_qsbr_check(t[0], token, true);
printf("\nTest rte_rcu_qsbr_start()\n");
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
for (i = 0; i < 3; i++)
rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);
printf("\nTest rte_rcu_qsbr_check()\n");
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
token = rte_rcu_qsbr_start(t[0]);
TEST_RCU_QSBR_RETURN_IF_ERROR(
ret = rte_rcu_qsbr_check(t[0], token, true);
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Blocking QSBR check");
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
for (i = 0; i < 4; i++)
rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);
printf("\nTest rte_rcu_qsbr_synchronize()\n");
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
/* Test if the API returns when there are no threads reporting
* QS on the variable.
/* Test if the API returns when there are threads registered
* but not online.
*/
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rcu_qsbr_thread_register(t[0], i);
rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
rte_rcu_qsbr_thread_offline(t[0], 0);
/* Check the other boundary */
- rte_rcu_qsbr_thread_online(t[0], TEST_RCU_MAX_LCORE - 1);
- rte_rcu_qsbr_synchronize(t[0], TEST_RCU_MAX_LCORE - 1);
- rte_rcu_qsbr_thread_offline(t[0], TEST_RCU_MAX_LCORE - 1);
+ rte_rcu_qsbr_thread_online(t[0], RTE_MAX_LCORE - 1);
+ rte_rcu_qsbr_synchronize(t[0], RTE_MAX_LCORE - 1);
+ rte_rcu_qsbr_thread_offline(t[0], RTE_MAX_LCORE - 1);
/* Test if the API returns after unregistering all the threads */
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rcu_qsbr_thread_unregister(t[0], i);
rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
printf("Test rte_rcu_qsbr_thread_online()\n");
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
/* Register 2 threads to validate that only the
* online thread is waited upon.
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "thread update");
/* Make all the threads online */
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
token = rte_rcu_qsbr_start(t[0]);
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++) {
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
rte_rcu_qsbr_thread_register(t[0], i);
rte_rcu_qsbr_thread_online(t[0], i);
}
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "thread online");
/* Check if all the online threads can report QS */
token = rte_rcu_qsbr_start(t[0]);
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rcu_qsbr_quiescent(t[0], i);
ret = rte_rcu_qsbr_check(t[0], token, true);
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "thread update");
printf("\nTest rte_rcu_qsbr_thread_offline()\n");
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
/*
* Check a sequence of online/status/offline/status/online/status
*/
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
token = rte_rcu_qsbr_start(t[0]);
/* Make the threads online */
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++) {
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
rte_rcu_qsbr_thread_register(t[0], i);
rte_rcu_qsbr_thread_online(t[0], i);
}
/* Check if all the online threads can report QS */
token = rte_rcu_qsbr_start(t[0]);
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rcu_qsbr_quiescent(t[0], i);
ret = rte_rcu_qsbr_check(t[0], token, true);
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "report QS");
/* Make all the threads offline */
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rcu_qsbr_thread_offline(t[0], i);
/* Make sure these threads are not being waited on */
token = rte_rcu_qsbr_start(t[0]);
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "offline QS");
/* Make the threads online */
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rcu_qsbr_thread_online(t[0], i);
/* Check if all the online threads can report QS */
token = rte_rcu_qsbr_start(t[0]);
- for (i = 0; i < TEST_RCU_MAX_LCORE; i++)
+ for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rcu_qsbr_quiescent(t[0], i);
ret = rte_rcu_qsbr_check(t[0], token, true);
TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "online again");
rte_rcu_qsbr_dump(stdout, NULL);
rte_rcu_qsbr_dump(NULL, NULL);
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
- rte_rcu_qsbr_init(t[1], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
+ rte_rcu_qsbr_init(t[1], RTE_MAX_LCORE);
/* QS variable with 0 core mask */
rte_rcu_qsbr_dump(stdout, t[0]);
for (i = 0; i < TOTAL_ENTRY; i++) {
hash_data[hash_id][i] =
- rte_zmalloc(NULL,
- sizeof(uint32_t) * TEST_RCU_MAX_LCORE, 0);
+ rte_zmalloc(NULL, sizeof(uint32_t) * RTE_MAX_LCORE, 0);
if (hash_data[hash_id][i] == NULL) {
printf("No memory\n");
return NULL;
printf("Test: 1 writer, 1 QSBR variable, simultaneous QSBR queries\n");
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
/* Shared data structure created */
h[0] = init_hash(0);
for (i = 0; i < test_cores / 4; i++) {
j = i * 4;
- rte_rcu_qsbr_init(t[i], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[i], RTE_MAX_LCORE);
h[i] = init_hash(i);
if (h[i] == NULL) {
printf("Hash init failed\n");
static int
test_rcu_qsbr_main(void)
{
- if (get_enabled_cores_mask() != 0)
- return -1;
+ uint16_t core_id;
+
+ num_cores = 0;
+ RTE_LCORE_FOREACH_SLAVE(core_id) {
+ enabled_core_ids[num_cores] = core_id;
+ num_cores++;
+ }
if (num_cores < 4) {
printf("Test failed! Need 4 or more cores\n");
#include "test.h"
/* Check condition and return an error if true. */
-#define TEST_RCU_MAX_LCORE 128
-static uint16_t enabled_core_ids[TEST_RCU_MAX_LCORE];
+static uint16_t enabled_core_ids[RTE_MAX_LCORE];
static uint8_t num_cores;
static uint32_t *keys;
static volatile uint8_t all_registered;
static volatile uint32_t thr_id;
-static struct rte_rcu_qsbr *t[TEST_RCU_MAX_LCORE];
+static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
static struct rte_hash *h;
static char hash_name[8];
static rte_atomic64_t updates, checks;
*/
#define RCU_SCALE_DOWN 1000
-/* Simple way to allocate thread ids in 0 to TEST_RCU_MAX_LCORE space */
+/* Simple way to allocate thread ids in 0 to RTE_MAX_LCORE space */
static inline uint32_t
alloc_thread_id(void)
{
uint32_t tmp_thr_id;
tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
- if (tmp_thr_id >= TEST_RCU_MAX_LCORE)
+ if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
return tmp_thr_id;
}
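/*
 * Illustrative reader skeleton, not part of this change: how a worker lcore
 * would combine alloc_thread_id() with the QSBR reader-side API. The perf
 * reader/writer threads below follow this same pattern; the reader_sketch
 * name is local to this sketch.
 */
static __rte_unused int
reader_sketch(void *arg)
{
	struct rte_rcu_qsbr *v = arg;
	/* Claim a thread id in the 0 to RTE_MAX_LCORE space */
	uint32_t thread_id = alloc_thread_id();

	rte_rcu_qsbr_thread_register(v, thread_id);
	rte_rcu_qsbr_thread_online(v, thread_id);

	/* Read-side critical section would dereference shared data here */

	rte_rcu_qsbr_quiescent(v, thread_id);

	rte_rcu_qsbr_thread_offline(v, thread_id);
	rte_rcu_qsbr_thread_unregister(v, thread_id);

	return 0;
}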
-static inline int
-get_enabled_cores_mask(void)
-{
- uint16_t core_id;
- uint32_t max_cores = rte_lcore_count();
-
- if (max_cores > TEST_RCU_MAX_LCORE) {
- printf("Number of cores exceed %d\n", TEST_RCU_MAX_LCORE);
- return -1;
- }
-
- core_id = 0;
- num_cores = 0;
- RTE_LCORE_FOREACH_SLAVE(core_id) {
- enabled_core_ids[num_cores] = core_id;
- num_cores++;
- }
-
- return 0;
-}
-
static int
test_rcu_qsbr_reader_perf(void *arg)
{
if (all_registered == 1)
tmp_num_cores = num_cores - 1;
else
- tmp_num_cores = TEST_RCU_MAX_LCORE;
+ tmp_num_cores = RTE_MAX_LCORE;
sz = rte_rcu_qsbr_get_memsize(tmp_num_cores);
t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
if (all_registered == 1)
tmp_num_cores = num_cores;
else
- tmp_num_cores = TEST_RCU_MAX_LCORE;
+ tmp_num_cores = RTE_MAX_LCORE;
sz = rte_rcu_qsbr_get_memsize(tmp_num_cores);
t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
/* Number of readers does not matter for QS variable in this test
* case as no reader will be registered.
*/
- sz = rte_rcu_qsbr_get_memsize(TEST_RCU_MAX_LCORE);
+ sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
RTE_CACHE_LINE_SIZE);
/* QS variable is initialized */
- rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);
+ rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
/* Writer threads are launched */
for (i = 0; i < num_cores; i++)
for (i = 0; i < TOTAL_ENTRY; i++) {
hash_data[i] = rte_zmalloc(NULL,
- sizeof(uint32_t) * TEST_RCU_MAX_LCORE, 0);
+ sizeof(uint32_t) * RTE_MAX_LCORE, 0);
if (hash_data[i] == NULL) {
printf("No memory\n");
return NULL;
if (all_registered == 1)
tmp_num_cores = num_cores;
else
- tmp_num_cores = TEST_RCU_MAX_LCORE;
+ tmp_num_cores = RTE_MAX_LCORE;
sz = rte_rcu_qsbr_get_memsize(tmp_num_cores);
t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
if (all_registered == 1)
tmp_num_cores = num_cores;
else
- tmp_num_cores = TEST_RCU_MAX_LCORE;
+ tmp_num_cores = RTE_MAX_LCORE;
sz = rte_rcu_qsbr_get_memsize(tmp_num_cores);
t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
static int
test_rcu_qsbr_main(void)
{
+ uint16_t core_id;
+
rte_atomic64_init(&updates);
rte_atomic64_init(&update_cycles);
rte_atomic64_init(&checks);
rte_atomic64_init(&check_cycles);
- if (get_enabled_cores_mask() != 0)
- return -1;
+ num_cores = 0;
+ RTE_LCORE_FOREACH_SLAVE(core_id) {
+ enabled_core_ids[num_cores] = core_id;
+ num_cores++;
+ }
printf("Number of cores provided = %d\n", num_cores);
if (num_cores < 2) {
printf("Test failed! Need 2 or more cores\n");
goto test_fail;
}
- if (num_cores > TEST_RCU_MAX_LCORE) {
- printf("Test failed! %d cores supported\n", TEST_RCU_MAX_LCORE);
- goto test_fail;
- }
printf("Perf test with all reader threads registered\n");
printf("--------------------------------------------\n");
goto test_fail;
/* Make sure the actual number of cores provided is less than
- * TEST_RCU_MAX_LCORE. This will allow for some threads not
+ * RTE_MAX_LCORE. This will allow for some threads not
* to be registered on the QS variable.
*/
- if (num_cores >= TEST_RCU_MAX_LCORE) {
+ if (num_cores >= RTE_MAX_LCORE) {
printf("Test failed! number of cores provided should be less than %d\n",
- TEST_RCU_MAX_LCORE);
+ RTE_MAX_LCORE);
goto test_fail;
}