-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <string.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
typedef void (*case_clean_t)(unsigned lcore_id);
#define MAX_STRING_SIZE (256)
-#define MAX_ITER_TIMES (16)
-#define MAX_LPM_ITER_TIMES (8)
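+/* MAX_ITER_ONCE bounds the create-once race on the shared "fr_test_once"
+ * object; MAX_ITER_MULTI bounds the per-lcore create/lookup loops. */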
+#define MAX_ITER_MULTI (16)
+#define MAX_ITER_ONCE (4)
+#define MAX_LPM_ITER_TIMES (6)
-#define MEMPOOL_ELT_SIZE (0)
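+/* each element must hold the uint32_t written by my_obj_init() */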
+#define MEMPOOL_ELT_SIZE (sizeof(uint32_t))
#define MEMPOOL_SIZE (4)
-#define MAX_LCORES RTE_MAX_MEMZONE / (MAX_ITER_TIMES * 4U)
+#define MAX_LCORES (RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))
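+/* incremented once per successful create of the shared "fr_test_once"
+ * object; the launcher expects it to end at exactly 1 */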
+static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
#define WAIT_SYNCHRO_FOR_SLAVES() do{ \
* rte_eal_init only init once
*/
static int
-test_eal_init_once(__attribute__((unused)) void *arg)
+test_eal_init_once(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
WAIT_SYNCHRO_FOR_SLAVES();
+ rte_atomic32_set(&obj_count, 1); /* silence the check in the caller */
if (rte_eal_init(0, NULL) != -1)
return -1;
/*
* ring create/lookup reentrancy test
*/
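+/* free the per-lcore rings left behind by ring_create_lookup() */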
+static void
+ring_clean(unsigned int lcore_id)
+{
+ struct rte_ring *rp;
+ char ring_name[MAX_STRING_SIZE];
+ int i;
+
+ for (i = 0; i < MAX_ITER_MULTI; i++) {
+ snprintf(ring_name, sizeof(ring_name),
+ "fr_test_%d_%d", lcore_id, i);
+ rp = rte_ring_lookup(ring_name);
+ if (rp != NULL)
+ rte_ring_free(rp);
+ }
+}
+
static int
-ring_create_lookup(__attribute__((unused)) void *arg)
+ring_create_lookup(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_ring * rp;
WAIT_SYNCHRO_FOR_SLAVES();
/* create the same ring simultaneously on all threads */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_ONCE; i++) {
rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
- if ((NULL == rp) && (rte_ring_lookup("fr_test_once") == NULL))
- return -1;
+ if (rp != NULL)
+ rte_atomic32_inc(&obj_count);
}
/* create/lookup new ring several times */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
if (NULL == rp)
return -1;
if (rte_ring_lookup(ring_name) != rp)
return -1;
- }
- /* verify all ring created sucessful */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
- snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
+ /* verify the ring was created successfully */
if (rte_ring_lookup(ring_name) == NULL)
return -1;
}
}
static void
-my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
+my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
void *obj, unsigned i)
{
uint32_t *objnum = obj;
*objnum = i;
}
+static void
+mempool_clean(unsigned int lcore_id)
+{
+ struct rte_mempool *mp;
+ char mempool_name[MAX_STRING_SIZE];
+ int i;
+
+ /* free the mempools created by mempool_create_lookup() */
+ for (i = 0; i < MAX_ITER_MULTI; i++) {
+ snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
+ lcore_id, i);
+ mp = rte_mempool_lookup(mempool_name);
+ if (mp != NULL)
+ rte_mempool_free(mp);
+ }
+}
+
static int
-mempool_create_lookup(__attribute__((unused)) void *arg)
+mempool_create_lookup(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_mempool * mp;
WAIT_SYNCHRO_FOR_SLAVES();
/* create the same mempool simultaneously on all threads */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_ONCE; i++) {
mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
MEMPOOL_ELT_SIZE, 0, 0,
NULL, NULL,
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
- if ((NULL == mp) && (rte_mempool_lookup("fr_test_once") == NULL))
- return -1;
+ if (mp != NULL)
+ rte_atomic32_inc(&obj_count);
}
/* create/lookup new mempool several times */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
MEMPOOL_ELT_SIZE, 0, 0,
return -1;
if (rte_mempool_lookup(mempool_name) != mp)
return -1;
- }
- /* verify all ring created sucessful */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
- snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
+ /* verify the mempool was created successfully */
if (rte_mempool_lookup(mempool_name) == NULL)
return -1;
}
struct rte_hash *handle;
int i;
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);
if ((handle = rte_hash_find_existing(hash_name)) != NULL)
}
static int
-hash_create_free(__attribute__((unused)) void *arg)
+hash_create_free(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_hash *handle;
/* create the same hash simultaneously on all threads */
hash_params.name = "fr_test_once";
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_hash_create(&hash_params);
- if ((NULL == handle) && (rte_hash_find_existing("fr_test_once") == NULL))
- return -1;
+ if (handle != NULL)
+ rte_atomic32_inc(&obj_count);
}
/* create multiple hash tables simultaneously */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
hash_params.name = hash_name;
return -1;
rte_hash_free(handle);
- }
-
- /* verify free correct */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
- snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
+ /* verify the hash was freed correctly */
if (NULL != rte_hash_find_existing(hash_name))
return -1;
}
struct rte_fbk_hash_table *handle;
int i;
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);
if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
}
static int
-fbk_create_free(__attribute__((unused)) void *arg)
+fbk_create_free(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_fbk_hash_table *handle;
/* create the same fbk hash table simultaneously on all threads */
fbk_params.name = "fr_test_once";
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_fbk_hash_create(&fbk_params);
- if ((NULL == handle) && (rte_fbk_hash_find_existing("fr_test_once") == NULL))
- return -1;
+ if (handle != NULL)
+ rte_atomic32_inc(&obj_count);
}
/* create multiple fbk tables simultaneously */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
+ for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
fbk_params.name = fbk_name;
return -1;
rte_fbk_hash_free(handle);
- }
-
- /* verify free correct */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
- snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
+ /* verify the fbk table was freed correctly */
if (NULL != rte_fbk_hash_find_existing(fbk_name))
return -1;
}
#ifdef RTE_LIBRTE_LPM
static void
-lpm_clean(unsigned lcore_id)
+lpm_clean(unsigned int lcore_id)
{
char lpm_name[MAX_STRING_SIZE];
struct rte_lpm *lpm;
}
static int
-lpm_create_free(__attribute__((unused)) void *arg)
+lpm_create_free(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_lpm *lpm;
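+ /* rte_lpm_create() takes a config struct rather than max_rules/flags */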
+ struct rte_lpm_config config;
+
+ config.max_rules = 4;
+ config.number_tbl8s = 256;
+ config.flags = 0;
char lpm_name[MAX_STRING_SIZE];
int i;
WAIT_SYNCHRO_FOR_SLAVES();
/* create the same lpm simultaneously on all threads */
- for (i = 0; i < MAX_ITER_TIMES; i++) {
- lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, 4, RTE_LPM_HEAP);
- if ((NULL == lpm) && (rte_lpm_find_existing("fr_test_once") == NULL))
- return -1;
+ for (i = 0; i < MAX_ITER_ONCE; i++) {
+ lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
+ if (lpm != NULL)
+ rte_atomic32_inc(&obj_count);
}
/* create multiple lpm tables simultaneously */
for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
- lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, 4, RTE_LPM_HEAP);
+ lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
if (NULL == lpm)
return -1;
return -1;
rte_lpm_free(lpm);
- }
- /* verify free correct */
- for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
- snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
+ /* verify the lpm was freed correctly */
if (NULL != rte_lpm_find_existing(lpm_name))
return -1;
}
/* All test cases in the test suite */
struct test_case test_cases[] = {
{ test_eal_init_once, NULL, NULL, "eal init once" },
- { ring_create_lookup, NULL, NULL, "ring create/lookup" },
- { mempool_create_lookup, NULL, NULL, "mempool create/lookup" },
+ { ring_create_lookup, NULL, ring_clean, "ring create/lookup" },
+ { mempool_create_lookup, NULL, mempool_clean,
+ "mempool create/lookup" },
#ifdef RTE_LIBRTE_HASH
{ hash_create_free, NULL, hash_clean, "hash create/free" },
{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
unsigned lcore_id;
unsigned cores_save = rte_lcore_count();
unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
+ unsigned count;
if (pt_case->func == NULL)
return -1;
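+ /* reset the shared-object counter before launching the case */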
+ rte_atomic32_set(&obj_count, 0);
rte_atomic32_set(&synchro, 0);
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
pt_case->clean(lcore_id);
}
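+ /* exactly one lcore should have succeeded in creating the shared object */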
+ count = rte_atomic32_read(&obj_count);
+ if (count != 1) {
+ printf("%s: common object allocated %d times (should be 1)\n",
+ pt_case->name, count);
+ ret = -1;
+ }
+
return ret;
}
uint32_t case_id;
struct test_case *pt_case = NULL;
- if (rte_lcore_count() <= 1) {
- printf("Not enough lcore for testing\n");
- return -1;
+ if (rte_lcore_count() < 2) {
+ printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
+ return TEST_SKIPPED;
}
else if (rte_lcore_count() > MAX_LCORES)
printf("Too many lcores, some cores will be disabled\n");
- for (case_id = 0; case_id < sizeof(test_cases)/sizeof(struct test_case); case_id ++) {
+ for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
pt_case = &test_cases[case_id];
if (pt_case->func == NULL)
continue;
return 0;
}
-static struct test_command func_reentrancy_cmd = {
- .command = "func_reentrancy_autotest",
- .callback = test_func_reentrancy,
-};
-REGISTER_TEST_COMMAND(func_reentrancy_cmd);
+REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);