-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <stdio.h>
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
-#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
-#include <rte_atomic.h>
#include "test.h"
* - There is a global spinlock and a table of spinlocks (one per lcore).
*
* - The test function takes all of these locks and launches the
- * ``test_spinlock_per_core()`` function on each core (except the master).
+ * ``test_spinlock_per_core()`` function on each core (except the main).
*
- * - The function takes the global lock, display something, then releases
- * the global lock.
+ * - The function takes the global lock, displays something, then releases
+ * the global lock.
static rte_spinlock_recursive_t slr;
static unsigned count = 0;
-static rte_atomic32_t synchro;
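+/* synchro is a plain uint32_t; it is updated with compiler __atomic
+ * builtins rather than the legacy rte_atomic32 API
+ */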
+static uint32_t synchro;
static int
-test_spinlock_per_core(__attribute__((unused)) void *arg)
+test_spinlock_per_core(__rte_unused void *arg)
{
rte_spinlock_lock(&sl);
printf("Global lock taken on core %u\n", rte_lcore_id());
}
static int
-test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
+test_spinlock_recursive_per_core(__rte_unused void *arg)
{
unsigned id = rte_lcore_id();
}
static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
-static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+static uint64_t time_count[RTE_MAX_LCORE] = {0};
-#define TIME_S 5
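+/* each lcore runs a fixed number of lock/unlock iterations and the
+ * elapsed time is reported, rather than counting iterations over a
+ * fixed number of seconds
+ */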
+#define MAX_LOOP 10000
static int
load_loop_fn(void *func_param)
{
uint64_t time_diff = 0, begin;
uint64_t hz = rte_get_timer_hz();
- uint64_t lcount = 0;
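+ /* volatile forces a real memory update on each iteration, so the
+ * compiler cannot optimize the counting loop away
+ */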
+ volatile uint64_t lcount = 0;
const int use_lock = *(int*)func_param;
const unsigned lcore = rte_lcore_id();
- /* wait synchro for slaves */
- if (lcore != rte_get_master_lcore())
- while (rte_atomic32_read(&synchro) == 0);
+ /* worker lcores wait for synchro from the main lcore */
+ if (lcore != rte_get_main_lcore())
+ rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
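+ /* relaxed ordering is used for the synchro flag since it only
+ * lines up the start of the measurement loop; no other data
+ * depends on it
+ */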
begin = rte_get_timer_cycles();
- while (time_diff / hz < TIME_S) {
+ while (lcount < MAX_LOOP) {
if (use_lock)
rte_spinlock_lock(&lk);
lcount++;
if (use_lock)
rte_spinlock_unlock(&lk);
- /* delay to make lock duty cycle slighlty realistic */
- rte_delay_us(1);
- time_diff = rte_get_timer_cycles() - begin;
}
- lock_count[lcore] = lcount;
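+ /* report the elapsed time in microseconds (cycles * 1000000 / hz) */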
+ time_diff = rte_get_timer_cycles() - begin;
+ time_count[lcore] = time_diff * 1000000 / hz;
return 0;
}
printf("\nTest with no lock on single core...\n");
load_loop_fn(&lock);
- printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
- memset(lock_count, 0, sizeof(lock_count));
+ printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+ time_count[lcore]);
+ memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on single core...\n");
lock = 1;
load_loop_fn(&lock);
- printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
- memset(lock_count, 0, sizeof(lock_count));
+ printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+ time_count[lcore]);
+ memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
- /* Clear synchro and start slaves */
- rte_atomic32_set(&synchro, 0);
- rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+ /* Clear synchro and start workers */
+ __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
- /* start synchro and launch test on master */
- rte_atomic32_set(&synchro, 1);
+ /* start synchro and launch test on main */
+ __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
RTE_LCORE_FOREACH(i) {
- printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
- total += lock_count[i];
+ printf("Core [%u] Cost Time = %"PRIu64" us\n", i,
+ time_count[i]);
+ total += time_count[i];
}
- printf("Total count = %"PRIu64"\n", total);
+ printf("Total Cost Time = %"PRIu64" us\n", total);
return 0;
}
/*
- * Use rte_spinlock_trylock() to trylock a spinlock object,
- * If it could not lock the object sucessfully, it would
- * return immediately and the variable of "count" would be
- * increased by one per times. the value of "count" could be
- * checked as the result later.
+ * Use rte_spinlock_trylock() to trylock a spinlock object.
+ * If it cannot lock the object, it returns immediately, and
+ * the "count" variable is incremented by one each time. The
+ * value of "count" can be checked later as the result.
*/
static int
-test_spinlock_try(__attribute__((unused)) void *arg)
+test_spinlock_try(__rte_unused void *arg)
{
if (rte_spinlock_trylock(&sl_try) == 0) {
rte_spinlock_lock(&sl);
* Test rte_eal_get_lcore_state() in addition to spinlocks
* as we have "waiting" then "running" lcores.
*/
-int
+static int
test_spinlock(void)
{
int ret = 0;
int i;
- /* slave cores should be waiting: print it */
- RTE_LCORE_FOREACH_SLAVE(i) {
+ /* worker cores should be waiting: print it */
+ RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
rte_spinlock_lock(&sl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_spinlock_lock(&sl_tab[i]);
rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
}
- /* slave cores should be busy: print it */
- RTE_LCORE_FOREACH_SLAVE(i) {
+ /* worker cores should be busy: print it */
+ RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
rte_spinlock_unlock(&sl);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_spinlock_unlock(&sl_tab[i]);
- rte_delay_ms(100);
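+ /* brief pause so each worker can proceed in turn */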
+ rte_delay_ms(10);
}
rte_eal_mp_wait_lcore();
} else
rte_spinlock_recursive_unlock(&slr);
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
}
rte_spinlock_recursive_unlock(&slr);
/*
* Test if it could return immediately from try-locking a locked object.
- * Here it will lock the spinlock object first, then launch all the slave
+ * Here it will lock the spinlock object first, then launch all the worker
* lcores to trylock the same spinlock object.
- * All the slave lcores should give up try-locking a locked object and
+ * All the worker lcores should give up try-locking a locked object and
- * return immediately, and then increase the "count" initialized with zero
- * by one per times.
+ * return immediately, and then increment the "count" (initialized
+ * to zero) by one each time.
- * We can check if the "count" is finally equal to the number of all slave
+ * We can check if the "count" is finally equal to the number of all worker
* lcores to see if the behavior of try-locking a locked spinlock object
* is correct.
*/
return -1;
}
count = 0;
- RTE_LCORE_FOREACH_SLAVE(i) {
+ RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_spinlock_try, NULL, i);
}
rte_eal_mp_wait_lcore();
return ret;
}
+
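+/* register with the test framework; runs as the "spinlock_autotest" command */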
+REGISTER_TEST_COMMAND(spinlock_autotest, test_spinlock);