git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
ethdev: add represented port action to flow API
[dpdk.git]
/
app
/
test
/
test_mempool_perf.c
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index 60bda8a..8f62973 100644 (file)
--- a/
app/test/test_mempool_perf.c
+++ b/
app/test/test_mempool_perf.c
@@ -20,7 +20,6 @@
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
@@ -83,7 +82,7 @@
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-static rte_atomic32_t synchro;
+static uint32_t synchro;
/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
@@ -143,9 +142,9 @@ per_lcore_mempool_test(void *arg)
stats[lcore_id].enq_count = 0;
stats[lcore_id].enq_count = 0;
-	/* wait synchro for slaves */
-	if (lcore_id != rte_get_master_lcore())
-		while (rte_atomic32_read(&synchro) == 0);
+	/* wait synchro for workers */
+	if (lcore_id != rte_get_main_lcore())
+		rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
start_cycles = rte_get_timer_cycles();
start_cycles = rte_get_timer_cycles();
@@ -198,7 +197,7 @@ launch_cores(struct rte_mempool *mp, unsigned int cores)
int ret;
unsigned cores_save = cores;
int ret;
unsigned cores_save = cores;
-	rte_atomic32_set(&synchro, 0);
+	__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
/* reset stats */
memset(stats, 0, sizeof(stats));
/* reset stats */
memset(stats, 0, sizeof(stats));
@@ -214,7 +213,7 @@ launch_cores(struct rte_mempool *mp, unsigned int cores)
return -1;
}
return -1;
}
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (cores == 1)
break;
cores--;
if (cores == 1)
break;
cores--;
@@ -222,13 +221,13 @@ launch_cores(struct rte_mempool *mp, unsigned int cores)
mp, lcore_id);
}
mp, lcore_id);
}
-	/* start synchro and launch test on master */
-	rte_atomic32_set(&synchro, 1);
+	/* start synchro and launch test on main */
+	__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
ret = per_lcore_mempool_test(mp);
cores = cores_save;
ret = per_lcore_mempool_test(mp);
cores = cores_save;
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (cores == 1)
break;
cores--;
if (cores == 1)
break;
cores--;
@@ -288,8 +287,6 @@ test_mempool_perf(void)
const char *default_pool_ops;
int ret = -1;
const char *default_pool_ops;
int ret = -1;
- rte_atomic32_init(&synchro);
-
/* create a mempool (without cache) */
mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
MEMPOOL_ELT_SIZE, 0, 0,
/* create a mempool (without cache) */
mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
MEMPOOL_ELT_SIZE, 0, 0,