/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2022 SmartShare Systems
*/
#include <string.h>
*
* - Bulk get from 1 to 32
* - Bulk put from 1 to 32
+ * - Bulk get and put from 1 to 32, compile-time constant
*
* - Number of kept objects (*n_keep*)
*
* - 32
* - 128
+ * - 512
*/
#define N 65536
#define TIME_S 5
#define MEMPOOL_ELT_SIZE 2048
-#define MAX_KEEP 128
+#define MAX_KEEP 512
#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
+/* Number of pointers fitting into one cache line. */
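+/* E.g. a 64-byte cache line holds eight 8-byte pointers on 64-bit targets. */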
+#define CACHE_LINE_BURST (RTE_CACHE_LINE_SIZE / sizeof(uintptr_t))
+
#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
#define RET_ERR() do { \
LOG_ERR(); \
/* number of objects retrieved from mempool before putting them back */
static unsigned n_keep;
+/* true if we want to test with constant n_get_bulk and n_put_bulk */
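+/* (constant bulk sizes let the compiler specialize the test loop) */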
+static int use_constant_values;
+
/* number of enqueues / dequeues */
struct mempool_test_stats {
uint64_t enq_count;
*objnum = i;
}
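+
+/*
+ * Get x_keep objects in bursts of x_get_bulk and put them back in bursts
+ * of x_put_bulk, repeated N/x_keep times. Always inlined, so compile-time
+ * constant bulk sizes propagate into the mempool calls.
+ */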
+static __rte_always_inline int
+test_loop(struct rte_mempool *mp, struct rte_mempool_cache *cache,
+ unsigned int x_keep, unsigned int x_get_bulk, unsigned int x_put_bulk)
+{
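+ /* cache line aligned so bursts of pointers start on a line boundary */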
+ void *obj_table[MAX_KEEP] __rte_cache_aligned;
+ unsigned int idx;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; likely(i < (N / x_keep)); i++) {
+ /* get x_keep objects by bulk of x_get_bulk */
+ for (idx = 0; idx < x_keep; idx += x_get_bulk) {
+ ret = rte_mempool_generic_get(mp,
+ &obj_table[idx],
+ x_get_bulk,
+ cache);
+ if (unlikely(ret < 0)) {
+ rte_mempool_dump(stdout, mp);
+ return ret;
+ }
+ }
+
+ /* put the objects back by bulk of x_put_bulk */
+ for (idx = 0; idx < x_keep; idx += x_put_bulk) {
+ rte_mempool_generic_put(mp,
+ &obj_table[idx],
+ x_put_bulk,
+ cache);
+ }
+ }
+
+ return 0;
+}
+
static int
per_lcore_mempool_test(void *arg)
{
- void *obj_table[MAX_KEEP];
- unsigned i, idx;
struct rte_mempool *mp = arg;
unsigned lcore_id = rte_lcore_id();
int ret = 0;
GOTO_ERR(ret, out);
if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep)
GOTO_ERR(ret, out);
+ /* for constant n, n_get_bulk and n_put_bulk must be the same */
+ if (use_constant_values && n_put_bulk != n_get_bulk)
+ GOTO_ERR(ret, out);
stats[lcore_id].enq_count = 0;
start_cycles = rte_get_timer_cycles();
while (time_diff/hz < TIME_S) {
- for (i = 0; likely(i < (N/n_keep)); i++) {
- /* get n_keep objects by bulk of n_bulk */
- idx = 0;
- while (idx < n_keep) {
- ret = rte_mempool_generic_get(mp,
- &obj_table[idx],
- n_get_bulk,
- cache);
- if (unlikely(ret < 0)) {
- rte_mempool_dump(stdout, mp);
- /* in this case, objects are lost... */
- GOTO_ERR(ret, out);
- }
- idx += n_get_bulk;
- }
- /* put the objects back */
- idx = 0;
- while (idx < n_keep) {
- rte_mempool_generic_put(mp, &obj_table[idx],
- n_put_bulk,
- cache);
- idx += n_put_bulk;
- }
- }
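+ /*
+ * The first branch passes run-time bulk sizes; the constant branches
+ * call the always-inlined test_loop() with compile-time constant
+ * bulk sizes, so the compiler can specialize each burst size.
+ */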
+ if (!use_constant_values)
+ ret = test_loop(mp, cache, n_keep, n_get_bulk, n_put_bulk);
+ else if (n_get_bulk == 1)
+ ret = test_loop(mp, cache, n_keep, 1, 1);
+ else if (n_get_bulk == 4)
+ ret = test_loop(mp, cache, n_keep, 4, 4);
+ else if (n_get_bulk == CACHE_LINE_BURST)
+ ret = test_loop(mp, cache, n_keep,
+ CACHE_LINE_BURST, CACHE_LINE_BURST);
+ else if (n_get_bulk == 32)
+ ret = test_loop(mp, cache, n_keep, 32, 32);
+ else
+ ret = -1;
+
+ if (ret < 0)
+ GOTO_ERR(ret, out);
end_cycles = rte_get_timer_cycles();
time_diff = end_cycles - start_cycles;
stats[lcore_id].enq_count += N;
memset(stats, 0, sizeof(stats));
printf("mempool_autotest cache=%u cores=%u n_get_bulk=%u "
- "n_put_bulk=%u n_keep=%u ",
+ "n_put_bulk=%u n_keep=%u constant_n=%u ",
use_external_cache ?
external_cache_size : (unsigned) mp->cache_size,
- cores, n_get_bulk, n_put_bulk, n_keep);
+ cores, n_get_bulk, n_put_bulk, n_keep, use_constant_values);
if (rte_mempool_avail_count(mp) != MEMPOOL_SIZE) {
printf("mempool is not full\n");
static int
do_one_mempool_test(struct rte_mempool *mp, unsigned int cores)
{
- unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
- unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
- unsigned keep_tab[] = { 32, 128, 0 };
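+ /* zero-terminated tables of bulk sizes and keep counts to sweep */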
+ unsigned int bulk_tab_get[] = { 1, 4, CACHE_LINE_BURST, 32, 0 };
+ unsigned int bulk_tab_put[] = { 1, 4, CACHE_LINE_BURST, 32, 0 };
+ unsigned int keep_tab[] = { 32, 128, 512, 0 };
unsigned *get_bulk_ptr;
unsigned *put_bulk_ptr;
unsigned *keep_ptr;
for (put_bulk_ptr = bulk_tab_put; *put_bulk_ptr; put_bulk_ptr++) {
for (keep_ptr = keep_tab; *keep_ptr; keep_ptr++) {
+ use_constant_values = 0;
n_get_bulk = *get_bulk_ptr;
n_put_bulk = *put_bulk_ptr;
n_keep = *keep_ptr;
ret = launch_cores(mp, cores);
-
if (ret < 0)
return -1;
+
+ /* replay test with constant values */
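+ /* (the constant-value path requires n_get_bulk == n_put_bulk) */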
+ if (n_get_bulk == n_put_bulk) {
+ use_constant_values = 1;
+ ret = launch_cores(mp, cores);
+ if (ret < 0)
+ return -1;
+ }
}
}
}