/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_cycles.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "test.h"
/*
 * Basic tests: done on one core with and without cache:
 *
 *    - Get one object, put one object
 *    - Get two objects, put two objects
 *    - Get all objects, test that their content is not modified and
 *      put them back in the pool.
 */
#define MEMPOOL_ELT_SIZE 2048
#define MAX_KEEP 16
#define MEMPOOL_SIZE \
	((rte_lcore_count() * (MAX_KEEP + RTE_MEMPOOL_CACHE_MAX_SIZE)) - 1)
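/*
 * Note: the pool is sized for the worst case in which every lcore holds
 * MAX_KEEP objects plus a full per-lcore cache (RTE_MEMPOOL_CACHE_MAX_SIZE);
 * the trailing "-1" presumably keeps the pool just short of that total so
 * that exhaustion paths can be exercised as well.
 */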
#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
#define RET_ERR() do {						\
		LOG_ERR();					\
		return -1;					\
	} while (0)
#define GOTO_ERR(var, label) do {				\
		LOG_ERR();					\
		var = -1;					\
		goto label;					\
	} while (0)
static rte_atomic32_t synchro;
/*
 * save the object number in the first 4 bytes of object data. All
 * other bytes are set to 0.
 */
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned int i)
{
	uint32_t *objnum = obj;

	memset(obj, 0, mp->elt_size);
	*objnum = i;
}
/* basic tests (done on one core) */
static int
test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
{
	struct rte_mempool_cache *cache;
	if (use_external_cache) {
		/* Create a user-owned mempool cache. */
		cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
						 SOCKET_ID_ANY);
	} else {
		/* May be NULL if cache is disabled. */
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	}
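	/*
	 * Note: when the mempool was created with cache_size == 0,
	 * rte_mempool_default_cache() returns NULL; rte_mempool_generic_get()
	 * and rte_mempool_generic_put() accept a NULL cache pointer and then
	 * operate directly on the common pool backend.
	 */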
	/* dump the mempool status */
	rte_mempool_dump(stdout, mp);

	printf("get an object\n");
	if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
	rte_mempool_dump(stdout, mp);

	/* tests that improve coverage */
	printf("get object count\n");
	/* We have to count the extra caches, one in this case. */
	offset = use_external_cache ? 1 * cache->len : 0;
	if (rte_mempool_avail_count(mp) + offset != MEMPOOL_SIZE - 1)

	printf("get private data\n");
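	/*
	 * rte_mempool_get_priv() should point just past the mempool header;
	 * MEMPOOL_HEADER_SIZE() accounts for the header structure plus the
	 * per-lcore cache array that is embedded when cache_size is non-zero.
	 */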
	if (rte_mempool_get_priv(mp) != (char *)mp +
			MEMPOOL_HEADER_SIZE(mp, mp->cache_size))

#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2iova() not supported on bsd */
	printf("get physical address of an object\n");
	if (rte_mempool_virt2iova(obj) != rte_mem_virt2iova(obj))

	printf("put the object back\n");
	rte_mempool_generic_put(mp, &obj, 1, cache);
	rte_mempool_dump(stdout, mp);

	printf("get 2 objects\n");
	if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
	if (rte_mempool_generic_get(mp, &obj2, 1, cache) < 0) {
		rte_mempool_generic_put(mp, &obj, 1, cache);
	rte_mempool_dump(stdout, mp);

	printf("put the objects back\n");
	rte_mempool_generic_put(mp, &obj, 1, cache);
	rte_mempool_generic_put(mp, &obj2, 1, cache);
	rte_mempool_dump(stdout, mp);

	/*
	 * get many objects: we cannot get them all because the cache
	 * on other cores may not be empty.
	 */
	objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
	if (objtable == NULL)

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_generic_get(mp, &objtable[i], 1, cache) < 0)

	/*
	 * for each object, check that its content was not modified,
	 * and put objects back in pool
	 */
	if (*objnum > MEMPOOL_SIZE) {
		printf("bad object number(%u)\n", *objnum);
	for (j = sizeof(*objnum); j < mp->elt_size; j++) {
		if (obj_data[j] != 0)
	rte_mempool_generic_put(mp, &objtable[i], 1, cache);
	printf("objects were modified!\n");
	if (use_external_cache) {
		rte_mempool_cache_flush(cache, mp);
		rte_mempool_cache_free(cache);
	}
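	/*
	 * Note: the flush above returns any objects still held in the
	 * user-owned cache to the pool before the cache is freed;
	 * rte_mempool_cache_free() does not flush on its own.
	 */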
static int test_mempool_creation_with_exceeded_cache_size(void)
{
	struct rte_mempool *mp_cov;

	mp_cov = rte_mempool_create("test_mempool_cache_too_big",
		RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,

	if (mp_cov != NULL) {
		rte_mempool_free(mp_cov);

static struct rte_mempool *mp_spsc;
static rte_spinlock_t scsp_spinlock;
static void *scsp_obj_table[MAX_KEEP];
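/*
 * Handoff between the two lcores: scsp_obj_table[] holds objects currently
 * taken out of mp_spsc. The consumer side fills empty (NULL) slots using
 * rte_mempool_get(), the producer side returns non-NULL slots to the pool
 * with rte_mempool_put(); all accesses to the table are protected by
 * scsp_spinlock.
 */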
/*
 * single producer function
 */
static int test_mempool_single_producer(void)
{
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 4;
	start_cycles = rte_get_timer_cycles();
	end_cycles = rte_get_timer_cycles();
	/* duration elapsed, stop producing */
	if (start_cycles + duration < end_cycles)
	rte_spinlock_lock(&scsp_spinlock);
	for (i = 0; i < MAX_KEEP; i++) {
		if (scsp_obj_table[i] != NULL) {
			obj = scsp_obj_table[i];
	rte_spinlock_unlock(&scsp_spinlock);
	if (rte_mempool_from_obj(obj) != mp_spsc) {
		printf("obj not owned by this mempool\n");
	rte_mempool_put(mp_spsc, obj);
	rte_spinlock_lock(&scsp_spinlock);
	scsp_obj_table[i] = NULL;
	rte_spinlock_unlock(&scsp_spinlock);
/*
 * single consumer function
 */
static int test_mempool_single_consumer(void)
{
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 8;
	start_cycles = rte_get_timer_cycles();
	end_cycles = rte_get_timer_cycles();
	/* duration elapsed, stop consuming */
	if (start_cycles + duration < end_cycles)
	rte_spinlock_lock(&scsp_spinlock);
	for (i = 0; i < MAX_KEEP; i++) {
		if (scsp_obj_table[i] == NULL)
	rte_spinlock_unlock(&scsp_spinlock);
	if (rte_mempool_get(mp_spsc, &obj) < 0)
	rte_spinlock_lock(&scsp_spinlock);
	scsp_obj_table[i] = obj;
	rte_spinlock_unlock(&scsp_spinlock);
/*
 * test function for mempool test based on single consumer and single producer,
 * can run on one lcore only
 */
static int
test_mempool_launch_single_consumer(__rte_unused void *arg)
{
	return test_mempool_single_consumer();
}
static void
my_mp_init(struct rte_mempool *mp, __rte_unused void *arg)
{
	printf("mempool name is %s\n", mp->name);
	/* nothing to be implemented here */
}
/*
 * it tests the mempool operations based on single producer and single consumer
 */
static int
test_mempool_sp_sc(void)
{
	unsigned int lcore_id = rte_lcore_id();
	unsigned int lcore_next;

	/* create a mempool with single producer/consumer ring */
	if (mp_spsc == NULL) {
		mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
			MEMPOOL_ELT_SIZE, 0, 0,
			MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |

	if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
		printf("Cannot lookup mempool from its name\n");

	lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
	if (lcore_next >= RTE_MAX_LCORE) {

	if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {

	rte_spinlock_init(&scsp_spinlock);
	memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
	rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL,

	if (test_mempool_single_producer() < 0)

	if (rte_eal_wait_lcore(lcore_next) < 0)

	rte_mempool_free(mp_spsc);
/* it tests some more basic mempool operations */
static int
test_mempool_basic_ex(struct rte_mempool *mp)
{
	void **obj;

	obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE,
			sizeof(void *), 0);
	if (obj == NULL) {
		printf("test_mempool_basic_ex: failed to allocate object array\n");

	printf("test_mempool_basic_ex: mempool (%s) now has %u in-use entries\n",
		mp->name, rte_mempool_in_use_count(mp));
	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_get(mp, &obj[i]) < 0) {
			printf("test_mempool_basic_ex: failed to get object %u\n",
				i);
			goto fail_mp_basic_ex;
	if (rte_mempool_get(mp, &err_obj) == 0) {
		printf("test_mempool_basic_ex get an impossible obj\n");
		goto fail_mp_basic_ex;

	printf("number: %u\n", i);
	if (rte_mempool_empty(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be empty\n");
		goto fail_mp_basic_ex;

	for (i = 0; i < MEMPOOL_SIZE; i++)
		rte_mempool_put(mp, obj[i]);

	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;

fail_mp_basic_ex:
	rte_free((void *)obj);
static int
test_mempool_same_name_twice_creation(void)
{
	struct rte_mempool *mp_tc, *mp_tc2;
	mp_tc = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,

	mp_tc2 = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,

	if (mp_tc2 != NULL) {
		rte_mempool_free(mp_tc);
		rte_mempool_free(mp_tc2);

	rte_mempool_free(mp_tc);
static void
walk_cb(struct rte_mempool *mp, void *userdata __rte_unused)
{
	printf("\t%s\n", mp->name);
}
static int
test_mempool(void)
{
	struct rte_mempool *mp_cache = NULL;
	struct rte_mempool *mp_nocache = NULL;
	struct rte_mempool *mp_stack = NULL;
	struct rte_mempool *default_pool = NULL;
	const char *default_pool_ops = rte_mbuf_best_mempool_ops();
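	/*
	 * rte_mbuf_best_mempool_ops() returns the name of the mempool ops the
	 * mbuf layer would pick: the user-supplied ops name when one was given
	 * to the EAL, otherwise the platform/compile-time default.
	 */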
	rte_atomic32_init(&synchro);

	/* create a mempool (without cache) */
	mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,

	if (mp_nocache == NULL) {
		printf("cannot allocate mp_nocache mempool\n");

	/* create a mempool (with cache) */
	mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,

	if (mp_cache == NULL) {
		printf("cannot allocate mp_cache mempool\n");

	/* create a mempool with an external handler */
	mp_stack = rte_mempool_create_empty("test_stack",
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,

	if (mp_stack == NULL) {
		printf("cannot allocate mp_stack mempool\n");

	if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
		printf("cannot set stack handler\n");

	if (rte_mempool_populate_default(mp_stack) < 0) {
		printf("cannot populate mp_stack mempool\n");

	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
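	/*
	 * Pattern for a non-default ops handler: the pool is created empty,
	 * the ops ("stack" here) are attached with rte_mempool_set_ops_byname()
	 * before any population, then the pool is populated and each object is
	 * initialized via rte_mempool_obj_iter(), mirroring what
	 * rte_mempool_create() does in a single call.
	 */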
	/* Create a mempool based on Default handler */
	printf("Testing %s mempool handler\n", default_pool_ops);
	default_pool = rte_mempool_create_empty("default_pool",
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,

	if (default_pool == NULL) {
		printf("cannot allocate default mempool\n");

	if (rte_mempool_set_ops_byname(default_pool,
			default_pool_ops, NULL) < 0) {
		printf("cannot set %s handler\n", default_pool_ops);

	if (rte_mempool_populate_default(default_pool) < 0) {
		printf("cannot populate %s mempool\n", default_pool_ops);

	rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
	/* retrieve the mempool from its name */
	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
		printf("Cannot lookup mempool from its name\n");

	printf("Walk through the mempools:\n");
	rte_mempool_walk(walk_cb, NULL);

	rte_mempool_list_dump(stdout);
	/* basic tests without cache */
	if (test_mempool_basic(mp_nocache, 0) < 0)

	/* basic tests with cache */
	if (test_mempool_basic(mp_cache, 0) < 0)

	/* basic tests with user-owned cache */
	if (test_mempool_basic(mp_nocache, 1) < 0)

	/* more basic tests without cache */
	if (test_mempool_basic_ex(mp_nocache) < 0)

	/* mempool operation test based on single producer and single consumer */
	if (test_mempool_sp_sc() < 0)

	if (test_mempool_creation_with_exceeded_cache_size() < 0)

	if (test_mempool_same_name_twice_creation() < 0)

	/* test the stack handler */
	if (test_mempool_basic(mp_stack, 1) < 0)

	if (test_mempool_basic(default_pool, 1) < 0)
	rte_mempool_list_dump(stdout);

	rte_mempool_free(mp_nocache);
	rte_mempool_free(mp_cache);
	rte_mempool_free(mp_stack);
	rte_mempool_free(default_pool);

REGISTER_TEST_COMMAND(mempool_autotest, test_mempool);