/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include "test.h"
/*
 * Basic tests: done on one core with and without cache:
 *
 *    - Get one object, put one object
 *    - Get two objects, put two objects
 *    - Get all objects, test that their content is not modified and
 *      put them back in the pool.
 */

#define MEMPOOL_ELT_SIZE 2048
#define MAX_KEEP 16
#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)

#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
#define RET_ERR() do { \
		LOG_ERR(); \
		return -1; \
	} while (0)
#define GOTO_ERR(var, label) do { \
		LOG_ERR(); \
		var = -1; \
		goto label; \
	} while (0)
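
/*
 * Note on sizing: MEMPOOL_SIZE scales with the number of lcores so that
 * every lcore can hold up to MAX_KEEP objects plus a full local cache
 * (RTE_MEMPOOL_CACHE_MAX_SIZE entries) while the tests below run.
 */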
static rte_atomic32_t synchro;
/*
 * Simple example of custom mempool structure. Holds pointers to all the
 * elements which are simply malloc'd in this example.
 */
struct custom_mempool {
	rte_spinlock_t lock;
	unsigned int count;
	unsigned int size;
	void *elts[];
};
/*
 * Allocate the custom pool's bookkeeping structure (lock, element count
 * and element table) and attach it to the mempool.
 */
static int
custom_mempool_alloc(struct rte_mempool *mp)
{
	struct custom_mempool *cm;

	cm = rte_zmalloc("custom_mempool",
		sizeof(struct custom_mempool) + mp->size * sizeof(void *), 0);
	if (cm == NULL)
		return -ENOMEM;

	rte_spinlock_init(&cm->lock);
	cm->size = mp->size;
	mp->pool_data = cm;
	return 0;
}
static void
custom_mempool_free(struct rte_mempool *mp)
{
	rte_free((void *)(mp->pool_data));
}
static int
custom_mempool_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
	int ret = 0;

	rte_spinlock_lock(&cm->lock);
	if (cm->count + n > cm->size) {
		ret = -ENOBUFS;
	} else {
		memcpy(&cm->elts[cm->count], obj_table, sizeof(void *) * n);
		cm->count += n;
	}
	rte_spinlock_unlock(&cm->lock);
	return ret;
}
static int
custom_mempool_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
	int ret = 0;

	rte_spinlock_lock(&cm->lock);
	if (n > cm->count) {
		ret = -ENOENT;
	} else {
		cm->count -= n;
		memcpy(obj_table, &cm->elts[cm->count], sizeof(void *) * n);
	}
	rte_spinlock_unlock(&cm->lock);
	return ret;
}
static unsigned
custom_mempool_get_count(const struct rte_mempool *mp)
{
	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);

	return cm->count;
}
static struct rte_mempool_ops mempool_ops_custom = {
	.name = "custom_handler",
	.alloc = custom_mempool_alloc,
	.free = custom_mempool_free,
	.enqueue = custom_mempool_enqueue,
	.dequeue = custom_mempool_dequeue,
	.get_count = custom_mempool_get_count,
};

MEMPOOL_REGISTER_OPS(mempool_ops_custom);
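
/*
 * For reference: the "custom_handler" ops registered above are selected by
 * name at pool-creation time. test_mempool() below does roughly the
 * following (sketch of the calls used later in this file):
 *
 *	mp = rte_mempool_create_empty("test_ext", MEMPOOL_SIZE,
 *		MEMPOOL_ELT_SIZE, RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
 *		SOCKET_ID_ANY, 0);
 *	rte_mempool_set_ops_byname(mp, "custom_handler", NULL);
 *	rte_mempool_populate_default(mp);
 */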
/*
 * save the object number in the first 4 bytes of object data. All
 * other bytes are set to 0.
 */
static void
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;

	memset(obj, 0, mp->elt_size);
	*objnum = i;
}
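
/*
 * The object number written by my_obj_init() is what test_mempool_basic()
 * verifies later: each object must still carry a number no larger than
 * MEMPOOL_SIZE and all remaining bytes must still be zero when it is
 * returned to the pool.
 */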
/* basic tests (done on one core) */
static int
test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
{
	uint32_t *objnum;
	void **objtable;
	void *obj, *obj2;
	char *obj_data;
	int ret = 0;
	unsigned int i, j;
	int offset;
	struct rte_mempool_cache *cache;

	if (use_external_cache) {
		/* Create a user-owned mempool cache. */
		cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
						 SOCKET_ID_ANY);
		if (cache == NULL)
			RET_ERR();
	} else {
		/* May be NULL if cache is disabled. */
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	}
	/* dump the mempool status */
	rte_mempool_dump(stdout, mp);

	printf("get an object\n");
	if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
		GOTO_ERR(ret, out);
	rte_mempool_dump(stdout, mp);
	/* tests that improve coverage */
	printf("get object count\n");
	/* We have to count the extra caches, one in this case. */
	offset = use_external_cache ? 1 * cache->len : 0;
	if (rte_mempool_avail_count(mp) + offset != MEMPOOL_SIZE - 1)
		GOTO_ERR(ret, out);

	printf("get private data\n");
	if (rte_mempool_get_priv(mp) != (char *)mp +
			MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
		GOTO_ERR(ret, out);

#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
	printf("get physical address of an object\n");
	if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
		GOTO_ERR(ret, out);
#endif
248 printf("put the object back\n");
249 rte_mempool_generic_put(mp, &obj, 1, cache, 0);
250 rte_mempool_dump(stdout, mp);
252 printf("get 2 objects\n");
253 if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
255 if (rte_mempool_generic_get(mp, &obj2, 1, cache, 0) < 0) {
256 rte_mempool_generic_put(mp, &obj, 1, cache, 0);
259 rte_mempool_dump(stdout, mp);
261 printf("put the objects back\n");
262 rte_mempool_generic_put(mp, &obj, 1, cache, 0);
263 rte_mempool_generic_put(mp, &obj2, 1, cache, 0);
264 rte_mempool_dump(stdout, mp);
	/*
	 * get many objects: we cannot get them all because the cache
	 * on other cores may not be empty.
	 */
	objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
	if (objtable == NULL)
		GOTO_ERR(ret, out);

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_generic_get(mp, &objtable[i], 1, cache, 0) < 0)
			break;
	}
	/*
	 * for each object, check that its content was not modified,
	 * and put objects back in pool
	 */
	while (i--) {
		obj_data = objtable[i];
		objnum = objtable[i];
		if (*objnum > MEMPOOL_SIZE) {
			printf("bad object number(%u)\n", *objnum);
			ret = -1;
			break;
		}
		for (j = sizeof(*objnum); j < mp->elt_size; j++) {
			if (obj_data[j] != 0)
				ret = -1;
		}

		rte_mempool_generic_put(mp, &objtable[i], 1, cache, 0);
	}

	free(objtable);
	if (ret == -1)
		printf("objects were modified!\n");

out:
	if (use_external_cache) {
		rte_mempool_cache_flush(cache, mp);
		rte_mempool_cache_free(cache);
	}

	return ret;
}
/*
 * Check that creating a mempool with a cache size bigger than
 * RTE_MEMPOOL_CACHE_MAX_SIZE fails.
 */
static int
test_mempool_creation_with_exceeded_cache_size(void)
{
	struct rte_mempool *mp_cov;

	mp_cov = rte_mempool_create("test_mempool_cache_too_big",
		MEMPOOL_SIZE, MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_cov != NULL) {
		rte_mempool_free(mp_cov);
		RET_ERR();
	}

	return 0;
}
static struct rte_mempool *mp_spsc;
static rte_spinlock_t scsp_spinlock;
static void *scsp_obj_table[MAX_KEEP];
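
/*
 * Exchange protocol between the two lcores: the consumer gets objects from
 * mp_spsc and stores them in free slots of scsp_obj_table[]; the producer
 * picks occupied slots, checks ownership with rte_mempool_from_obj() and
 * puts the objects back into the pool. scsp_spinlock protects the table.
 */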
/*
 * single producer function
 */
static int test_mempool_single_producer(void)
{
	unsigned int i;
	void *obj = NULL;
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 4;

	start_cycles = rte_get_timer_cycles();
	while (1) {
		end_cycles = rte_get_timer_cycles();
		/* duration used up, stop producing */
		if (start_cycles + duration < end_cycles)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		for (i = 0; i < MAX_KEEP; i++) {
			if (NULL != scsp_obj_table[i]) {
				obj = scsp_obj_table[i];
				break;
			}
		}
		rte_spinlock_unlock(&scsp_spinlock);
		if (i >= MAX_KEEP)
			continue;
		if (rte_mempool_from_obj(obj) != mp_spsc) {
			printf("obj not owned by this mempool\n");
			RET_ERR();
		}
		rte_mempool_put(mp_spsc, obj);
		rte_spinlock_lock(&scsp_spinlock);
		scsp_obj_table[i] = NULL;
		rte_spinlock_unlock(&scsp_spinlock);
	}

	return 0;
}
/*
 * single consumer function
 */
static int test_mempool_single_consumer(void)
{
	unsigned int i;
	void *obj;
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 8;

	start_cycles = rte_get_timer_cycles();
	while (1) {
		end_cycles = rte_get_timer_cycles();
		/* duration used up, stop consuming */
		if (start_cycles + duration < end_cycles)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		for (i = 0; i < MAX_KEEP; i++) {
			if (NULL == scsp_obj_table[i])
				break;
		}
		rte_spinlock_unlock(&scsp_spinlock);
		if (i >= MAX_KEEP)
			continue;
		if (rte_mempool_get(mp_spsc, &obj) < 0)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		scsp_obj_table[i] = obj;
		rte_spinlock_unlock(&scsp_spinlock);
	}

	return 0;
}
/*
 * test function for mempool test based on single consumer and single producer,
 * can run on one lcore only
 */
static int
test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
{
	return test_mempool_single_consumer();
}
static void
my_mp_init(struct rte_mempool *mp, __attribute__((unused)) void *arg)
{
	printf("mempool name is %s\n", mp->name);
	/* nothing else to be implemented here */
}
/*
 * it tests the mempool operations based on single producer and single consumer
 */
static int
test_mempool_sp_sc(void)
{
	int ret = 0;
	unsigned lcore_next;
	unsigned lcore_id = rte_lcore_id();

	/* create a mempool with single producer/consumer ring */
	if (mp_spsc == NULL) {
		mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
			MEMPOOL_ELT_SIZE, 0, 0,
			my_mp_init, NULL,
			my_obj_init, NULL,
			SOCKET_ID_ANY,
			MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |
			MEMPOOL_F_SC_GET);
		if (mp_spsc == NULL)
			RET_ERR();
	}
	if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
		printf("Cannot lookup mempool from its name\n");
		rte_mempool_free(mp_spsc);
		RET_ERR();
	}
	lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
	if (lcore_next >= RTE_MAX_LCORE) {
		rte_mempool_free(mp_spsc);
		RET_ERR();
	}
	if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {
		rte_mempool_free(mp_spsc);
		RET_ERR();
	}
	rte_spinlock_init(&scsp_spinlock);
	memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
	rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL,
		lcore_next);
	if (test_mempool_single_producer() < 0)
		ret = -1;

	if (rte_eal_wait_lcore(lcore_next) < 0)
		ret = -1;
	rte_mempool_free(mp_spsc);

	return ret;
}
/*
 * it runs some more basic tests of the mempool
 */
static int
test_mempool_basic_ex(struct rte_mempool *mp)
{
	unsigned int i;
	void **obj;
	void *err_obj;
	int ret = -1;

	obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE,
		sizeof(void *), 0);
	if (obj == NULL) {
		printf("test_mempool_basic_ex fail to rte_malloc\n");
		return ret;
	}
	printf("test_mempool_basic_ex now mempool (%s) has %u elements in use\n",
		mp->name, rte_mempool_in_use_count(mp));
	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;
	}

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_get(mp, &obj[i]) < 0) {
			printf("test_mp_basic_ex fail to get object for [%u]\n",
				i);
			goto fail_mp_basic_ex;
		}
	}
	if (rte_mempool_get(mp, &err_obj) == 0) {
		printf("test_mempool_basic_ex get an impossible obj\n");
		goto fail_mp_basic_ex;
	}
	printf("number: %u\n", i);
	if (rte_mempool_empty(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be empty\n");
		goto fail_mp_basic_ex;
	}

	for (i = 0; i < MEMPOOL_SIZE; i++)
		rte_mempool_put(mp, obj[i]);
	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;
	}

	ret = 0;

fail_mp_basic_ex:
	rte_free((void *)obj);

	return ret;
}
static int
test_mempool_same_name_twice_creation(void)
{
	struct rte_mempool *mp_tc, *mp_tc2;

	mp_tc = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		NULL, NULL,
		SOCKET_ID_ANY, 0);
	if (mp_tc == NULL)
		RET_ERR();

	/* creating a second pool with the same name must fail */
	mp_tc2 = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		NULL, NULL,
		SOCKET_ID_ANY, 0);
	if (mp_tc2 != NULL) {
		rte_mempool_free(mp_tc);
		rte_mempool_free(mp_tc2);
		RET_ERR();
	}

	rte_mempool_free(mp_tc);
	return 0;
}
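
/*
 * Background for the check below: rte_mempool_xmem_size() returns how much
 * memory is needed to store elt_num objects of the given size, while
 * rte_mempool_xmem_usage() computes the amount actually consumed for a
 * given virtual area; with MEMPOOL_PG_SHIFT_MAX (one contiguous region)
 * the two values are expected to match.
 */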
/*
 * Basic test for mempool_xmem functions.
 */
static int
test_mempool_xmem_misc(void)
{
	uint32_t elt_num, total_size;
	size_t sz;
	ssize_t usz;

	elt_num = MAX_KEEP;
	total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL);
	sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX);

	usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
		MEMPOOL_PG_SHIFT_MAX);

	if (sz != (size_t)usz) {
		printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
			"returns: %#zx, while expected: %#zx;\n",
			__func__, elt_num, total_size, sz, (size_t)usz);
		return -1;
	}

	return 0;
}
static int
test_mempool(void)
{
	int ret = -1;
	struct rte_mempool *mp_cache = NULL;
	struct rte_mempool *mp_nocache = NULL;
	struct rte_mempool *mp_ext = NULL;

	rte_atomic32_init(&synchro);

	/* create a mempool (without cache) */
	mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);
	if (mp_nocache == NULL) {
		printf("cannot allocate mp_nocache mempool\n");
		goto err;
	}
	/* create a mempool (with cache) */
	mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);
	if (mp_cache == NULL) {
		printf("cannot allocate mp_cache mempool\n");
		goto err;
	}
	/* create a mempool with an external handler */
	mp_ext = rte_mempool_create_empty("test_ext",
		MEMPOOL_SIZE, MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		SOCKET_ID_ANY, 0);
	if (mp_ext == NULL) {
		printf("cannot allocate mp_ext mempool\n");
		goto err;
	}
	if (rte_mempool_set_ops_byname(mp_ext, "custom_handler", NULL) < 0) {
		printf("cannot set custom handler\n");
		goto err;
	}
	if (rte_mempool_populate_default(mp_ext) < 0) {
		printf("cannot populate mp_ext mempool\n");
		goto err;
	}
	rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
	/* retrieve the mempool from its name */
	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
		printf("Cannot lookup mempool from its name\n");
		goto err;
	}

	rte_mempool_list_dump(stdout);
	/* basic tests without cache */
	if (test_mempool_basic(mp_nocache, 0) < 0)
		goto err;

	/* basic tests with cache */
	if (test_mempool_basic(mp_cache, 0) < 0)
		goto err;

	/* basic tests with user-owned cache */
	if (test_mempool_basic(mp_nocache, 1) < 0)
		goto err;

	/* more basic tests without cache */
	if (test_mempool_basic_ex(mp_nocache) < 0)
		goto err;

	/* mempool operation test based on single producer and single consumer */
	if (test_mempool_sp_sc() < 0)
		goto err;

	if (test_mempool_creation_with_exceeded_cache_size() < 0)
		goto err;

	if (test_mempool_same_name_twice_creation() < 0)
		goto err;

	if (test_mempool_xmem_misc() < 0)
		goto err;

	rte_mempool_list_dump(stdout);
	ret = 0;

err:
	rte_mempool_free(mp_nocache);
	rte_mempool_free(mp_cache);
	rte_mempool_free(mp_ext);

	return ret;
}
static struct test_command mempool_cmd = {
	.command = "mempool_autotest",
	.callback = test_mempool,
};
REGISTER_TEST_COMMAND(mempool_cmd);