X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest%2Ftest_mempool.c;h=084842fdaa640942e716631ede38a0ecd71a6979;hb=25d5c40f252fb77ed78a7baa5d6e54912ad83941;hp=238ff52058a1687811f09b6fedd105c989990baf;hpb=a0fd91cefcc054b770dec6d8fb60db0d3145c45a;p=dpdk.git diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c index 238ff52058..084842fdaa 100644 --- a/app/test/test_mempool.c +++ b/app/test/test_mempool.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #include @@ -44,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -52,10 +22,11 @@ #include #include #include -#include #include #include #include +#include +#include #include "test.h" @@ -88,105 +59,12 @@ static rte_atomic32_t synchro; -/* - * Simple example of custom mempool structure. Holds pointers to all the - * elements which are simply malloc'd in this example. - */ -struct custom_mempool { - rte_spinlock_t lock; - unsigned count; - unsigned size; - void *elts[]; -}; - -/* - * Loop through all the element pointers and allocate a chunk of memory, then - * insert that memory into the ring. 
- */ -static int -custom_mempool_alloc(struct rte_mempool *mp) -{ - struct custom_mempool *cm; - - cm = rte_zmalloc("custom_mempool", - sizeof(struct custom_mempool) + mp->size * sizeof(void *), 0); - if (cm == NULL) - return -ENOMEM; - - rte_spinlock_init(&cm->lock); - cm->count = 0; - cm->size = mp->size; - mp->pool_data = cm; - return 0; -} - -static void -custom_mempool_free(struct rte_mempool *mp) -{ - rte_free((void *)(mp->pool_data)); -} - -static int -custom_mempool_enqueue(struct rte_mempool *mp, void * const *obj_table, - unsigned n) -{ - struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data); - int ret = 0; - - rte_spinlock_lock(&cm->lock); - if (cm->count + n > cm->size) { - ret = -ENOBUFS; - } else { - memcpy(&cm->elts[cm->count], obj_table, sizeof(void *) * n); - cm->count += n; - } - rte_spinlock_unlock(&cm->lock); - return ret; -} - - -static int -custom_mempool_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) -{ - struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data); - int ret = 0; - - rte_spinlock_lock(&cm->lock); - if (n > cm->count) { - ret = -ENOENT; - } else { - cm->count -= n; - memcpy(obj_table, &cm->elts[cm->count], sizeof(void *) * n); - } - rte_spinlock_unlock(&cm->lock); - return ret; -} - -static unsigned -custom_mempool_get_count(const struct rte_mempool *mp) -{ - struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data); - - return cm->count; -} - -static struct rte_mempool_ops mempool_ops_custom = { - .name = "custom_handler", - .alloc = custom_mempool_alloc, - .free = custom_mempool_free, - .enqueue = custom_mempool_enqueue, - .dequeue = custom_mempool_dequeue, - .get_count = custom_mempool_get_count, -}; - -MEMPOOL_REGISTER_OPS(mempool_ops_custom); - /* * save the object number in the first 4 bytes of object data. All * other bytes are set to 0. 
*/ static void -my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg, +my_obj_init(struct rte_mempool *mp, __rte_unused void *arg, void *obj, unsigned i) { uint32_t *objnum = obj; @@ -223,7 +101,7 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache) rte_mempool_dump(stdout, mp); printf("get an object\n"); - if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0) + if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0) GOTO_ERR(ret, out); rte_mempool_dump(stdout, mp); @@ -239,28 +117,28 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache) MEMPOOL_HEADER_SIZE(mp, mp->cache_size)) GOTO_ERR(ret, out); -#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */ +#ifndef RTE_EXEC_ENV_FREEBSD /* rte_mem_virt2iova() not supported on bsd */ printf("get physical address of an object\n"); - if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj)) + if (rte_mempool_virt2iova(obj) != rte_mem_virt2iova(obj)) GOTO_ERR(ret, out); #endif printf("put the object back\n"); - rte_mempool_generic_put(mp, &obj, 1, cache, 0); + rte_mempool_generic_put(mp, &obj, 1, cache); rte_mempool_dump(stdout, mp); printf("get 2 objects\n"); - if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0) + if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0) GOTO_ERR(ret, out); - if (rte_mempool_generic_get(mp, &obj2, 1, cache, 0) < 0) { - rte_mempool_generic_put(mp, &obj, 1, cache, 0); + if (rte_mempool_generic_get(mp, &obj2, 1, cache) < 0) { + rte_mempool_generic_put(mp, &obj, 1, cache); GOTO_ERR(ret, out); } rte_mempool_dump(stdout, mp); printf("put the objects back\n"); - rte_mempool_generic_put(mp, &obj, 1, cache, 0); - rte_mempool_generic_put(mp, &obj2, 1, cache, 0); + rte_mempool_generic_put(mp, &obj, 1, cache); + rte_mempool_generic_put(mp, &obj2, 1, cache); rte_mempool_dump(stdout, mp); /* @@ -272,7 +150,7 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache) GOTO_ERR(ret, out); for (i = 0; i < MEMPOOL_SIZE; i++) { - if (rte_mempool_generic_get(mp, &objtable[i], 1, cache, 0) < 0) + if (rte_mempool_generic_get(mp, &objtable[i], 1, cache) < 0) break; } @@ -294,7 +172,7 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache) ret = -1; } - rte_mempool_generic_put(mp, &objtable[i], 1, cache, 0); + rte_mempool_generic_put(mp, &objtable[i], 1, cache); } free(objtable); @@ -413,13 +291,13 @@ static int test_mempool_single_consumer(void) * can run on one lcore only */ static int -test_mempool_launch_single_consumer(__attribute__((unused)) void *arg) +test_mempool_launch_single_consumer(__rte_unused void *arg) { return test_mempool_single_consumer(); } static void -my_mp_init(struct rte_mempool *mp, __attribute__((unused)) void *arg) +my_mp_init(struct rte_mempool *mp, __rte_unused void *arg) { printf("mempool name is %s\n", mp->name); /* nothing to be implemented here*/ @@ -450,17 +328,17 @@ test_mempool_sp_sc(void) } if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) { printf("Cannot lookup mempool from its name\n"); - rte_mempool_free(mp_spsc); - RET_ERR(); + ret = -1; + goto err; } lcore_next = rte_get_next_lcore(lcore_id, 0, 1); if (lcore_next >= RTE_MAX_LCORE) { - rte_mempool_free(mp_spsc); - RET_ERR(); + ret = -1; + goto err; } if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) { - rte_mempool_free(mp_spsc); - RET_ERR(); + ret = -1; + goto err; } rte_spinlock_init(&scsp_spinlock); memset(scsp_obj_table, 0, sizeof(scsp_obj_table)); @@ -471,7 +349,10 @@ test_mempool_sp_sc(void) if (rte_eal_wait_lcore(lcore_next) < 0) ret = -1; + 
+err: rte_mempool_free(mp_spsc); + mp_spsc = NULL; return ret; } @@ -567,39 +448,48 @@ test_mempool_same_name_twice_creation(void) return 0; } -/* - * BAsic test for mempool_xmem functions. - */ -static int -test_mempool_xmem_misc(void) +static void +walk_cb(struct rte_mempool *mp, void *userdata __rte_unused) { - uint32_t elt_num, total_size; - size_t sz; - ssize_t usz; - - elt_num = MAX_KEEP; - total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL); - sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX); - - usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1, - MEMPOOL_PG_SHIFT_MAX); - - if (sz != (size_t)usz) { - printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) " - "returns: %#zx, while expected: %#zx;\n", - __func__, elt_num, total_size, sz, (size_t)usz); - return -1; - } + printf("\t%s\n", mp->name); +} - return 0; +struct mp_data { + int16_t ret; +}; + +static void +test_mp_mem_init(struct rte_mempool *mp, + __rte_unused void *opaque, + __rte_unused struct rte_mempool_memhdr *memhdr, + __rte_unused unsigned int mem_idx) +{ + struct mp_data *data = opaque; + + if (mp == NULL) { + data->ret = -1; + return; + } + /* nothing to be implemented here*/ + data->ret = 0; } static int test_mempool(void) { + int ret = -1; + uint32_t nb_objs = 0; + uint32_t nb_mem_chunks = 0; struct rte_mempool *mp_cache = NULL; struct rte_mempool *mp_nocache = NULL; - struct rte_mempool *mp_ext = NULL; + struct rte_mempool *mp_stack_anon = NULL; + struct rte_mempool *mp_stack_mempool_iter = NULL; + struct rte_mempool *mp_stack = NULL; + struct rte_mempool *default_pool = NULL; + struct mp_data cb_arg = { + .ret = -1 + }; + const char *default_pool_ops = rte_mbuf_best_mempool_ops(); rte_atomic32_init(&synchro); @@ -612,7 +502,7 @@ test_mempool(void) if (mp_nocache == NULL) { printf("cannot allocate mp_nocache mempool\n"); - goto err; + GOTO_ERR(ret, err); } /* create a mempool (with cache) */ @@ -625,80 +515,154 @@ test_mempool(void) if (mp_cache == NULL) { printf("cannot allocate mp_cache mempool\n"); - goto err; + GOTO_ERR(ret, err); } + /* create an empty mempool */ + mp_stack_anon = rte_mempool_create_empty("test_stack_anon", + MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, + RTE_MEMPOOL_CACHE_MAX_SIZE, 0, + SOCKET_ID_ANY, 0); + + if (mp_stack_anon == NULL) + GOTO_ERR(ret, err); + + /* populate an empty mempool */ + ret = rte_mempool_populate_anon(mp_stack_anon); + printf("%s ret = %d\n", __func__, ret); + if (ret < 0) + GOTO_ERR(ret, err); + + /* Try to populate when already populated */ + ret = rte_mempool_populate_anon(mp_stack_anon); + if (ret != 0) + GOTO_ERR(ret, err); + + /* create a mempool */ + mp_stack_mempool_iter = rte_mempool_create("test_iter_obj", + MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, + RTE_MEMPOOL_CACHE_MAX_SIZE, 0, + NULL, NULL, + my_obj_init, NULL, + SOCKET_ID_ANY, 0); + + if (mp_stack_mempool_iter == NULL) + GOTO_ERR(ret, err); + + /* test to initialize mempool objects and memory */ + nb_objs = rte_mempool_obj_iter(mp_stack_mempool_iter, rte_pktmbuf_init, + NULL); + if (nb_objs == 0) + GOTO_ERR(ret, err); + + nb_mem_chunks = rte_mempool_mem_iter(mp_stack_mempool_iter, + test_mp_mem_init, &cb_arg); + if (nb_mem_chunks == 0 || cb_arg.ret < 0) + GOTO_ERR(ret, err); + /* create a mempool with an external handler */ - mp_ext = rte_mempool_create_empty("test_ext", + mp_stack = rte_mempool_create_empty("test_stack", MEMPOOL_SIZE, MEMPOOL_ELT_SIZE, RTE_MEMPOOL_CACHE_MAX_SIZE, 0, SOCKET_ID_ANY, 0); - if (mp_ext == NULL) { - printf("cannot allocate mp_ext mempool\n"); - 
goto err; + if (mp_stack == NULL) { + printf("cannot allocate mp_stack mempool\n"); + GOTO_ERR(ret, err); } - if (rte_mempool_set_ops_byname(mp_ext, "custom_handler", NULL) < 0) { - printf("cannot set custom handler\n"); - goto err; + if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) { + printf("cannot set stack handler\n"); + GOTO_ERR(ret, err); } - if (rte_mempool_populate_default(mp_ext) < 0) { - printf("cannot populate mp_ext mempool\n"); - goto err; + if (rte_mempool_populate_default(mp_stack) < 0) { + printf("cannot populate mp_stack mempool\n"); + GOTO_ERR(ret, err); + } + rte_mempool_obj_iter(mp_stack, my_obj_init, NULL); + + /* Create a mempool based on Default handler */ + printf("Testing %s mempool handler\n", default_pool_ops); + default_pool = rte_mempool_create_empty("default_pool", + MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, + RTE_MEMPOOL_CACHE_MAX_SIZE, 0, + SOCKET_ID_ANY, 0); + + if (default_pool == NULL) { + printf("cannot allocate default mempool\n"); + GOTO_ERR(ret, err); + } + if (rte_mempool_set_ops_byname(default_pool, + default_pool_ops, NULL) < 0) { + printf("cannot set %s handler\n", default_pool_ops); + GOTO_ERR(ret, err); } - rte_mempool_obj_iter(mp_ext, my_obj_init, NULL); + if (rte_mempool_populate_default(default_pool) < 0) { + printf("cannot populate %s mempool\n", default_pool_ops); + GOTO_ERR(ret, err); + } + rte_mempool_obj_iter(default_pool, my_obj_init, NULL); /* retrieve the mempool from its name */ if (rte_mempool_lookup("test_nocache") != mp_nocache) { printf("Cannot lookup mempool from its name\n"); - goto err; + GOTO_ERR(ret, err); } + printf("Walk into mempools:\n"); + rte_mempool_walk(walk_cb, NULL); + rte_mempool_list_dump(stdout); /* basic tests without cache */ if (test_mempool_basic(mp_nocache, 0) < 0) - goto err; + GOTO_ERR(ret, err); /* basic tests with cache */ if (test_mempool_basic(mp_cache, 0) < 0) - goto err; + GOTO_ERR(ret, err); /* basic tests with user-owned cache */ if (test_mempool_basic(mp_nocache, 1) < 0) - goto err; + GOTO_ERR(ret, err); /* more basic tests without cache */ if (test_mempool_basic_ex(mp_nocache) < 0) - goto err; + GOTO_ERR(ret, err); /* mempool operation test based on single producer and single comsumer */ if (test_mempool_sp_sc() < 0) - goto err; + GOTO_ERR(ret, err); if (test_mempool_creation_with_exceeded_cache_size() < 0) - goto err; + GOTO_ERR(ret, err); if (test_mempool_same_name_twice_creation() < 0) - goto err; + GOTO_ERR(ret, err); - if (test_mempool_xmem_misc() < 0) - goto err; + /* test the stack handler */ + if (test_mempool_basic(mp_stack, 1) < 0) + GOTO_ERR(ret, err); + + if (test_mempool_basic(default_pool, 1) < 0) + GOTO_ERR(ret, err); rte_mempool_list_dump(stdout); - return 0; + ret = 0; err: rte_mempool_free(mp_nocache); rte_mempool_free(mp_cache); - rte_mempool_free(mp_ext); - return -1; + rte_mempool_free(mp_stack_anon); + rte_mempool_free(mp_stack_mempool_iter); + rte_mempool_free(mp_stack); + rte_mempool_free(default_pool); + + return ret; } -static struct test_command mempool_cmd = { - .command = "mempool_autotest", - .callback = test_mempool, -}; -REGISTER_TEST_COMMAND(mempool_cmd); +REGISTER_TEST_COMMAND(mempool_autotest, test_mempool);
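
Editor's note, not part of the patch above: a minimal sketch of the post-change mempool usage that this diff migrates the test to, namely rte_mempool_generic_get()/rte_mempool_generic_put() without the old flags argument and the in-tree "stack" ops replacing the removed custom_handler. It assumes a DPDK build of roughly this era (17.11 or later) with the stack mempool driver enabled; the pool name and element sizes are arbitrary example values, not taken from the patch.

/* Illustrative sketch only. Assumes rte_mempool_generic_get()/put() take no
 * flags argument (as in the patched test) and that the "stack" mempool ops
 * are available. Pool name and sizes are arbitrary for the example. */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_mempool.h>

int
main(int argc, char **argv)
{
	struct rte_mempool *mp;
	void *obj;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Create an empty pool, then bind it to the "stack" ops and
	 * populate it, mirroring what the reworked test does for mp_stack. */
	mp = rte_mempool_create_empty("example_pool", 8192, 2048,
				      RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
				      SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return -1;

	if (rte_mempool_set_ops_byname(mp, "stack", NULL) < 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return -1;
	}

	/* Post-change API: no flags argument; a NULL cache pointer bypasses
	 * the per-lcore cache and goes straight to the pool's ops. */
	if (rte_mempool_generic_get(mp, &obj, 1, NULL) == 0) {
		/* ... use obj ... */
		rte_mempool_generic_put(mp, &obj, 1, NULL);
	}

	rte_mempool_free(mp);
	return 0;
}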