X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest%2Ftest_mempool.c;h=b9880b32b8f95ffa330595488044db1265ecf37e;hb=dd0eedb1cfcf0cb7423d859177c5bc6f931eaf8a;hp=c181057117be32a63819e83de4ab061c67aef4c7;hpb=af75078fece3615088e561357c1e97603e43a5fe;p=dpdk.git

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index c181057117..b9880b32b8 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -1,36 +1,34 @@
 /*-
  *   BSD LICENSE
- *
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * version: DPDK.L.1.2.3-3
  */
 
 #include <string.h>
@@ -49,231 +47,46 @@
 #include <rte_memzone.h>
 #include <rte_launch.h>
 #include <rte_cycles.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_mempool.h>
 #include <rte_spinlock.h>
 #include <rte_malloc.h>
-#include <cmdline_parse.h>
-
 #include "test.h"
 
 /*
  * Mempool
  * =======
  *
- * #. Basic tests: done on one core with and without cache:
+ * Basic tests: done on one core with and without cache:
  *
  *   - Get one object, put one object
  *   - Get two objects, put two objects
  *   - Get all objects, test that their content is not modified and
  *     put them back in the pool.
- *
- * #. Performance tests:
- *
- *    Each core get *n_keep* objects per bulk of *n_get_bulk*. Then,
- *    objects are put back in the pool per bulk of *n_put_bulk*.
- *
- *    This sequence is done during TIME_S seconds.
- *
- *    This test is done on the following configurations:
- *
- *    - Cores configuration (*cores*)
- *
- *      - One core with cache
- *      - Two cores with cache
- *      - Max. cores with cache
- *      - One core without cache
- *      - Two cores without cache
- *      - Max. cores without cache
- *
- *    - Bulk size (*n_get_bulk*, *n_put_bulk*)
- *
- *      - Bulk get from 1 to 32
- *      - Bulk put from 1 to 32
- *
- *    - Number of kept objects (*n_keep*)
- *
- *      - 32
- *      - 128
  */
 
-#define N 65536
-#define TIME_S 5
 #define MEMPOOL_ELT_SIZE 2048
-#define MAX_KEEP 128
-#define MEMPOOL_SIZE ((RTE_MAX_LCORE*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
-
-static struct rte_mempool *mp;
-static struct rte_mempool *mp_cache, *mp_nocache;
+#define MAX_KEEP 16
+#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
+
+#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
+#define RET_ERR() do { \
+		LOG_ERR(); \
+		return -1; \
+	} while (0)
+#define GOTO_ERR(var, label) do { \
+		LOG_ERR(); \
+		var = -1; \
+		goto label; \
+	} while (0)
 
 static rte_atomic32_t synchro;
 
-/* number of objects in one bulk operation (get or put) */
-static unsigned n_get_bulk;
-static unsigned n_put_bulk;
-
-/* number of objects retrived from mempool before putting them back */
-static unsigned n_keep;
-
-/* number of enqueues / dequeues */
-struct mempool_test_stats {
-	unsigned enq_count;
-} __rte_cache_aligned;
-
-static struct mempool_test_stats stats[RTE_MAX_LCORE];
-
-static int
-per_lcore_mempool_test(__attribute__((unused)) void *arg)
-{
-	void *obj_table[MAX_KEEP];
-	unsigned i, idx;
-	unsigned lcore_id = rte_lcore_id();
-	int ret;
-	uint64_t start_cycles, end_cycles;
-	uint64_t time_diff = 0, hz = rte_get_hpet_hz();
-
-	/* n_get_bulk and n_put_bulk must be divisors of n_keep */
-	if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep)
-		return -1;
-	if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep)
-		return -1;
-
-	stats[lcore_id].enq_count = 0;
-
-	/* wait synchro for slaves */
-	if (lcore_id != rte_get_master_lcore())
-		while (rte_atomic32_read(&synchro) == 0);
-
-	start_cycles = rte_get_hpet_cycles();
-
-	while (time_diff/hz < TIME_S) {
-		for (i = 0; likely(i < (N/n_keep)); i++) {
-			/* get n_keep objects by bulk of n_bulk */
-			idx = 0;
-			while (idx < n_keep) {
-				ret = rte_mempool_get_bulk(mp, &obj_table[idx],
-							   n_get_bulk);
-				if (unlikely(ret < 0)) {
-					rte_mempool_dump(mp);
-					rte_ring_dump(mp->ring);
-					/* in this case, objects are lost... */
-					return -1;
-				}
-				idx += n_get_bulk;
-			}
-
-			/* put the objects back */
-			idx = 0;
-			while (idx < n_keep) {
-				rte_mempool_put_bulk(mp, &obj_table[idx],
-						     n_put_bulk);
-				idx += n_put_bulk;
-			}
-		}
-		end_cycles = rte_get_hpet_cycles();
-		time_diff = end_cycles - start_cycles;
-		stats[lcore_id].enq_count += N;
-	}
-
-	return 0;
-}
-
-/* launch all the per-lcore test, and display the result */
-static int
-launch_cores(unsigned cores)
-{
-	unsigned lcore_id;
-	unsigned rate;
-	int ret;
-	unsigned cores_save = cores;
-
-	rte_atomic32_set(&synchro, 0);
-
-	/* reset stats */
-	memset(stats, 0, sizeof(stats));
-
-	printf("mempool_autotest cache=%u cores=%u n_get_bulk=%u "
-	       "n_put_bulk=%u n_keep=%u ",
-	       (unsigned) mp->cache_size, cores, n_get_bulk, n_put_bulk, n_keep);
-
-	if (rte_mempool_count(mp) != MEMPOOL_SIZE) {
-		printf("mempool is not full\n");
-		return -1;
-	}
-
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
-		if (cores == 1)
-			break;
-		cores--;
-		rte_eal_remote_launch(per_lcore_mempool_test,
-				      NULL, lcore_id);
-	}
-
-	/* start synchro and launch test on master */
-	rte_atomic32_set(&synchro, 1);
-
-	ret = per_lcore_mempool_test(NULL);
-
-	cores = cores_save;
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
-		if (cores == 1)
-			break;
-		cores--;
-		if (rte_eal_wait_lcore(lcore_id) < 0)
-			ret = -1;
-	}
-
-	if (ret < 0) {
-		printf("per-lcore test returned -1\n");
-		return -1;
-	}
-
-	rate = 0;
-	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
-		rate += (stats[lcore_id].enq_count / TIME_S);
-
-	printf("rate_persec=%u\n", rate);
-
-	return 0;
-}
-
-/* for a given number of core, launch all test cases */
-static int
-do_one_mempool_test(unsigned cores)
-{
-	unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
-	unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
-	unsigned keep_tab[] = { 32, 128, 0 };
-	unsigned *get_bulk_ptr;
-	unsigned *put_bulk_ptr;
-	unsigned *keep_ptr;
-	int ret;
-
-	for (get_bulk_ptr = bulk_tab_get; *get_bulk_ptr; get_bulk_ptr++) {
-		for (put_bulk_ptr = bulk_tab_put; *put_bulk_ptr; put_bulk_ptr++) {
-			for (keep_ptr = keep_tab; *keep_ptr; keep_ptr++) {
-
-				n_get_bulk = *get_bulk_ptr;
-				n_put_bulk = *put_bulk_ptr;
-				n_keep = *keep_ptr;
-				ret = launch_cores(cores);
-
-				if (ret < 0)
-					return -1;
-			}
-		}
-	}
-	return 0;
-}
-
-
 /*
  * save the object number in the first 4 bytes of object data. All
  * other bytes are set to 0.
@@ -283,13 +96,14 @@ my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
 	    void *obj, unsigned i)
 {
 	uint32_t *objnum = obj;
+	memset(obj, 0, mp->elt_size);
 	*objnum = i;
 }
 
 /* basic tests (done on one core) */
 static int
-test_mempool_basic(void)
+test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
 {
 	uint32_t *objnum;
 	void **objtable;
@@ -297,61 +111,74 @@ test_mempool_basic(void)
 	char *obj_data;
 	int ret = 0;
 	unsigned i, j;
-	unsigned old_bulk_count;
+	int offset;
+	struct rte_mempool_cache *cache;
+
+	if (use_external_cache) {
+		/* Create a user-owned mempool cache. */
+		cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
+						 SOCKET_ID_ANY);
+		if (cache == NULL)
+			RET_ERR();
+	} else {
+		/* May be NULL if cache is disabled. */
+		cache = rte_mempool_default_cache(mp, rte_lcore_id());
+	}
 
 	/* dump the mempool status */
-	rte_mempool_dump(mp);
-	old_bulk_count = rte_mempool_get_bulk_count(mp);
-	rte_mempool_dump(mp);
-	if (rte_mempool_set_bulk_count(mp, 0) == 0)
-		return -1;
-	if (rte_mempool_get_bulk_count(mp) == 0)
-		return -1;
-	if (rte_mempool_set_bulk_count(mp, 2) < 0)
-		return -1;
-	if (rte_mempool_get_bulk_count(mp) != 2)
-		return -1;
-	rte_mempool_dump(mp);
-	if (rte_mempool_set_bulk_count(mp, old_bulk_count) < 0)
-		return -1;
-	if (rte_mempool_get_bulk_count(mp) != old_bulk_count)
-		return -1;
-	rte_mempool_dump(mp);
+	rte_mempool_dump(stdout, mp);
 
 	printf("get an object\n");
-	if (rte_mempool_get(mp, &obj) < 0)
-		return -1;
-	rte_mempool_dump(mp);
+	if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
+		GOTO_ERR(ret, out);
+	rte_mempool_dump(stdout, mp);
+
+	/* tests that improve coverage */
+	printf("get object count\n");
+	/* We have to count the extra caches, one in this case. */
+	offset = use_external_cache ? 1 * cache->len : 0;
+	if (rte_mempool_avail_count(mp) + offset != MEMPOOL_SIZE - 1)
+		GOTO_ERR(ret, out);
+
+	printf("get private data\n");
+	if (rte_mempool_get_priv(mp) != (char *)mp +
+			MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
+		GOTO_ERR(ret, out);
+
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
+	printf("get physical address of an object\n");
+	if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+		GOTO_ERR(ret, out);
+#endif
 
 	printf("put the object back\n");
-	rte_mempool_put(mp, obj);
-	rte_mempool_dump(mp);
+	rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+	rte_mempool_dump(stdout, mp);
 
 	printf("get 2 objects\n");
-	if (rte_mempool_get(mp, &obj) < 0)
-		return -1;
-	if (rte_mempool_get(mp, &obj2) < 0) {
-		rte_mempool_put(mp, obj);
-		return -1;
+	if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
+		GOTO_ERR(ret, out);
+	if (rte_mempool_generic_get(mp, &obj2, 1, cache, 0) < 0) {
+		rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+		GOTO_ERR(ret, out);
 	}
-	rte_mempool_dump(mp);
+	rte_mempool_dump(stdout, mp);
 
 	printf("put the objects back\n");
-	rte_mempool_put(mp, obj);
-	rte_mempool_put(mp, obj2);
-	rte_mempool_dump(mp);
+	rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+	rte_mempool_generic_put(mp, &obj2, 1, cache, 0);
+	rte_mempool_dump(stdout, mp);
 
 	/*
 	 * get many objects: we cannot get them all because the cache
 	 * on other cores may not be empty.
 	 */
 	objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
-	if (objtable == NULL) {
-		return -1;
-	}
+	if (objtable == NULL)
+		GOTO_ERR(ret, out);
 
-	for (i=0; i<MEMPOOL_SIZE; i++) {
-		if (rte_mempool_get(mp, &objtable[i]) < 0)
+	for (i = 0; i < MEMPOOL_SIZE; i++) {
+		if (rte_mempool_generic_get(mp, &objtable[i], 1, cache, 0) < 0)
 			break;
 	}
 
 	/*
 	 * for each object, check that its content was not modified,
 	 * and put objects back in pool
 	 */
 	while (i--) {
 		obj = objtable[i];
 		obj_data = obj;
 		objnum = obj;
 		if (*objnum > MEMPOOL_SIZE) {
-			printf("bad object number\n");
+			printf("bad object number(%d)\n", *objnum);
 			ret = -1;
 			break;
 		}
-		for (j=sizeof(*objnum); j<mp->elt_size; j++) {
+		for (j = sizeof(*objnum); j < mp->elt_size; j++) {
 			if (obj_data[j] != 0)
 				ret = -1;
 		}
 
-		rte_mempool_put(mp, objtable[i]);
+		rte_mempool_generic_put(mp, &objtable[i], 1, cache, 0);
 	}
 
 	free(objtable);
 	if (ret == -1)
 		printf("objects were modified!\n");
 
+out:
+	if (use_external_cache) {
+		rte_mempool_cache_flush(cache, mp);
+		rte_mempool_cache_free(cache);
+	}
+
 	return ret;
 }
 
@@ -387,14 +220,17 @@ static int test_mempool_creation_with_exceeded_cache_size(void)
 {
 	struct rte_mempool *mp_cov;
 
-	mp_cov = rte_mempool_create("test_mempool_creation_with_exceeded_cache_size", MEMPOOL_SIZE,
-				    MEMPOOL_ELT_SIZE,
-				    RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
-				    NULL, NULL,
-				    my_obj_init, NULL,
-				    SOCKET_ID_ANY, 0);
-	if(NULL != mp_cov) {
-		return -1;
+	mp_cov = rte_mempool_create("test_mempool_cache_too_big",
+		MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE,
+		RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
+		NULL, NULL,
+		my_obj_init, NULL,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_cov != NULL) {
+		rte_mempool_free(mp_cov);
+		RET_ERR();
 	}
 
 	return 0;
@@ -412,29 +248,30 @@ static int test_mempool_single_producer(void)
 	unsigned int i;
 	void *obj = NULL;
 	uint64_t start_cycles, end_cycles;
-	uint64_t duration = rte_get_hpet_hz() * 8;
+	uint64_t duration = rte_get_timer_hz() / 4;
 
-	start_cycles = rte_get_hpet_cycles();
+	start_cycles = rte_get_timer_cycles();
 	while (1) {
-		end_cycles = rte_get_hpet_cycles();
+		end_cycles = rte_get_timer_cycles();
 		/* duration uses up, stop producing */
 		if (start_cycles + duration < end_cycles)
 			break;
 		rte_spinlock_lock(&scsp_spinlock);
 		for (i = 0; i < MAX_KEEP; i ++) {
-			if (NULL != scsp_obj_table[i])
+			if (NULL != scsp_obj_table[i]) {
 				obj = scsp_obj_table[i];
 				break;
+			}
 		}
 		rte_spinlock_unlock(&scsp_spinlock);
 		if (i >= MAX_KEEP) {
 			continue;
 		}
 		if (rte_mempool_from_obj(obj) != mp_spsc) {
-			printf("test_mempool_single_producer there is an obj not owned by this mempool\n");
-			return -1;
+			printf("obj not owned by this mempool\n");
+			RET_ERR();
 		}
-		rte_mempool_sp_put(mp_spsc, obj);
+		rte_mempool_put(mp_spsc, obj);
 		rte_spinlock_lock(&scsp_spinlock);
 		scsp_obj_table[i] = NULL;
 		rte_spinlock_unlock(&scsp_spinlock);
@@ -451,11 +288,11 @@ static int test_mempool_single_consumer(void)
 	unsigned int i;
 	void * obj;
 	uint64_t start_cycles, end_cycles;
-	uint64_t duration = rte_get_hpet_hz() * 5;
+	uint64_t duration = rte_get_timer_hz() / 8;
 
-	start_cycles = rte_get_hpet_cycles();
+	start_cycles = rte_get_timer_cycles();
 	while (1) {
-		end_cycles = rte_get_hpet_cycles();
+		end_cycles = rte_get_timer_cycles();
 		/* duration uses up, stop consuming */
 		if (start_cycles + duration < end_cycles)
 			break;
@@ -467,7 +304,7 @@ static int test_mempool_single_consumer(void)
 		rte_spinlock_unlock(&scsp_spinlock);
 		if (i >= MAX_KEEP)
 			continue;
-		if (rte_mempool_sc_get(mp_spsc, &obj) < 0)
+		if (rte_mempool_get(mp_spsc, &obj) < 0)
 			break;
 		rte_spinlock_lock(&scsp_spinlock);
 		scsp_obj_table[i] = obj;
@@ -478,14 +315,17 @@ static int test_mempool_single_consumer(void)
 }
 
 /*
- * test function for mempool test based on singple consumer and single producer, can run on one lcore only
+ * test function for mempool test based on single consumer and single producer,
+ * can run on one lcore only
  */
-static int test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
+static int
+test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
 {
 	return test_mempool_single_consumer();
 }
 
-static void my_mp_init(struct rte_mempool * mp, __attribute__((unused)) void * arg)
+static void
+my_mp_init(struct rte_mempool *mp, __attribute__((unused)) void *arg)
 {
 	printf("mempool name is %s\n", mp->name);
 	/* nothing to be implemented here*/
@@ -503,33 +343,41 @@ test_mempool_sp_sc(void)
 	unsigned lcore_next;
 
 	/* create a mempool with single producer/consumer ring */
-	if (NULL == mp_spsc) {
+	if (mp_spsc == NULL) {
 		mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
-				MEMPOOL_ELT_SIZE, 0, 0,
-				my_mp_init, NULL,
-				my_obj_init, NULL,
-				SOCKET_ID_ANY, MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
-		if (NULL == mp_spsc) {
-			return -1;
-		}
+			MEMPOOL_ELT_SIZE, 0, 0,
+			my_mp_init, NULL,
+			my_obj_init, NULL,
+			SOCKET_ID_ANY,
+			MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |
+			MEMPOOL_F_SC_GET);
+		if (mp_spsc == NULL)
+			RET_ERR();
 	}
 	if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
 		printf("Cannot lookup mempool from its name\n");
-		return -1;
+		rte_mempool_free(mp_spsc);
+		RET_ERR();
 	}
 	lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
-	if (RTE_MAX_LCORE <= lcore_next)
-		return -1;
-	if (rte_eal_lcore_role(lcore_next) != ROLE_RTE)
-		return -1;
+	if (lcore_next >= RTE_MAX_LCORE) {
+		rte_mempool_free(mp_spsc);
+		RET_ERR();
+	}
+	if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {
+		rte_mempool_free(mp_spsc);
+		RET_ERR();
+	}
 	rte_spinlock_init(&scsp_spinlock);
 	memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
-	rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL, lcore_next);
-	if(test_mempool_single_producer() < 0)
+	rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL,
+		lcore_next);
+	if (test_mempool_single_producer() < 0)
 		ret = -1;
-	if(rte_eal_wait_lcore(lcore_next) < 0)
+	if (rte_eal_wait_lcore(lcore_next) < 0)
 		ret = -1;
+	rte_mempool_free(mp_spsc);
 
 	return ret;
 }
 
@@ -538,7 +386,7 @@
  * it tests some more basic of mempool
  */
 static int
-test_mempool_basic_ex(struct rte_mempool * mp)
+test_mempool_basic_ex(struct rte_mempool *mp)
 {
 	unsigned i;
 	void **obj;
 	void *err_obj;
@@ -548,38 +396,41 @@ test_mempool_basic_ex(struct rte_mempool * mp)
 	if (mp == NULL)
 		return ret;
 
-	obj = (void **)rte_zmalloc("test_mempool_basic_ex", (MEMPOOL_SIZE * sizeof(void *)), 0);
+	obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE,
+		sizeof(void *), 0);
 	if (obj == NULL) {
 		printf("test_mempool_basic_ex fail to rte_malloc\n");
 		return ret;
 	}
 
-	printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n", mp->name, rte_mempool_free_count(mp));
+	printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n",
+		mp->name, rte_mempool_in_use_count(mp));
 	if (rte_mempool_full(mp) != 1) {
-		printf("test_mempool_basic_ex the mempool is not full but it should be\n");
+		printf("test_mempool_basic_ex the mempool should be full\n");
 		goto fail_mp_basic_ex;
 	}
 
 	for (i = 0; i < MEMPOOL_SIZE; i ++) {
-		if (rte_mempool_mc_get(mp, &obj[i]) < 0) {
-			printf("fail_mp_basic_ex fail to get mempool object for [%u]\n", i);
+		if (rte_mempool_get(mp, &obj[i]) < 0) {
+			printf("test_mp_basic_ex fail to get object for [%u]\n",
+				i);
 			goto fail_mp_basic_ex;
 		}
 	}
 
-	if (rte_mempool_mc_get(mp, &err_obj) == 0) {
-		printf("test_mempool_basic_ex get an impossible obj from mempool\n");
+	if (rte_mempool_get(mp, &err_obj) == 0) {
+		printf("test_mempool_basic_ex get an impossible obj\n");
 		goto fail_mp_basic_ex;
 	}
 	printf("number: %u\n", i);
 
 	if (rte_mempool_empty(mp) != 1) {
-		printf("test_mempool_basic_ex the mempool is not empty but it should be\n");
+		printf("test_mempool_basic_ex the mempool should be empty\n");
 		goto fail_mp_basic_ex;
 	}
 
-	for (i = 0; i < MEMPOOL_SIZE; i ++) {
-		rte_mempool_mp_put(mp, obj[i]);
-	}
+	for (i = 0; i < MEMPOOL_SIZE; i++)
+		rte_mempool_put(mp, obj[i]);
+
 	if (rte_mempool_full(mp) != 1) {
-		printf("test_mempool_basic_ex the mempool is not full but it should be\n");
+		printf("test_mempool_basic_ex the mempool should be full\n");
 		goto fail_mp_basic_ex;
 	}
 
@@ -595,113 +446,174 @@ fail_mp_basic_ex:
 static int
 test_mempool_same_name_twice_creation(void)
 {
-	struct rte_mempool *mp_tc;
-
-	mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE,
-			MEMPOOL_ELT_SIZE, 0, 0,
-			NULL, NULL,
-			NULL, NULL,
-			SOCKET_ID_ANY, 0);
-	if (NULL == mp_tc)
-		return -1;
+	struct rte_mempool *mp_tc, *mp_tc2;
+
+	mp_tc = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE, 0, 0,
+		NULL, NULL,
+		NULL, NULL,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_tc == NULL)
+		RET_ERR();
+
+	mp_tc2 = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE, 0, 0,
+		NULL, NULL,
+		NULL, NULL,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_tc2 != NULL) {
+		rte_mempool_free(mp_tc);
+		rte_mempool_free(mp_tc2);
+		RET_ERR();
+	}
 
-	mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE,
-			MEMPOOL_ELT_SIZE, 0, 0,
-			NULL, NULL,
-			NULL, NULL,
-			SOCKET_ID_ANY, 0);
-	if (NULL != mp_tc)
-		return -1;
-
+	rte_mempool_free(mp_tc);
 	return 0;
 }
 
+/*
+ * Basic test for mempool_xmem functions.
+ */
+static int
+test_mempool_xmem_misc(void)
+{
+	uint32_t elt_num, total_size;
+	size_t sz;
+	ssize_t usz;
+
+	elt_num = MAX_KEEP;
+	total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL);
+	sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX);
+
+	usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
+		MEMPOOL_PG_SHIFT_MAX);
+
+	if (sz != (size_t)usz) {
+		printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
+			"returns: %#zx, while expected: %#zx;\n",
+			__func__, elt_num, total_size, sz, (size_t)usz);
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+walk_cb(struct rte_mempool *mp, void *userdata __rte_unused)
+{
+	printf("\t%s\n", mp->name);
+}
+
-int
+static int
 test_mempool(void)
 {
+	struct rte_mempool *mp_cache = NULL;
+	struct rte_mempool *mp_nocache = NULL;
+	struct rte_mempool *mp_stack = NULL;
+
 	rte_atomic32_init(&synchro);
 
 	/* create a mempool (without cache) */
-	if (mp_nocache == NULL)
-		mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
-						MEMPOOL_ELT_SIZE, 0, 0,
-						NULL, NULL,
-						my_obj_init, NULL,
-						SOCKET_ID_ANY, 0);
-	if (mp_nocache == NULL)
-		return -1;
+	mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE, 0, 0,
+		NULL, NULL,
+		my_obj_init, NULL,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_nocache == NULL) {
+		printf("cannot allocate mp_nocache mempool\n");
+		goto err;
+	}
 
 	/* create a mempool (with cache) */
-	if (mp_cache == NULL)
-		mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
-					      MEMPOOL_ELT_SIZE,
-					      RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
-					      NULL, NULL,
-					      my_obj_init, NULL,
-					      SOCKET_ID_ANY, 0);
-	if (mp_cache == NULL)
-		return -1;
+	mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE,
+		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+		NULL, NULL,
+		my_obj_init, NULL,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_cache == NULL) {
+		printf("cannot allocate mp_cache mempool\n");
+		goto err;
+	}
+
+	/* create a mempool with an external handler */
+	mp_stack = rte_mempool_create_empty("test_stack",
+		MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE,
+		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_stack == NULL) {
+		printf("cannot allocate mp_stack mempool\n");
+		goto err;
+	}
+
+	if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
+		printf("cannot set stack handler\n");
+		goto err;
+	}
+	if (rte_mempool_populate_default(mp_stack) < 0) {
+		printf("cannot populate mp_stack mempool\n");
+		goto err;
+	}
+	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
 
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
 		printf("Cannot lookup mempool from its name\n");
-		return -1;
+		goto err;
 	}
 
-	rte_mempool_list_dump();
+	printf("Walk into mempools:\n");
+	rte_mempool_walk(walk_cb, NULL);
+
+	rte_mempool_list_dump(stdout);
 
 	/* basic tests without cache */
-	mp = mp_nocache;
-	if (test_mempool_basic() < 0)
-		return -1;
+	if (test_mempool_basic(mp_nocache, 0) < 0)
+		goto err;
 
 	/* basic tests with cache */
-	mp = mp_cache;
-	if (test_mempool_basic() < 0)
-		return -1;
+	if (test_mempool_basic(mp_cache, 0) < 0)
+		goto err;
+
+	/* basic tests with user-owned cache */
+	if (test_mempool_basic(mp_nocache, 1) < 0)
+		goto err;
 
 	/* more basic tests without cache */
 	if (test_mempool_basic_ex(mp_nocache) < 0)
-		return -1;
-
-	/* performance test with 1, 2 and max cores */
-	printf("start performance test (without cache)\n");
-	mp = mp_nocache;
-
-	if (do_one_mempool_test(1) < 0)
-		return -1;
-
-	if (do_one_mempool_test(2) < 0)
-		return -1;
-
-	if (do_one_mempool_test(rte_lcore_count()) < 0)
-		return -1;
-
-	/* performance test with 1, 2 and max cores */
-	printf("start performance test (with cache)\n");
-	mp = mp_cache;
-
-	if (do_one_mempool_test(1) < 0)
-		return -1;
-
-	if (do_one_mempool_test(2) < 0)
-		return -1;
-
-	if (do_one_mempool_test(rte_lcore_count()) < 0)
-		return -1;
+		goto err;
 
 	/* mempool operation test based on single producer and single comsumer */
 	if (test_mempool_sp_sc() < 0)
-		return -1;
+		goto err;
 
 	if (test_mempool_creation_with_exceeded_cache_size() < 0)
-		return -1;
+		goto err;
 
 	if (test_mempool_same_name_twice_creation() < 0)
-		return -1;
+		goto err;
 
-	rte_mempool_list_dump();
+	if (test_mempool_xmem_misc() < 0)
+		goto err;
+
+	/* test the stack handler */
+	if (test_mempool_basic(mp_stack, 1) < 0)
+		goto err;
+
+	rte_mempool_list_dump(stdout);
 
 	return 0;
+
+err:
+	rte_mempool_free(mp_nocache);
+	rte_mempool_free(mp_cache);
+	rte_mempool_free(mp_stack);
+	return -1;
 }
+
+REGISTER_TEST_COMMAND(mempool_autotest, test_mempool);
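
Editor's note on the API exercised above: the patch moves the test from the old global-pool `rte_mempool_get()`/`rte_mempool_put()` style to the reworked API with an explicit, optionally user-owned cache. The following standalone sketch (not part of the patch) walks through the same call sequence that test_mempool_basic() exercises when use_external_cache is set. It assumes a 16.07-era DPDK environment with an initialized EAL; the pool name and sizes are illustrative only, and error handling is abbreviated.

/* Minimal usage sketch for the reworked mempool cache API; assumes
 * DPDK 16.07-era headers and EAL initialization via rte_eal_init(). */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

int main(int argc, char **argv)
{
	struct rte_mempool *mp;
	struct rte_mempool_cache *cache;
	void *obj;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Pool created with cache_size 0, i.e. no built-in per-lcore cache. */
	mp = rte_mempool_create("sketch_pool", 1023, 2048, 0, 0,
		NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return -1;

	/* A user-owned cache, created the same way the test does. */
	cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
		SOCKET_ID_ANY);
	if (cache == NULL)
		return -1;

	/* The generic get/put take the cache explicitly; flags is 0 here. */
	if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) == 0)
		rte_mempool_generic_put(mp, &obj, 1, cache, 0);

	/* Objects may still sit in the user-owned cache: flush it back
	 * into the pool before freeing, as the test teardown does. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
	rte_mempool_free(mp);
	return 0;
}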
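The other new path the patch covers is the pluggable handler ("ops") API: a pool is created empty, bound to a named handler, then populated. A hedged sketch of that sequence, mirroring the mp_stack setup in the hunk above (the "stack" LIFO handler ships with the same DPDK release this patch targets; names and sizes are again illustrative):

/* Sketch: create a mempool backed by the "stack" ops, as the test's
 * mp_stack setup does; assumes the 16.07-era API shown in the diff. */
#include <rte_mempool.h>

static struct rte_mempool *
make_stack_pool(void)
{
	struct rte_mempool *mp;

	/* Allocate the pool shell only; no objects yet. */
	mp = rte_mempool_create_empty("sketch_stack", 1023, 2048,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;

	/* Select the LIFO "stack" handler instead of the default ring,
	 * then populate; ops must be set before population. */
	if (rte_mempool_set_ops_byname(mp, "stack", NULL) < 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}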