X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest%2Ftest_mempool_perf.c;h=4fac04cd06f761214214ba0a2532c325b32c1d1b;hb=a0fd91cefcc054b770dec6d8fb60db0d3145c45a;hp=86cba8ae7f7f5e779ce087417eaf72b103d2d70c;hpb=21a7f4e2646e1cb6b0dbd6643e5d64f72355af58;p=dpdk.git

diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index 86cba8ae7f..4fac04cd06 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -47,7 +47,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -79,6 +78,9 @@
  *      - One core without cache
  *      - Two cores without cache
  *      - Max. cores without cache
+ *      - One core with user-owned cache
+ *      - Two cores with user-owned cache
+ *      - Max. cores with user-owned cache
  *
  *    - Bulk size (*n_get_bulk*, *n_put_bulk*)
  *
@@ -95,10 +97,23 @@
 #define TIME_S 5
 #define MEMPOOL_ELT_SIZE 2048
 #define MAX_KEEP 128
-#define MEMPOOL_SIZE ((RTE_MAX_LCORE*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
+#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
+
+#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
+#define RET_ERR() do {							\
+		LOG_ERR();						\
+		return -1;						\
+	} while (0)
+#define GOTO_ERR(var, label) do {					\
+		LOG_ERR();						\
+		var = -1;						\
+		goto label;						\
+	} while (0)
 
 static struct rte_mempool *mp;
 static struct rte_mempool *mp_cache, *mp_nocache;
+static int use_external_cache;
+static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
 
 static rte_atomic32_t synchro;
 
@@ -111,7 +126,7 @@ static unsigned n_keep;
 
 /* number of enqueues / dequeues */
 struct mempool_test_stats {
-	unsigned enq_count;
+	uint64_t enq_count;
 } __rte_cache_aligned;
 
 static struct mempool_test_stats stats[RTE_MAX_LCORE];
@@ -135,15 +150,27 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
 	void *obj_table[MAX_KEEP];
 	unsigned i, idx;
 	unsigned lcore_id = rte_lcore_id();
-	int ret;
+	int ret = 0;
 	uint64_t start_cycles, end_cycles;
 	uint64_t time_diff = 0, hz = rte_get_timer_hz();
+	struct rte_mempool_cache *cache;
+
+	if (use_external_cache) {
+		/* Create a user-owned mempool cache. */
+		cache = rte_mempool_cache_create(external_cache_size,
+						 SOCKET_ID_ANY);
+		if (cache == NULL)
+			RET_ERR();
+	} else {
+		/* May be NULL if cache is disabled. */
+		cache = rte_mempool_default_cache(mp, lcore_id);
+	}
 
 	/* n_get_bulk and n_put_bulk must be divisors of n_keep */
 	if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep)
-		return -1;
+		GOTO_ERR(ret, out);
 	if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep)
-		return -1;
+		GOTO_ERR(ret, out);
 
 	stats[lcore_id].enq_count = 0;
 
@@ -158,13 +185,14 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
 			/* get n_keep objects by bulk of n_bulk */
 			idx = 0;
 			while (idx < n_keep) {
-				ret = rte_mempool_get_bulk(mp, &obj_table[idx],
-							   n_get_bulk);
+				ret = rte_mempool_generic_get(mp,
+							      &obj_table[idx],
+							      n_get_bulk,
+							      cache, 0);
 				if (unlikely(ret < 0)) {
-					rte_mempool_dump(mp);
-					rte_ring_dump(mp->ring);
+					rte_mempool_dump(stdout, mp);
 					/* in this case, objects are lost... */
-					return -1;
+					GOTO_ERR(ret, out);
 				}
 				idx += n_get_bulk;
 			}
@@ -172,8 +200,9 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
 			/* put the objects back */
 			idx = 0;
 			while (idx < n_keep) {
-				rte_mempool_put_bulk(mp, &obj_table[idx],
-						     n_put_bulk);
+				rte_mempool_generic_put(mp, &obj_table[idx],
+							n_put_bulk,
+							cache, 0);
 				idx += n_put_bulk;
 			}
 		}
@@ -182,7 +211,13 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
 		stats[lcore_id].enq_count += N;
 	}
 
-	return 0;
+out:
+	if (use_external_cache) {
+		rte_mempool_cache_flush(cache, mp);
+		rte_mempool_cache_free(cache);
+	}
+
+	return ret;
 }
 
 /* launch all the per-lcore test, and display the result */
@@ -190,7 +225,7 @@ static int
 launch_cores(unsigned cores)
 {
 	unsigned lcore_id;
-	unsigned rate;
+	uint64_t rate;
 	int ret;
 	unsigned cores_save = cores;
 
@@ -201,9 +236,11 @@ launch_cores(unsigned cores)
 
 	printf("mempool_autotest cache=%u cores=%u n_get_bulk=%u "
 	       "n_put_bulk=%u n_keep=%u ",
-	       (unsigned) mp->cache_size, cores, n_get_bulk, n_put_bulk, n_keep);
+	       use_external_cache ?
+		external_cache_size : (unsigned) mp->cache_size,
+	       cores, n_get_bulk, n_put_bulk, n_keep);
 
-	if (rte_mempool_count(mp) != MEMPOOL_SIZE) {
+	if (rte_mempool_avail_count(mp) != MEMPOOL_SIZE) {
 		printf("mempool is not full\n");
 		return -1;
 	}
@@ -239,7 +276,7 @@ launch_cores(unsigned cores)
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
 		rate += (stats[lcore_id].enq_count / TIME_S);
 
-	printf("rate_persec=%u\n", rate);
+	printf("rate_persec=%" PRIu64 "\n", rate);
 
 	return 0;
 }
@@ -273,7 +310,7 @@ do_one_mempool_test(unsigned cores)
 	return 0;
 }
 
-int
+static int
 test_mempool_perf(void)
 {
 	rte_atomic32_init(&synchro);
@@ -325,7 +362,27 @@ test_mempool_perf(void)
 	if (do_one_mempool_test(rte_lcore_count()) < 0)
 		return -1;
 
-	rte_mempool_list_dump();
+	/* performance test with 1, 2 and max cores */
+	printf("start performance test (with user-owned cache)\n");
+	mp = mp_nocache;
+	use_external_cache = 1;
+
+	if (do_one_mempool_test(1) < 0)
+		return -1;
+
+	if (do_one_mempool_test(2) < 0)
+		return -1;
+
+	if (do_one_mempool_test(rte_lcore_count()) < 0)
+		return -1;
+
+	rte_mempool_list_dump(stdout);
 
 	return 0;
 }
+
+static struct test_command mempool_perf_cmd = {
+	.command = "mempool_perf_autotest",
+	.callback = test_mempool_perf,
+};
+REGISTER_TEST_COMMAND(mempool_perf_cmd);
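
For reference, the user-owned cache flow that the test now exercises reduces to the sequence below. This is only an illustrative sketch against the calls as they appear in the diff above (rte_mempool_cache_create/flush/free and the generic get/put variants that take an explicit cache plus a flags argument of 0); the pool argument, function name and burst size are made up for the example.

#include <rte_mempool.h>

/*
 * Sketch: create a user-owned cache, pass it explicitly to the generic
 * get/put calls, then flush and free it.  The mempool "pool" is assumed
 * to have been created elsewhere; names here are hypothetical.
 */
static int
run_burst_with_user_cache(struct rte_mempool *pool)
{
	void *objs[32];
	struct rte_mempool_cache *cache;

	cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
					 SOCKET_ID_ANY);
	if (cache == NULL)
		return -1;

	/* Same calls as the test: the cache is passed explicitly, flags = 0. */
	if (rte_mempool_generic_get(pool, objs, 32, cache, 0) < 0) {
		rte_mempool_cache_free(cache);
		return -1;
	}
	rte_mempool_generic_put(pool, objs, 32, cache, 0);

	/* Objects may still sit in the cache; return them before freeing. */
	rte_mempool_cache_flush(cache, pool);
	rte_mempool_cache_free(cache);
	return 0;
}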