/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include "test.h"

/*
 * Mempool performance
 * ===================
 *
 *    Each core gets *n_keep* objects per bulk of *n_get_bulk*. Then,
 *    objects are put back in the pool per bulk of *n_put_bulk*.
 *
 *    This sequence is repeated for TIME_S seconds.
 *
 *    This test is done on the following configurations:
 *
 *    - Cores configuration (*cores*)
 *
 *      - One core with cache
 *      - Two cores with cache
 *      - Max. cores with cache
 *      - One core without cache
 *      - Two cores without cache
 *      - Max. cores without cache
 *      - One core with user-owned cache
 *      - Two cores with user-owned cache
 *      - Max. cores with user-owned cache
 *
 *    - Bulk size (*n_get_bulk*, *n_put_bulk*)
 *
 *      - Bulk get from 1 to 32
 *      - Bulk put from 1 to 32
 *
 *    - Number of kept objects (*n_keep*)
 *
 *      - 32
 *      - 128
 */
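
/*
 * Note on the resulting matrix: with the tables used below ({1, 4, 32}
 * for the get and put bulk sizes, {32, 128} for n_keep, and 1/2/max
 * cores), each of the three cache modes covers 3 * 3 * 2 * 3 = 54
 * configurations, and every configuration runs for at least TIME_S
 * seconds.
 */
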
#define N 65536
#define TIME_S 5
#define MEMPOOL_ELT_SIZE 2048
#define MAX_KEEP 128
#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
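
/*
 * The pool size budgets MAX_KEEP objects plus a full default per-lcore
 * cache (up to RTE_MEMPOOL_CACHE_MAX_SIZE objects) for every lcore, so a
 * bulk get should not fail merely because objects are parked in caches.
 */
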
#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
#define RET_ERR() do {							\
		LOG_ERR();						\
		return -1;						\
	} while (0)
#define GOTO_ERR(var, label) do {					\
		LOG_ERR();						\
		var = -1;						\
		goto label;						\
	} while (0)

static struct rte_mempool *mp;
static struct rte_mempool *mp_cache, *mp_nocache;
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
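
/*
 * Start barrier: slave lcores spin until the master sets this flag, so
 * every core enters its measured loop at roughly the same time.
 */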
static rte_atomic32_t synchro;

/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
static unsigned n_put_bulk;

/* number of objects retrieved from mempool before putting them back */
static unsigned n_keep;

/* number of enqueues / dequeues */
struct mempool_test_stats {
	uint64_t enq_count;
} __rte_cache_aligned;
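
/*
 * Each lcore writes only its own slot; the cache-line alignment of the
 * struct keeps each counter on its own line and avoids false sharing.
 */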
static struct mempool_test_stats stats[RTE_MAX_LCORE];

/*
 * save the object number in the first 4 bytes of object data. All
 * other bytes are set to 0.
 */
static void
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;
	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

static int
per_lcore_mempool_test(__attribute__((unused)) void *arg)
{
	void *obj_table[MAX_KEEP];
	unsigned i, idx;
	unsigned lcore_id = rte_lcore_id();
	int ret = 0;
	uint64_t start_cycles, end_cycles;
	uint64_t time_diff = 0, hz = rte_get_timer_hz();
	struct rte_mempool_cache *cache;

	if (use_external_cache) {
		/* Create a user-owned mempool cache. */
		cache = rte_mempool_cache_create(external_cache_size,
						 SOCKET_ID_ANY);
		if (cache == NULL)
			RET_ERR();
	} else {
		/* May be NULL if cache is disabled. */
		cache = rte_mempool_default_cache(mp, lcore_id);
	}

	/* n_get_bulk and n_put_bulk must be divisors of n_keep */
	if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep)
		GOTO_ERR(ret, out);
	if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep)
		GOTO_ERR(ret, out);

	stats[lcore_id].enq_count = 0;

	/* wait synchro for slaves */
	if (lcore_id != rte_get_master_lcore())
		while (rte_atomic32_read(&synchro) == 0);

	start_cycles = rte_get_timer_cycles();

	while (time_diff/hz < TIME_S) {
		for (i = 0; likely(i < (N/n_keep)); i++) {
			/* get n_keep objects by bulk of n_get_bulk */
			idx = 0;
			while (idx < n_keep) {
				ret = rte_mempool_generic_get(mp,
						&obj_table[idx],
						n_get_bulk, cache, 0);
				if (unlikely(ret < 0)) {
					rte_mempool_dump(stdout, mp);
					/* in this case, objects are lost... */
					ret = -1;
					goto out;
				}
				idx += n_get_bulk;
			}

			/* put the objects back by bulk of n_put_bulk */
			idx = 0;
			while (idx < n_keep) {
				rte_mempool_generic_put(mp, &obj_table[idx],
						n_put_bulk, cache, 0);
				idx += n_put_bulk;
			}
		}
		end_cycles = rte_get_timer_cycles();
		time_diff = end_cycles - start_cycles;
		stats[lcore_id].enq_count += N;
	}

out:
	if (use_external_cache) {
		rte_mempool_cache_flush(cache, mp);
		rte_mempool_cache_free(cache);
	}
	return ret;
}

/* launch all the per-lcore test, and display the result */
static int
launch_cores(unsigned cores)
{
	unsigned lcore_id;
	unsigned cores_save = cores;
	uint64_t rate;
	int ret;

	rte_atomic32_set(&synchro, 0);

	/* reset stats */
	memset(stats, 0, sizeof(stats));

	printf("mempool_autotest cache=%u cores=%u n_get_bulk=%u "
	       "n_put_bulk=%u n_keep=%u ",
	       use_external_cache ?
		   external_cache_size : (unsigned) mp->cache_size,
	       cores, n_get_bulk, n_put_bulk, n_keep);

	if (rte_mempool_avail_count(mp) != MEMPOOL_SIZE) {
		printf("mempool is not full\n");
		return -1;
	}

	/* launch the test on slave lcores until `cores` are running */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(per_lcore_mempool_test, NULL, lcore_id);
	}

	/* start synchro and launch test on master */
	rte_atomic32_set(&synchro, 1);
	ret = per_lcore_mempool_test(NULL);

	cores = cores_save;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	}

	if (ret < 0) {
		printf("per-lcore test returned -1\n");
		return -1;
	}

	rate = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		rate += (stats[lcore_id].enq_count / TIME_S);
	printf("rate_persec=%" PRIu64 "\n", rate);
	return 0;
}

/* for a given number of cores, launch all test cases */
static int
do_one_mempool_test(unsigned cores)
{
	unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
	unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
	unsigned keep_tab[] = { 32, 128, 0 };
	unsigned *get_bulk_ptr;
	unsigned *put_bulk_ptr;
	unsigned *keep_ptr;
	int ret;

	for (get_bulk_ptr = bulk_tab_get; *get_bulk_ptr; get_bulk_ptr++) {
		for (put_bulk_ptr = bulk_tab_put; *put_bulk_ptr; put_bulk_ptr++) {
			for (keep_ptr = keep_tab; *keep_ptr; keep_ptr++) {
				n_get_bulk = *get_bulk_ptr;
				n_put_bulk = *put_bulk_ptr;
				n_keep = *keep_ptr;
				ret = launch_cores(cores);
				if (ret < 0)
					return -1;
			}
		}
	}
	return 0;
}

static int
test_mempool_perf(void)
{
	rte_atomic32_init(&synchro);

	/* create a mempool (without cache) */
	if (mp_nocache == NULL)
		mp_nocache = rte_mempool_create("perf_test_nocache",
			MEMPOOL_SIZE, MEMPOOL_ELT_SIZE, 0, 0,
			NULL, NULL, my_obj_init, NULL,
			SOCKET_ID_ANY, 0);
	if (mp_nocache == NULL)
		RET_ERR();

	/* create a mempool (with cache) */
	if (mp_cache == NULL)
		mp_cache = rte_mempool_create("perf_test_cache",
			MEMPOOL_SIZE, MEMPOOL_ELT_SIZE,
			RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
			NULL, NULL, my_obj_init, NULL,
			SOCKET_ID_ANY, 0);
	if (mp_cache == NULL)
		RET_ERR();

	/* performance test with 1, 2 and max cores */
	printf("start performance test (without cache)\n");
	mp = mp_nocache;
	if (do_one_mempool_test(1) < 0)
		return -1;
	if (do_one_mempool_test(2) < 0)
		return -1;
	if (do_one_mempool_test(rte_lcore_count()) < 0)
		return -1;

	/* performance test with 1, 2 and max cores */
	printf("start performance test (with cache)\n");
	mp = mp_cache;
	if (do_one_mempool_test(1) < 0)
		return -1;
	if (do_one_mempool_test(2) < 0)
		return -1;
	if (do_one_mempool_test(rte_lcore_count()) < 0)
		return -1;

	/* performance test with 1, 2 and max cores */
	printf("start performance test (with user-owned cache)\n");
	mp = mp_nocache;
	use_external_cache = 1;
	if (do_one_mempool_test(1) < 0)
		return -1;
	if (do_one_mempool_test(2) < 0)
		return -1;
	if (do_one_mempool_test(rte_lcore_count()) < 0)
		return -1;

	rte_mempool_list_dump(stdout);
	return 0;
}

REGISTER_TEST_COMMAND(mempool_perf_autotest, test_mempool_perf);
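
/*
 * Example invocation: the command registered above is run from the DPDK
 * test application's interactive prompt. Binary path, coremask and
 * memory-channel count below are placeholders; adjust them for the
 * build layout and target machine:
 *
 *   ./test -c 0xf -n 4
 *   RTE>> mempool_perf_autotest
 */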