/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include "test.h"
/*
 * Mempool performance
 * ===================
 *
 *    Each core gets *n_keep* objects per bulk of *n_get_bulk*. Then,
 *    objects are put back in the pool per bulk of *n_put_bulk*.
 *
 *    This sequence is repeated for TIME_S seconds.
 *
 *    This test is done on the following configurations:
 *
 *    - Cores configuration (*cores*)
 *
 *      - One core with cache
 *      - Two cores with cache
 *      - Max. cores with cache
 *      - One core without cache
 *      - Two cores without cache
 *      - Max. cores without cache
 *
 *    - Bulk size (*n_get_bulk*, *n_put_bulk*)
 *
 *      - Bulk get from 1 to 32
 *      - Bulk put from 1 to 32
 *
 *    - Number of kept objects (*n_keep*)
 *
 *      - 32
 *      - 128
 */
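/*
 * The test is registered at the bottom of this file as the
 * "mempool_perf_autotest" command of the test application. Each
 * configuration prints one result line of the form:
 * "mempool_autotest cache=... cores=... n_get_bulk=... n_put_bulk=...
 *  n_keep=... rate_persec=..."
 */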
#define N 65536          /* objects moved per pass of the timed loop */
#define TIME_S 5         /* duration of one test run, in seconds */
#define MEMPOOL_ELT_SIZE 2048
#define MAX_KEEP 128
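/*
 * The pool must hold enough objects for the worst case: every lcore
 * keeping MAX_KEEP objects while its per-lcore cache is full.
 */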
#define MEMPOOL_SIZE ((RTE_MAX_LCORE*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
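/* pool under test: set to either mp_cache or mp_nocache before each run */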
static struct rte_mempool *mp;
static struct rte_mempool *mp_cache, *mp_nocache;
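/*
 * Start flag: slave lcores spin until the master sets it to 1, so that
 * all cores enter the timed loop at the same time.
 */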
static rte_atomic32_t synchro;
/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
static unsigned n_put_bulk;

/* number of objects retrieved from mempool before putting them back */
static unsigned n_keep;
/* number of enqueues / dequeues; the per-lcore counters are cache
 * aligned so that cores do not share a cache line */
struct mempool_test_stats {
	unsigned enq_count;
} __rte_cache_aligned;

static struct mempool_test_stats stats[RTE_MAX_LCORE];
/*
 * save the object number in the first 4 bytes of object data. All
 * other bytes are set to 0.
 */
static void
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;

	memset(obj, 0, mp->elt_size);
	*objnum = i;
}
static int
per_lcore_mempool_test(__attribute__((unused)) void *arg)
{
	void *obj_table[MAX_KEEP];
	unsigned i, idx;
	unsigned lcore_id = rte_lcore_id();
	int ret;
	uint64_t start_cycles, end_cycles;
	uint64_t time_diff = 0, hz = rte_get_timer_hz();

	/* n_get_bulk and n_put_bulk must be divisors of n_keep: the loops
	 * below advance through obj_table in steps of the bulk size */
	if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep)
		return -1;
	if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep)
		return -1;

	stats[lcore_id].enq_count = 0;

	/* wait synchro for slaves */
	if (lcore_id != rte_get_master_lcore())
		while (rte_atomic32_read(&synchro) == 0);

	start_cycles = rte_get_timer_cycles();

	while (time_diff/hz < TIME_S) {
		for (i = 0; likely(i < (N/n_keep)); i++) {
			/* get n_keep objects by bulk of n_get_bulk */
			idx = 0;
			while (idx < n_keep) {
				ret = rte_mempool_get_bulk(mp, &obj_table[idx],
							   n_get_bulk);
				if (unlikely(ret < 0)) {
					rte_mempool_dump(stdout, mp);
					rte_ring_dump(stdout, mp->ring);
					/* in this case, objects are lost... */
					return -1;
				}
				idx += n_get_bulk;
			}

			/* put the objects back */
			idx = 0;
			while (idx < n_keep) {
				rte_mempool_put_bulk(mp, &obj_table[idx],
						     n_put_bulk);
				idx += n_put_bulk;
			}
		}
		end_cycles = rte_get_timer_cycles();
		time_diff = end_cycles - start_cycles;
		stats[lcore_id].enq_count += N;
	}

	return 0;
}
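/*
 * Accounting note: each pass of the for loop above gets and puts back
 * n_keep objects, so one iteration of the timed while loop moves N
 * objects through the pool. The rate printed by launch_cores() below
 * is therefore in objects per second.
 */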
/* launch all the per-lcore test, and display the result */
static int
launch_cores(unsigned cores)
{
	unsigned lcore_id;
	unsigned rate;
	int ret;
	unsigned cores_save = cores;

	rte_atomic32_set(&synchro, 0);

	/* reset stats */
	memset(stats, 0, sizeof(stats));

	printf("mempool_autotest cache=%u cores=%u n_get_bulk=%u "
	       "n_put_bulk=%u n_keep=%u ",
	       (unsigned) mp->cache_size, cores, n_get_bulk, n_put_bulk, n_keep);

	if (rte_mempool_count(mp) != MEMPOOL_SIZE) {
		printf("mempool is not full\n");
		return -1;
	}

	/* launch the test on the requested number of slave lcores */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(per_lcore_mempool_test,
				      NULL, lcore_id);
	}

	/* start synchro and launch test on master */
	rte_atomic32_set(&synchro, 1);

	ret = per_lcore_mempool_test(NULL);

	cores = cores_save;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	}

	if (ret < 0) {
		printf("per-lcore test returned -1\n");
		return -1;
	}

	rate = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		rate += (stats[lcore_id].enq_count / TIME_S);

	printf("rate_persec=%u\n", rate);

	return 0;
}
/* for a given number of cores, launch all test cases */
static int
do_one_mempool_test(unsigned cores)
{
	unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
	unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
	unsigned keep_tab[] = { 32, 128, 0 };
	unsigned *get_bulk_ptr;
	unsigned *put_bulk_ptr;
	unsigned *keep_ptr;
	int ret;

	/* the tables are zero-terminated: iterate over every combination */
	for (get_bulk_ptr = bulk_tab_get; *get_bulk_ptr; get_bulk_ptr++) {
		for (put_bulk_ptr = bulk_tab_put; *put_bulk_ptr; put_bulk_ptr++) {
			for (keep_ptr = keep_tab; *keep_ptr; keep_ptr++) {

				n_get_bulk = *get_bulk_ptr;
				n_put_bulk = *put_bulk_ptr;
				n_keep = *keep_ptr;
				ret = launch_cores(cores);

				if (ret < 0)
					return -1;
			}
		}
	}
	return 0;
}
static int
test_mempool_perf(void)
{
	rte_atomic32_init(&synchro);

	/* create a mempool (without cache) */
	if (mp_nocache == NULL)
		mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
						MEMPOOL_ELT_SIZE, 0, 0,
						NULL, NULL,
						my_obj_init, NULL,
						SOCKET_ID_ANY, 0);
	if (mp_nocache == NULL)
		return -1;

	/* create a mempool (with cache) */
	if (mp_cache == NULL)
		mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
					      MEMPOOL_ELT_SIZE,
					      RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
					      NULL, NULL,
					      my_obj_init, NULL,
					      SOCKET_ID_ANY, 0);
	if (mp_cache == NULL)
		return -1;

	/* performance test with 1, 2 and max cores */
	printf("start performance test (without cache)\n");
	mp = mp_nocache;

	if (do_one_mempool_test(1) < 0)
		return -1;

	if (do_one_mempool_test(2) < 0)
		return -1;

	if (do_one_mempool_test(rte_lcore_count()) < 0)
		return -1;

	/* performance test with 1, 2 and max cores */
	printf("start performance test (with cache)\n");
	mp = mp_cache;

	if (do_one_mempool_test(1) < 0)
		return -1;

	if (do_one_mempool_test(2) < 0)
		return -1;

	if (do_one_mempool_test(rte_lcore_count()) < 0)
		return -1;

	rte_mempool_list_dump(stdout);

	return 0;
}
static struct test_command mempool_perf_cmd = {
	.command = "mempool_perf_autotest",
	.callback = test_mempool_perf,
};
REGISTER_TEST_COMMAND(mempool_perf_cmd);