/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include <cmdline_parse.h>

#include "test.h"

/*
 * Mempool performance
 * ===================
 *
 *    Each core gets *n_keep* objects in bursts of *n_get_bulk*. Then,
 *    the objects are put back into the pool in bursts of *n_put_bulk*.
 *
 *    This sequence is repeated for TIME_S seconds.
 *
 *    The test is run on the following configurations:
 *
 *    - Cores configuration (*cores*)
 *
 *      - One core with cache
 *      - Two cores with cache
 *      - Max. cores with cache
 *      - One core without cache
 *      - Two cores without cache
 *      - Max. cores without cache
 *
 *    - Bulk size (*n_get_bulk*, *n_put_bulk*)
 *
 *      - Bulk get from 1 to 32
 *      - Bulk put from 1 to 32
 *
 *    - Number of kept objects (*n_keep*)
 *
 *      - 32
 *      - 128
 *
 *    (One combination is worked through in the example below.)
 */
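
/*
 * Example: with n_keep=128, n_get_bulk=4 and n_put_bulk=32 (values taken
 * from the tables in do_one_mempool_test() below), one inner iteration
 * performs 128/4 = 32 calls to rte_mempool_get_bulk() followed by
 * 128/32 = 4 calls to rte_mempool_put_bulk(), so the get and put burst
 * sizes are exercised independently of each other.
 */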

/* number of objects handled per timing lap of the worker loop */
#define N 65536
/* duration of one measurement, in seconds */
#define TIME_S 5
#define MEMPOOL_ELT_SIZE 2048
/* must be >= the largest value in keep_tab[] */
#define MAX_KEEP 128
#define MEMPOOL_SIZE ((RTE_MAX_LCORE*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
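
/*
 * Sizing note: in the worst case every lcore holds MAX_KEEP objects in
 * its local obj_table while its per-lcore cache retains up to
 * RTE_MEMPOOL_CACHE_MAX_SIZE more, so the pool is dimensioned to cover
 * that headroom for all RTE_MAX_LCORE cores and no lcore can starve the
 * others during the "max. cores" runs.
 */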

static struct rte_mempool *mp;
static struct rte_mempool *mp_cache, *mp_nocache;

static rte_atomic32_t synchro;

/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
static unsigned n_put_bulk;

/* number of objects retrieved from the mempool before putting them back */
static unsigned n_keep;

/* number of enqueues / dequeues */
struct mempool_test_stats {
	unsigned enq_count;
} __rte_cache_aligned;

static struct mempool_test_stats stats[RTE_MAX_LCORE];
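
/*
 * Because mempool_test_stats is __rte_cache_aligned, each entry of the
 * stats[] array above occupies its own cache line, so per-lcore counter
 * updates never false-share.
 */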

/*
 * save the object number in the first 4 bytes of object data. All
 * other bytes are set to 0.
 */
static void
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;
	memset(obj, 0, mp->elt_size);
	*objnum = i;
}
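
/*
 * Worker body run on each participating lcore: get n_keep objects in
 * bursts of n_get_bulk, put them back in bursts of n_put_bulk, and
 * account N objects per lap until TIME_S seconds have elapsed.
 */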
static int
per_lcore_mempool_test(__attribute__((unused)) void *arg)
{
	void *obj_table[MAX_KEEP];
	unsigned i, idx;
	unsigned lcore_id = rte_lcore_id();
	int ret;
	uint64_t start_cycles, end_cycles;
	uint64_t time_diff = 0, hz = rte_get_timer_hz();

	/* n_get_bulk and n_put_bulk must be divisors of n_keep */
	if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep)
		return -1;
	if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep)
		return -1;

	stats[lcore_id].enq_count = 0;

	/* wait synchro for slaves */
	if (lcore_id != rte_get_master_lcore())
		while (rte_atomic32_read(&synchro) == 0);

	start_cycles = rte_get_timer_cycles();

	while (time_diff/hz < TIME_S) {
		for (i = 0; likely(i < (N/n_keep)); i++) {
			/* get n_keep objects by bulk of n_get_bulk */
			idx = 0;
			while (idx < n_keep) {
				ret = rte_mempool_get_bulk(mp, &obj_table[idx],
							   n_get_bulk);
				if (unlikely(ret < 0)) {
					rte_mempool_dump(mp);
					rte_ring_dump(mp->ring);
					/* in this case, objects are lost... */
					return -1;
				}
				idx += n_get_bulk;
			}

			/* put the objects back */
			idx = 0;
			while (idx < n_keep) {
				rte_mempool_put_bulk(mp, &obj_table[idx],
						     n_put_bulk);
				idx += n_put_bulk;
			}
		}
		end_cycles = rte_get_timer_cycles();
		time_diff = end_cycles - start_cycles;
		stats[lcore_id].enq_count += N;
	}

	return 0;
}
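
/*
 * Slaves spin on `synchro` inside per_lcore_mempool_test() until the
 * master flips it to 1 after launching all of them, so every lcore
 * starts hammering the pool at (almost) the same instant and the whole
 * measured window is concurrent.
 */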
/* launch all the per-lcore tests, and display the result */
static int
launch_cores(unsigned cores)
{
	unsigned lcore_id;
	unsigned rate;
	int ret;
	unsigned cores_save = cores;

	rte_atomic32_set(&synchro, 0);

	/* reset stats */
	memset(stats, 0, sizeof(stats));

	printf("mempool_autotest cache=%u cores=%u n_get_bulk=%u "
	       "n_put_bulk=%u n_keep=%u ",
	       (unsigned) mp->cache_size, cores, n_get_bulk, n_put_bulk, n_keep);

	if (rte_mempool_count(mp) != MEMPOOL_SIZE) {
		printf("mempool is not full\n");
		return -1;
	}

	/* launch the test on cores-1 slaves; the master runs it too */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(per_lcore_mempool_test,
				      NULL, lcore_id);
	}

	/* start synchro and launch test on master */
	rte_atomic32_set(&synchro, 1);
	ret = per_lcore_mempool_test(NULL);

	cores = cores_save;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	}

	if (ret < 0) {
		printf("per-lcore test returned -1\n");
		return -1;
	}

	rate = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		rate += (stats[lcore_id].enq_count / TIME_S);

	printf("rate_persec=%u\n", rate);

	return 0;
}
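
/*
 * The zero-terminated tables below give 3 get sizes x 3 put sizes x
 * 2 keep values, i.e. 18 parameter combinations per core count.
 */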
/* for a given number of cores, launch all test cases */
static int
do_one_mempool_test(unsigned cores)
{
	unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
	unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
	unsigned keep_tab[] = { 32, 128, 0 };
	unsigned *get_bulk_ptr;
	unsigned *put_bulk_ptr;
	unsigned *keep_ptr;
	int ret;

	for (get_bulk_ptr = bulk_tab_get; *get_bulk_ptr; get_bulk_ptr++) {
		for (put_bulk_ptr = bulk_tab_put; *put_bulk_ptr; put_bulk_ptr++) {
			for (keep_ptr = keep_tab; *keep_ptr; keep_ptr++) {

				n_get_bulk = *get_bulk_ptr;
				n_put_bulk = *put_bulk_ptr;
				n_keep = *keep_ptr;
				ret = launch_cores(cores);

				if (ret < 0)
					return -1;
			}
		}
	}
	return 0;
}
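
/*
 * Entry point: the same sweep is run twice, against a pool created with
 * cache_size=0 and against one created with RTE_MEMPOOL_CACHE_MAX_SIZE,
 * which makes the cache / no-cache rates directly comparable.
 */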
int
test_mempool_perf(void)
{
	rte_atomic32_init(&synchro);

	/* create a mempool (without cache) */
	if (mp_nocache == NULL)
		mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
						MEMPOOL_ELT_SIZE, 0, 0,
						NULL, NULL,
						my_obj_init, NULL,
						SOCKET_ID_ANY, 0);
	if (mp_nocache == NULL)
		return -1;

	/* create a mempool (with cache) */
	if (mp_cache == NULL)
		mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
					      MEMPOOL_ELT_SIZE,
					      RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
					      NULL, NULL,
					      my_obj_init, NULL,
					      SOCKET_ID_ANY, 0);
	if (mp_cache == NULL)
		return -1;

	/* performance test with 1, 2 and max cores */
	printf("start performance test (without cache)\n");
	mp = mp_nocache;
	if (do_one_mempool_test(1) < 0)
		return -1;
	if (do_one_mempool_test(2) < 0)
		return -1;
	if (do_one_mempool_test(rte_lcore_count()) < 0)
		return -1;

	/* performance test with 1, 2 and max cores */
	printf("start performance test (with cache)\n");
	mp = mp_cache;
	if (do_one_mempool_test(1) < 0)
		return -1;
	if (do_one_mempool_test(2) < 0)
		return -1;
	if (do_one_mempool_test(rte_lcore_count()) < 0)
		return -1;

	rte_mempool_list_dump();

	return 0;
}
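
/*
 * Note: this function is driven from the test application's command
 * line; in DPDK test binaries of this era it is typically registered
 * (via the cmdline library) as the "mempool_perf_autotest" command.
 */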