 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * version: DPDK.L.1.2.3-3
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include <cmdline_parse.h>
/*
 * Set this to the maximum buffer size you want to test. If it is 0, then the
 * values in the buf_sizes[] array below will be used.
 */
#define TEST_VALUE_RANGE 0

/* List of buffer sizes to test */
#if TEST_VALUE_RANGE == 0
static size_t buf_sizes[] = {
	/* Sizes straddling power-of-two and alignment boundaries, plus
	 * common packet sizes (1518/1522 = max Ethernet frame w/o / w VLAN). */
	0, 1, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 255,
	256, 257, 320, 384, 511, 512, 513, 1023, 1024, 1025, 1518, 1522, 1600,
	2048, 3072, 4096, 5120, 6144, 7168, 8192
};
/* MUST be as large as largest packet size above */
#define SMALL_BUFFER_SIZE 8192
#else /* TEST_VALUE_RANGE != 0 */
/* Test every size from 0 up to TEST_VALUE_RANGE - 1 */
static size_t buf_sizes[TEST_VALUE_RANGE];
#define SMALL_BUFFER_SIZE TEST_VALUE_RANGE
#endif /* TEST_VALUE_RANGE == 0 */
/*
 * Arrays of this size are used for measuring uncached memory accesses by
 * picking a random location within the buffer. Make this smaller if there are
 * memory allocation errors.
 */
#define LARGE_BUFFER_SIZE (100 * 1024 * 1024)

/* How many times to run timing loop for performance tests */
#define TEST_ITERATIONS 1000000
#define TEST_BATCH_SIZE 100

/* Data is aligned on this many bytes (power of 2) */
#define ALIGNMENT_UNIT 16
/*
 * Pointers used in performance tests. The two large buffers are for uncached
 * access where random addresses within the buffer are used for each
 * memcpy. The two small buffers are for cached access.
 * All four are allocated by init_buffers() and released by its error path
 * (or the cleanup function); NULL until then.
 */
static uint8_t *large_buf_read, *large_buf_write,
	*small_buf_read, *small_buf_write;
94 /* Initialise data buffers. */
100 large_buf_read = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
101 if (large_buf_read == NULL)
102 goto error_large_buf_read;
104 large_buf_write = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
105 if (large_buf_write == NULL)
106 goto error_large_buf_write;
108 small_buf_read = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
109 if (small_buf_read == NULL)
110 goto error_small_buf_read;
112 small_buf_write = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
113 if (small_buf_write == NULL)
114 goto error_small_buf_write;
116 for (i = 0; i < LARGE_BUFFER_SIZE; i++)
117 large_buf_read[i] = rte_rand();
118 for (i = 0; i < SMALL_BUFFER_SIZE; i++)
119 small_buf_read[i] = rte_rand();
123 error_small_buf_write:
124 rte_free(small_buf_read);
125 error_small_buf_read:
126 rte_free(large_buf_write);
127 error_large_buf_write:
128 rte_free(large_buf_read);
129 error_large_buf_read:
130 printf("ERROR: not enough memory");
134 /* Cleanup data buffers */
138 rte_free(large_buf_read);
139 rte_free(large_buf_write);
140 rte_free(small_buf_read);
141 rte_free(small_buf_write);
145 * Get a random offset into large array, with enough space needed to perform
146 * max copy size. Offset is aligned.
149 get_rand_offset(void)
151 return ((rte_rand() % (LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE)) &
152 ~(ALIGNMENT_UNIT - 1));
155 /* Fill in source and destination addresses. */
157 fill_addr_arrays(size_t *dst_addr, int is_dst_cached,
158 size_t *src_addr, int is_src_cached)
162 for (i = 0; i < TEST_BATCH_SIZE; i++) {
163 dst_addr[i] = (is_dst_cached) ? 0 : get_rand_offset();
164 src_addr[i] = (is_src_cached) ? 0 : get_rand_offset();
/*
 * Integer division with round to nearest (ties round up).
 * Divisor must be non-zero.
 */
static inline uint64_t
div_round(uint64_t dividend, uint64_t divisor)
{
	/*
	 * Mathematically identical to ((2 * dividend) + divisor) /
	 * (2 * divisor) but without doubling the dividend, so it cannot
	 * overflow for dividends above UINT64_MAX / 2.
	 */
	return (dividend + (divisor / 2)) / divisor;
}
176 * WORKAROUND: For some reason the first test doing an uncached write
177 * takes a very long time (~25 times longer than is expected). So we do
178 * it once without timing.
181 do_uncached_write(uint8_t *dst, int is_dst_cached,
182 const uint8_t *src, int is_src_cached, size_t size)
185 size_t dst_addrs[TEST_BATCH_SIZE], src_addrs[TEST_BATCH_SIZE];
187 for (i = 0; i < (TEST_ITERATIONS / TEST_BATCH_SIZE); i++) {
188 fill_addr_arrays(dst_addrs, is_dst_cached,
189 src_addrs, is_src_cached);
190 for (j = 0; j < TEST_BATCH_SIZE; j++)
191 rte_memcpy(dst+dst_addrs[j], src+src_addrs[j], size);
/*
 * Run a single memcpy performance test. This is a macro to ensure that if
 * the "size" parameter is a constant it won't be converted to a variable.
 * Prints "<rte_ticks>/<libc_ticks>" (average cycles per copy) with no
 * trailing newline.
 */
#define SINGLE_PERF_TEST(dst, is_dst_cached, src, is_src_cached, size) do {   \
	unsigned int iter, t;                                                 \
	size_t dst_addrs[TEST_BATCH_SIZE], src_addrs[TEST_BATCH_SIZE];        \
	uint64_t start_time, total_time = 0;                                  \
	uint64_t total_time2 = 0;                                             \
	/* Time rte_memcpy(); addresses are generated outside the timed   */ \
	/* region so rte_rand() cost is excluded.                          */ \
	for (iter = 0; iter < (TEST_ITERATIONS / TEST_BATCH_SIZE); iter++) {  \
		fill_addr_arrays(dst_addrs, is_dst_cached,                    \
				 src_addrs, is_src_cached);                   \
		start_time = rte_rdtsc();                                     \
		for (t = 0; t < TEST_BATCH_SIZE; t++)                         \
			rte_memcpy(dst+dst_addrs[t], src+src_addrs[t], size); \
		total_time += rte_rdtsc() - start_time;                       \
	}                                                                     \
	/* Time libc memcpy() over the same access pattern for comparison. */ \
	for (iter = 0; iter < (TEST_ITERATIONS / TEST_BATCH_SIZE); iter++) {  \
		fill_addr_arrays(dst_addrs, is_dst_cached,                    \
				 src_addrs, is_src_cached);                   \
		start_time = rte_rdtsc();                                     \
		for (t = 0; t < TEST_BATCH_SIZE; t++)                         \
			memcpy(dst+dst_addrs[t], src+src_addrs[t], size);     \
		total_time2 += rte_rdtsc() - start_time;                      \
	}                                                                     \
	printf("%9u/", (unsigned)div_round(total_time, TEST_ITERATIONS));     \
	printf("%4u", (unsigned)div_round(total_time2, TEST_ITERATIONS));     \
} while (0)
/*
 * Run memcpy() tests for each cached/uncached permutation.
 * Starts a new output row; a "C" prefix marks sizes the compiler can see
 * as compile-time constants.
 */
#define ALL_PERF_TESTS_FOR_SIZE(n) do {                                 \
	if (__builtin_constant_p(n))                                    \
		printf("\nC%6u ", (unsigned)n);                         \
	else                                                            \
		printf("\n%7u ", (unsigned)n);                          \
	SINGLE_PERF_TEST(small_buf_write, 1, small_buf_read, 1, n);     \
	SINGLE_PERF_TEST(large_buf_write, 0, small_buf_read, 1, n);     \
	SINGLE_PERF_TEST(small_buf_write, 1, large_buf_read, 0, n);     \
	SINGLE_PERF_TEST(large_buf_write, 0, large_buf_read, 0, n);     \
} while (0)
237 * Run performance tests for a number of different sizes and cached/uncached
243 const unsigned num_buf_sizes = sizeof(buf_sizes) / sizeof(buf_sizes[0]);
247 ret = init_buffers();
251 #if TEST_VALUE_RANGE != 0
252 /* Setup buf_sizes array, if required */
253 for (i = 0; i < TEST_VALUE_RANGE; i++)
257 /* See function comment */
258 do_uncached_write(large_buf_write, 0, small_buf_read, 1, SMALL_BUFFER_SIZE);
260 printf("\n** rte_memcpy()/memcpy performance tests **\n"
261 "======= ============== ============== ============== ==============\n"
262 " Size Cache to cache Cache to mem Mem to cache Mem to mem\n"
263 "(bytes) (ticks) (ticks) (ticks) (ticks)\n"
264 "------- -------------- -------------- -------------- --------------");
266 /* Do tests where size is a variable */
267 for (i = 0; i < num_buf_sizes; i++) {
268 ALL_PERF_TESTS_FOR_SIZE((size_t)buf_sizes[i]);
271 #ifdef RTE_MEMCPY_BUILTIN_CONSTANT_P
272 /* Do tests where size is a compile-time constant */
273 ALL_PERF_TESTS_FOR_SIZE(63U);
274 ALL_PERF_TESTS_FOR_SIZE(64U);
275 ALL_PERF_TESTS_FOR_SIZE(65U);
276 ALL_PERF_TESTS_FOR_SIZE(255U);
277 ALL_PERF_TESTS_FOR_SIZE(256U);
278 ALL_PERF_TESTS_FOR_SIZE(257U);
279 ALL_PERF_TESTS_FOR_SIZE(1023U);
280 ALL_PERF_TESTS_FOR_SIZE(1024U);
281 ALL_PERF_TESTS_FOR_SIZE(1025U);
282 ALL_PERF_TESTS_FOR_SIZE(1518U);
284 printf("\n======= ============== ============== ============== ==============\n\n");
/* Structure with base memcpy func pointer, and number of bytes it copies */
struct base_memcpy_func {
	void (*func)(uint8_t *dst, const uint8_t *src);
	unsigned size;
};

/* To create base_memcpy_func structure entries */
#define BASE_FUNC(n) {rte_mov##n, n}

/* Max number of bytes that can be copied with a "base" memcpy function */
#define MAX_BASE_FUNC_SIZE 256
304 * Test the "base" memcpy functions, that a copy fixed number of bytes.
309 const struct base_memcpy_func base_memcpy_funcs[6] = {
318 unsigned num_funcs = sizeof(base_memcpy_funcs) / sizeof(base_memcpy_funcs[0]);
319 uint8_t dst[MAX_BASE_FUNC_SIZE];
320 uint8_t src[MAX_BASE_FUNC_SIZE];
322 for (i = 0; i < num_funcs; i++) {
323 unsigned size = base_memcpy_funcs[i].size;
324 for (j = 0; j < size; j++) {
326 src[j] = (uint8_t) rte_rand();
328 base_memcpy_funcs[i].func(dst, src);
329 for (j = 0; j < size; j++)
330 if (dst[j] != src[j])
338 * Create two buffers, and initialise one with random values. These are copied
339 * to the second buffer and then compared to see if the copy was successful.
340 * The bytes outside the copied area are also checked to make sure they were not
344 test_single_memcpy(unsigned int off_src, unsigned int off_dst, size_t size)
347 uint8_t dest[SMALL_BUFFER_SIZE + ALIGNMENT_UNIT];
348 uint8_t src[SMALL_BUFFER_SIZE + ALIGNMENT_UNIT];
351 for (i = 0; i < SMALL_BUFFER_SIZE + ALIGNMENT_UNIT; i++) {
353 src[i] = (uint8_t) rte_rand();
357 rte_memcpy(dest + off_dst, src + off_src, size);
359 /* Check nothing before offset is affected */
360 for (i = 0; i < off_dst; i++) {
362 printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
363 "[modified before start of dst].\n",
364 (unsigned)size, off_src, off_dst);
369 /* Check everything was copied */
370 for (i = 0; i < size; i++) {
371 if (dest[i + off_dst] != src[i + off_src]) {
372 printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
373 "[didn't copy byte %u].\n",
374 (unsigned)size, off_src, off_dst, i);
379 /* Check nothing after copy was affected */
380 for (i = size; i < SMALL_BUFFER_SIZE; i++) {
381 if (dest[i + off_dst] != 0) {
382 printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
383 "[copied too many].\n",
384 (unsigned)size, off_src, off_dst);
392 * Check functionality for various buffer sizes and data offsets/alignments.
397 unsigned int off_src, off_dst, i;
398 unsigned int num_buf_sizes = sizeof(buf_sizes) / sizeof(buf_sizes[0]);
401 for (off_src = 0; off_src < ALIGNMENT_UNIT; off_src++) {
402 for (off_dst = 0; off_dst < ALIGNMENT_UNIT; off_dst++) {
403 for (i = 0; i < num_buf_sizes; i++) {
404 ret = test_single_memcpy(off_src, off_dst,
422 ret = base_func_test();