4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include <cmdline_parse.h>
/*
 * Set this to the maximum buffer size you want to test. If it is 0, then the
 * values in the buf_sizes[] array below will be used.
 */
#define TEST_VALUE_RANGE 0

/* List of buffer sizes to test */
#if TEST_VALUE_RANGE == 0
static size_t buf_sizes[] = {
	0, 1, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 255,
	256, 257, 320, 384, 511, 512, 513, 1023, 1024, 1025, 1518, 1522, 1600,
	2048, 3072, 4096, 5120, 6144, 7168, 8192
};
/* MUST be as large as largest packet size above */
#define SMALL_BUFFER_SIZE 8192
#else /* TEST_VALUE_RANGE != 0 */
static size_t buf_sizes[TEST_VALUE_RANGE];
#define SMALL_BUFFER_SIZE TEST_VALUE_RANGE
#endif /* TEST_VALUE_RANGE == 0 */
/*
 * Arrays of this size are used for measuring uncached memory accesses by
 * picking a random location within the buffer. Make this smaller if there are
 * memory allocation errors.
 */
#define LARGE_BUFFER_SIZE (100 * 1024 * 1024)

/* How many times to run timing loop for performance tests */
#define TEST_ITERATIONS 1000000
#define TEST_BATCH_SIZE 100

/* Data is aligned on this many bytes (power of 2) */
#define ALIGNMENT_UNIT 16
/*
 * Pointers used in performance tests. The two large buffers are for uncached
 * access where random addresses within the buffer are used for each
 * memcpy. The two small buffers are for cached access.
 */
static uint8_t *large_buf_read, *large_buf_write,
	*small_buf_read, *small_buf_write;
93 /* Initialise data buffers. */
99 large_buf_read = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
100 if (large_buf_read == NULL)
101 goto error_large_buf_read;
103 large_buf_write = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
104 if (large_buf_write == NULL)
105 goto error_large_buf_write;
107 small_buf_read = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
108 if (small_buf_read == NULL)
109 goto error_small_buf_read;
111 small_buf_write = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
112 if (small_buf_write == NULL)
113 goto error_small_buf_write;
115 for (i = 0; i < LARGE_BUFFER_SIZE; i++)
116 large_buf_read[i] = rte_rand();
117 for (i = 0; i < SMALL_BUFFER_SIZE; i++)
118 small_buf_read[i] = rte_rand();
122 error_small_buf_write:
123 rte_free(small_buf_read);
124 error_small_buf_read:
125 rte_free(large_buf_write);
126 error_large_buf_write:
127 rte_free(large_buf_read);
128 error_large_buf_read:
129 printf("ERROR: not enough memory");
133 /* Cleanup data buffers */
137 rte_free(large_buf_read);
138 rte_free(large_buf_write);
139 rte_free(small_buf_read);
140 rte_free(small_buf_write);
144 * Get a random offset into large array, with enough space needed to perform
145 * max copy size. Offset is aligned.
148 get_rand_offset(void)
150 return ((rte_rand() % (LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE)) &
151 ~(ALIGNMENT_UNIT - 1));
154 /* Fill in source and destination addresses. */
156 fill_addr_arrays(size_t *dst_addr, int is_dst_cached,
157 size_t *src_addr, int is_src_cached)
161 for (i = 0; i < TEST_BATCH_SIZE; i++) {
162 dst_addr[i] = (is_dst_cached) ? 0 : get_rand_offset();
163 src_addr[i] = (is_src_cached) ? 0 : get_rand_offset();
/* Integer division with round to nearest (halves round up). */
static inline uint64_t
div_round(uint64_t dividend, uint64_t divisor)
{
	/*
	 * Equivalent to ((2 * dividend) + divisor) / (2 * divisor), but
	 * avoids overflowing 2 * dividend when dividend >= 2^63.
	 */
	return (dividend + (divisor / 2)) / divisor;
}
175 * WORKAROUND: For some reason the first test doing an uncached write
176 * takes a very long time (~25 times longer than is expected). So we do
177 * it once without timing.
180 do_uncached_write(uint8_t *dst, int is_dst_cached,
181 const uint8_t *src, int is_src_cached, size_t size)
184 size_t dst_addrs[TEST_BATCH_SIZE], src_addrs[TEST_BATCH_SIZE];
186 for (i = 0; i < (TEST_ITERATIONS / TEST_BATCH_SIZE); i++) {
187 fill_addr_arrays(dst_addrs, is_dst_cached,
188 src_addrs, is_src_cached);
189 for (j = 0; j < TEST_BATCH_SIZE; j++)
190 rte_memcpy(dst+dst_addrs[j], src+src_addrs[j], size);
/*
 * Run a single memcpy performance test. This is a macro to ensure that if
 * the "size" parameter is a constant it won't be converted to a variable.
 * Times TEST_ITERATIONS copies with rte_memcpy(), then with libc memcpy(),
 * and prints the per-copy tick averages as "rte/libc".
 */
#define SINGLE_PERF_TEST(dst, is_dst_cached, src, is_src_cached, size) do {   \
	unsigned int iter, t;                                                 \
	size_t dst_addrs[TEST_BATCH_SIZE], src_addrs[TEST_BATCH_SIZE];        \
	uint64_t start_time, total_time = 0;                                  \
	uint64_t total_time2 = 0;                                             \
	for (iter = 0; iter < (TEST_ITERATIONS / TEST_BATCH_SIZE); iter++) {  \
		fill_addr_arrays(dst_addrs, is_dst_cached,                    \
				 src_addrs, is_src_cached);                   \
		start_time = rte_rdtsc();                                     \
		for (t = 0; t < TEST_BATCH_SIZE; t++)                         \
			rte_memcpy(dst+dst_addrs[t], src+src_addrs[t], size); \
		total_time += rte_rdtsc() - start_time;                       \
	}                                                                     \
	for (iter = 0; iter < (TEST_ITERATIONS / TEST_BATCH_SIZE); iter++) {  \
		fill_addr_arrays(dst_addrs, is_dst_cached,                    \
				 src_addrs, is_src_cached);                   \
		start_time = rte_rdtsc();                                     \
		for (t = 0; t < TEST_BATCH_SIZE; t++)                         \
			memcpy(dst+dst_addrs[t], src+src_addrs[t], size);     \
		total_time2 += rte_rdtsc() - start_time;                      \
	}                                                                     \
	printf("%9u/", (unsigned)div_round(total_time, TEST_ITERATIONS));     \
	printf("%4u", (unsigned)div_round(total_time2, TEST_ITERATIONS));     \
} while (0)
/*
 * Run memcpy() tests for each cached/uncached permutation. Sizes known to be
 * compile-time constants are prefixed with 'C' in the output.
 */
#define ALL_PERF_TESTS_FOR_SIZE(n) do {                                 \
	if (__builtin_constant_p(n))                                    \
		printf("\nC%6u ", (unsigned)n);                         \
	else                                                            \
		printf("\n%7u ", (unsigned)n);                          \
	SINGLE_PERF_TEST(small_buf_write, 1, small_buf_read, 1, n);     \
	SINGLE_PERF_TEST(large_buf_write, 0, small_buf_read, 1, n);     \
	SINGLE_PERF_TEST(small_buf_write, 1, large_buf_read, 0, n);     \
	SINGLE_PERF_TEST(large_buf_write, 0, large_buf_read, 0, n);     \
} while (0)
236 * Run performance tests for a number of different sizes and cached/uncached
242 const unsigned num_buf_sizes = sizeof(buf_sizes) / sizeof(buf_sizes[0]);
246 ret = init_buffers();
250 #if TEST_VALUE_RANGE != 0
251 /* Setup buf_sizes array, if required */
252 for (i = 0; i < TEST_VALUE_RANGE; i++)
256 /* See function comment */
257 do_uncached_write(large_buf_write, 0, small_buf_read, 1, SMALL_BUFFER_SIZE);
259 printf("\n** rte_memcpy()/memcpy performance tests **\n"
260 "======= ============== ============== ============== ==============\n"
261 " Size Cache to cache Cache to mem Mem to cache Mem to mem\n"
262 "(bytes) (ticks) (ticks) (ticks) (ticks)\n"
263 "------- -------------- -------------- -------------- --------------");
265 /* Do tests where size is a variable */
266 for (i = 0; i < num_buf_sizes; i++) {
267 ALL_PERF_TESTS_FOR_SIZE((size_t)buf_sizes[i]);
270 #ifdef RTE_MEMCPY_BUILTIN_CONSTANT_P
271 /* Do tests where size is a compile-time constant */
272 ALL_PERF_TESTS_FOR_SIZE(63U);
273 ALL_PERF_TESTS_FOR_SIZE(64U);
274 ALL_PERF_TESTS_FOR_SIZE(65U);
275 ALL_PERF_TESTS_FOR_SIZE(255U);
276 ALL_PERF_TESTS_FOR_SIZE(256U);
277 ALL_PERF_TESTS_FOR_SIZE(257U);
278 ALL_PERF_TESTS_FOR_SIZE(1023U);
279 ALL_PERF_TESTS_FOR_SIZE(1024U);
280 ALL_PERF_TESTS_FOR_SIZE(1025U);
281 ALL_PERF_TESTS_FOR_SIZE(1518U);
283 printf("\n======= ============== ============== ============== ==============\n\n");
/* Structure with base memcpy func pointer, and number of bytes it copies */
struct base_memcpy_func {
	void (*func)(uint8_t *dst, const uint8_t *src);
	unsigned size;
};

/* To create base_memcpy_func structure entries */
#define BASE_FUNC(n) {rte_mov##n, n}

/* Max number of bytes that can be copied with a "base" memcpy function */
#define MAX_BASE_FUNC_SIZE 256
303 * Test the "base" memcpy functions, that a copy fixed number of bytes.
308 const struct base_memcpy_func base_memcpy_funcs[6] = {
317 unsigned num_funcs = sizeof(base_memcpy_funcs) / sizeof(base_memcpy_funcs[0]);
318 uint8_t dst[MAX_BASE_FUNC_SIZE];
319 uint8_t src[MAX_BASE_FUNC_SIZE];
321 for (i = 0; i < num_funcs; i++) {
322 unsigned size = base_memcpy_funcs[i].size;
323 for (j = 0; j < size; j++) {
325 src[j] = (uint8_t) rte_rand();
327 base_memcpy_funcs[i].func(dst, src);
328 for (j = 0; j < size; j++)
329 if (dst[j] != src[j])
337 * Create two buffers, and initialise one with random values. These are copied
338 * to the second buffer and then compared to see if the copy was successful.
339 * The bytes outside the copied area are also checked to make sure they were not
343 test_single_memcpy(unsigned int off_src, unsigned int off_dst, size_t size)
346 uint8_t dest[SMALL_BUFFER_SIZE + ALIGNMENT_UNIT];
347 uint8_t src[SMALL_BUFFER_SIZE + ALIGNMENT_UNIT];
350 for (i = 0; i < SMALL_BUFFER_SIZE + ALIGNMENT_UNIT; i++) {
352 src[i] = (uint8_t) rte_rand();
356 rte_memcpy(dest + off_dst, src + off_src, size);
358 /* Check nothing before offset is affected */
359 for (i = 0; i < off_dst; i++) {
361 printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
362 "[modified before start of dst].\n",
363 (unsigned)size, off_src, off_dst);
368 /* Check everything was copied */
369 for (i = 0; i < size; i++) {
370 if (dest[i + off_dst] != src[i + off_src]) {
371 printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
372 "[didn't copy byte %u].\n",
373 (unsigned)size, off_src, off_dst, i);
378 /* Check nothing after copy was affected */
379 for (i = size; i < SMALL_BUFFER_SIZE; i++) {
380 if (dest[i + off_dst] != 0) {
381 printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
382 "[copied too many].\n",
383 (unsigned)size, off_src, off_dst);
391 * Check functionality for various buffer sizes and data offsets/alignments.
396 unsigned int off_src, off_dst, i;
397 unsigned int num_buf_sizes = sizeof(buf_sizes) / sizeof(buf_sizes[0]);
400 for (off_src = 0; off_src < ALIGNMENT_UNIT; off_src++) {
401 for (off_dst = 0; off_dst < ALIGNMENT_UNIT; off_dst++) {
402 for (i = 0; i < num_buf_sizes; i++) {
403 ret = test_single_memcpy(off_src, off_dst,
421 ret = base_func_test();