/*
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMCPY_H_
#define _RTE_MEMCPY_H_

/**
 * @file
 *
 * Functions for SSE implementation of memcpy().
 */

#include <stdint.h>
#include <string.h>

#ifdef __cplusplus
extern "C" {
#endif
/**
 * Copy 16 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	asm volatile ("movdqu (%[src]), %%xmm0\n\t"
		      "movdqu %%xmm0, (%[dst])"
		      :
		      : [src] "r" (src),
		        [dst] "r" (dst)
		      : "xmm0", "memory");
}
/**
 * Copy 32 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	asm volatile ("movdqu (%[src]), %%xmm0\n\t"
		      "movdqu 16(%[src]), %%xmm1\n\t"
		      "movdqu %%xmm0, (%[dst])\n\t"
		      "movdqu %%xmm1, 16(%[dst])"
		      :
		      : [src] "r" (src),
		        [dst] "r" (dst)
		      : "xmm0", "xmm1", "memory");
}
/**
 * Copy 48 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov48(uint8_t *dst, const uint8_t *src)
{
	asm volatile ("movdqu (%[src]), %%xmm0\n\t"
		      "movdqu 16(%[src]), %%xmm1\n\t"
		      "movdqu 32(%[src]), %%xmm2\n\t"
		      "movdqu %%xmm0, (%[dst])\n\t"
		      "movdqu %%xmm1, 16(%[dst])\n\t"
		      "movdqu %%xmm2, 32(%[dst])"
		      :
		      : [src] "r" (src),
		        [dst] "r" (dst)
		      : "xmm0", "xmm1", "xmm2", "memory");
}
/**
 * Copy 64 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	asm volatile ("movdqu (%[src]), %%xmm0\n\t"
		      "movdqu 16(%[src]), %%xmm1\n\t"
		      "movdqu 32(%[src]), %%xmm2\n\t"
		      "movdqu 48(%[src]), %%xmm3\n\t"
		      "movdqu %%xmm0, (%[dst])\n\t"
		      "movdqu %%xmm1, 16(%[dst])\n\t"
		      "movdqu %%xmm2, 32(%[dst])\n\t"
		      "movdqu %%xmm3, 48(%[dst])"
		      :
		      : [src] "r" (src),
		        [dst] "r" (dst)
		      : "xmm0", "xmm1", "xmm2", "xmm3", "memory");
}
/**
 * Copy 128 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	asm volatile ("movdqu (%[src]), %%xmm0\n\t"
		      "movdqu 16(%[src]), %%xmm1\n\t"
		      "movdqu 32(%[src]), %%xmm2\n\t"
		      "movdqu 48(%[src]), %%xmm3\n\t"
		      "movdqu 64(%[src]), %%xmm4\n\t"
		      "movdqu 80(%[src]), %%xmm5\n\t"
		      "movdqu 96(%[src]), %%xmm6\n\t"
		      "movdqu 112(%[src]), %%xmm7\n\t"
		      "movdqu %%xmm0, (%[dst])\n\t"
		      "movdqu %%xmm1, 16(%[dst])\n\t"
		      "movdqu %%xmm2, 32(%[dst])\n\t"
		      "movdqu %%xmm3, 48(%[dst])\n\t"
		      "movdqu %%xmm4, 64(%[dst])\n\t"
		      "movdqu %%xmm5, 80(%[dst])\n\t"
		      "movdqu %%xmm6, 96(%[dst])\n\t"
		      "movdqu %%xmm7, 112(%[dst])"
		      :
		      : [src] "r" (src),
		        [dst] "r" (dst)
		      : "xmm0", "xmm1", "xmm2", "xmm3",
		        "xmm4", "xmm5", "xmm6", "xmm7", "memory");
}
/**
 * Copy 256 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	/*
	 * There are 16 XMM registers, but this function does not use
	 * them all, so that it can still be compiled as 32-bit code.
	 * The performance increase was negligible when all 16 registers
	 * were used.
	 */
	rte_mov128(dst, src);
	rte_mov128(dst + 128, src + 128);
}
#ifdef RTE_MEMCPY_BUILTIN_CONSTANT_P
/**
 * Choose between the compiler built-in implementation of memcpy() and the
 * DPDK implementation, depending on whether the size is a compile-time
 * constant.
 */
#define rte_memcpy(dst, src, n)              \
	(__builtin_constant_p(n) ?           \
	memcpy(dst, src, n) : rte_memcpy_func(dst, src, n))
#else
/**
 * Always use the DPDK implementation.
 */
#define rte_memcpy rte_memcpy_func
#endif
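/*
 * Illustrative usage sketch (not part of the original header): with the
 * builtin-constant-p variant enabled, a literal size resolves to the
 * compiler's memcpy(), while a runtime size dispatches to rte_memcpy_func().
 * The buffer and length names below are hypothetical.
 *
 *	uint8_t in[256], out[256];
 *	rte_memcpy(out, in, 64);	// n is constant: memcpy() is used
 *	size_t len = pkt_len;		// hypothetical runtime value
 *	rte_memcpy(out, in, len);	// n is variable: rte_memcpy_func()
 */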
/**
 * Copy bytes from one location to another. The locations must not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 * @param n
 *   Number of bytes to copy.
 * @return
 *   Pointer to the destination data.
 */
static inline void *
rte_memcpy_func(void *dst, const void *src, size_t n)
{
	void *ret = dst;

	/* We can't copy < 16 bytes using XMM registers so do it manually. */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dst = *(const uint8_t *)src;
			dst = (uint8_t *)dst + 1;
			src = (const uint8_t *)src + 1;
		}
		if (n & 0x02) {
			*(uint16_t *)dst = *(const uint16_t *)src;
			dst = (uint16_t *)dst + 1;
			src = (const uint16_t *)src + 1;
		}
		if (n & 0x04) {
			/*
			 * NOTE: doing this as a single 32-bit copy causes
			 * "strict aliasing" compile errors, while the 64-bit
			 * copy below compiles fine, for unknown reasons.
			 */
			*(uint16_t *)dst = *(const uint16_t *)src;
			*((uint16_t *)dst + 1) = *((const uint16_t *)src + 1);
			dst = (uint32_t *)dst + 1;
			src = (const uint32_t *)src + 1;
		}
		if (n & 0x08)
			*(uint64_t *)dst = *(const uint64_t *)src;
		return ret;
	}
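	/*
	 * Worked example (illustrative, not from the original source): for
	 * n = 13 = 0b1101, the bit tests above perform a 1-byte copy (0x01),
	 * skip the 2-byte copy (0x02 is clear), then do a 4-byte copy (0x04)
	 * and an 8-byte copy (0x08), advancing dst/src between steps:
	 * 1 + 4 + 8 = 13 bytes in total.
	 */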
	/* Special fast cases for <= 128 bytes */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
		return ret;
	}
	if (n <= 128) {
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		rte_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);
		return ret;
	}
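	/*
	 * Illustrative note (not from the original source): each fast case
	 * copies a head block plus a tail block aligned to the end of the
	 * buffer, and the two may overlap. For n = 20, rte_mov16(dst, src)
	 * covers bytes 0-15 and rte_mov16(dst + 4, src + 4) covers bytes
	 * 4-19; rewriting the overlapping 12 bytes with the same data is
	 * harmless and avoids any branching on the exact length.
	 */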
	/*
	 * For large copies > 128 bytes. This combination of 256, 64 and 16
	 * byte copies was found to be faster than doing 128 and 32 byte
	 * copies as well.
	 */
	for ( ; n >= 256; n -= 256) {
		rte_mov256((uint8_t *)dst, (const uint8_t *)src);
		dst = (uint8_t *)dst + 256;
		src = (const uint8_t *)src + 256;
	}
	/*
	 * We split the remaining bytes (which will be less than 256) into
	 * 64-byte (2^6) chunks.
	 * Using incrementing integers in the case labels of a switch statement
	 * encourages the compiler to use a jump table. To get incrementing
	 * integers, we shift the 2 relevant bits to the LSB position to first
	 * get decrementing integers, and then subtract.
	 */
	switch (3 - (n >> 6)) {
	case 0x00:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	case 0x01:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	case 0x02:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	default:
		break;
	}
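	/*
	 * Worked example (illustrative, not from the original source): after
	 * the 256-byte loop, n < 256. For n = 200, n >> 6 == 3, so the switch
	 * above enters at case 3 - 3 == 0x00 and falls through all three
	 * cases, copying 3 * 64 = 192 bytes and leaving n = 8 for the final
	 * tail copy below.
	 */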
	/*
	 * We split the remaining bytes (which will be less than 64) into
	 * 16-byte (2^4) chunks, using the same switch structure as above.
	 */
	switch (3 - (n >> 4)) {
	case 0x00:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	case 0x01:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	case 0x02:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	default:
		break;
	}
	/* Copy any remaining bytes, without going beyond end of buffers */
	if (n != 0)
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
	return ret;
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_H_ */