/*
 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMCPY_H_
#define _RTE_MEMCPY_H_

/**
 * @file
 *
 * Functions for SSE implementation of memcpy().
 */

#include <stdint.h>
#include <string.h>
#include <emmintrin.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __INTEL_COMPILER
#pragma warning(disable:593) /* Stop unused variable warning (reg_a etc). */
#endif
/**
 * Copy 16 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i reg_a;
	asm volatile (
		"movdqu (%[src]), %[reg_a]\n\t"
		"movdqu %[reg_a], (%[dst])\n\t"
		: [reg_a] "=x" (reg_a)
		: [src] "r" (src),
		  [dst] "r" (dst)
		: "memory");
}
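/*
 * A minimal sketch of the same 16-byte copy written with the SSE2
 * intrinsics from <emmintrin.h>, shown only to illustrate what the
 * inline asm above does:
 *
 *   _mm_storeu_si128((__m128i *)dst,
 *                    _mm_loadu_si128((const __m128i *)src));
 */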
/**
 * Copy 32 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m128i reg_a, reg_b;
	asm volatile (
		"movdqu (%[src]), %[reg_a]\n\t"
		"movdqu 16(%[src]), %[reg_b]\n\t"
		"movdqu %[reg_a], (%[dst])\n\t"
		"movdqu %[reg_b], 16(%[dst])\n\t"
		: [reg_a] "=x" (reg_a),
		  [reg_b] "=x" (reg_b)
		: [src] "r" (src),
		  [dst] "r" (dst)
		: "memory");
}
/**
 * Copy 48 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov48(uint8_t *dst, const uint8_t *src)
{
	__m128i reg_a, reg_b, reg_c;
	asm volatile (
		"movdqu (%[src]), %[reg_a]\n\t"
		"movdqu 16(%[src]), %[reg_b]\n\t"
		"movdqu 32(%[src]), %[reg_c]\n\t"
		"movdqu %[reg_a], (%[dst])\n\t"
		"movdqu %[reg_b], 16(%[dst])\n\t"
		"movdqu %[reg_c], 32(%[dst])\n\t"
		: [reg_a] "=x" (reg_a),
		  [reg_b] "=x" (reg_b),
		  [reg_c] "=x" (reg_c)
		: [src] "r" (src),
		  [dst] "r" (dst)
		: "memory");
}
/**
 * Copy 64 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	__m128i reg_a, reg_b, reg_c, reg_d;
	asm volatile (
		"movdqu (%[src]), %[reg_a]\n\t"
		"movdqu 16(%[src]), %[reg_b]\n\t"
		"movdqu 32(%[src]), %[reg_c]\n\t"
		"movdqu 48(%[src]), %[reg_d]\n\t"
		"movdqu %[reg_a], (%[dst])\n\t"
		"movdqu %[reg_b], 16(%[dst])\n\t"
		"movdqu %[reg_c], 32(%[dst])\n\t"
		"movdqu %[reg_d], 48(%[dst])\n\t"
		: [reg_a] "=x" (reg_a),
		  [reg_b] "=x" (reg_b),
		  [reg_c] "=x" (reg_c),
		  [reg_d] "=x" (reg_d)
		: [src] "r" (src),
		  [dst] "r" (dst)
		: "memory");
}
/**
 * Copy 128 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	__m128i reg_a, reg_b, reg_c, reg_d, reg_e, reg_f, reg_g, reg_h;
	asm volatile (
		"movdqu (%[src]), %[reg_a]\n\t"
		"movdqu 16(%[src]), %[reg_b]\n\t"
		"movdqu 32(%[src]), %[reg_c]\n\t"
		"movdqu 48(%[src]), %[reg_d]\n\t"
		"movdqu 64(%[src]), %[reg_e]\n\t"
		"movdqu 80(%[src]), %[reg_f]\n\t"
		"movdqu 96(%[src]), %[reg_g]\n\t"
		"movdqu 112(%[src]), %[reg_h]\n\t"
		"movdqu %[reg_a], (%[dst])\n\t"
		"movdqu %[reg_b], 16(%[dst])\n\t"
		"movdqu %[reg_c], 32(%[dst])\n\t"
		"movdqu %[reg_d], 48(%[dst])\n\t"
		"movdqu %[reg_e], 64(%[dst])\n\t"
		"movdqu %[reg_f], 80(%[dst])\n\t"
		"movdqu %[reg_g], 96(%[dst])\n\t"
		"movdqu %[reg_h], 112(%[dst])\n\t"
		: [reg_a] "=x" (reg_a),
		  [reg_b] "=x" (reg_b),
		  [reg_c] "=x" (reg_c),
		  [reg_d] "=x" (reg_d),
		  [reg_e] "=x" (reg_e),
		  [reg_f] "=x" (reg_f),
		  [reg_g] "=x" (reg_g),
		  [reg_h] "=x" (reg_h)
		: [src] "r" (src),
		  [dst] "r" (dst)
		: "memory");
}
#ifdef __INTEL_COMPILER
#pragma warning(enable:593)
#endif
/**
 * Copy 256 bytes from one location to another using optimised SSE
 * instructions. The locations should not overlap.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 */
static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov128(dst, src);
	rte_mov128(dst + 128, src + 128);
}
/**
 * Copy bytes from one location to another. The locations must not overlap.
 *
 * @note This is implemented as a macro, so its address should not be taken,
 * and care is needed as parameter expressions may be evaluated multiple times.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 * @param n
 *   Number of bytes to copy.
 * @return
 *   Pointer to the destination data.
 */
#define rte_memcpy(dst, src, n)              \
	((__builtin_constant_p(n)) ?         \
	memcpy((dst), (src), (n)) :          \
	rte_memcpy_func((dst), (src), (n)))
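/*
 * Usage sketch (illustrative only; "buf", "pkt" and "len" are hypothetical
 * local variables). A compile-time-constant size dispatches to memcpy(),
 * which the compiler can expand inline for that fixed size; a runtime size
 * goes through rte_memcpy_func() below:
 *
 *   uint8_t buf[256];
 *   rte_memcpy(buf, pkt, 64);     // constant n: expands to memcpy()
 *   rte_memcpy(buf, pkt, len);    // runtime n: calls rte_memcpy_func()
 */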
/**
 * memcpy() function used by rte_memcpy macro
 */
static inline void *
rte_memcpy_func(void *dst, const void *src, size_t n) __attribute__((always_inline));

static inline void *
rte_memcpy_func(void *dst, const void *src, size_t n)
{
	void *ret = dst;
	/* We can't copy < 16 bytes using XMM registers so do it manually. */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dst = *(const uint8_t *)src;
			dst = (uint8_t *)dst + 1;
			src = (const uint8_t *)src + 1;
		}
		if (n & 0x02) {
			*(uint16_t *)dst = *(const uint16_t *)src;
			dst = (uint16_t *)dst + 1;
			src = (const uint16_t *)src + 1;
		}
		if (n & 0x04) {
			*(uint32_t *)dst = *(const uint32_t *)src;
			dst = (uint32_t *)dst + 1;
			src = (const uint32_t *)src + 1;
		}
		if (n & 0x08) {
			*(uint64_t *)dst = *(const uint64_t *)src;
		}
		return ret;
	}
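	/*
	 * Worked example for the block above (illustrative): n == 13 is
	 * 0b1101, so the 1-byte, 4-byte and 8-byte steps each run once
	 * (1 + 4 + 8 == 13) while the 2-byte step is skipped.
	 */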
	/* Special fast cases for <= 128 bytes */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}

	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
		return ret;
	}

	if (n <= 128) {
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		rte_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);
		return ret;
	}
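	/*
	 * The cases above copy a head block and a tail block that may
	 * overlap in the middle. Worked example (illustrative): for n == 20,
	 * rte_mov16 copies bytes 0-15 and then bytes 4-19; the 12-byte
	 * overlap rewrites identical data, which is safe because src and
	 * dst themselves do not overlap.
	 */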
	/*
	 * For large copies > 128 bytes. This combination of 256, 64 and 16 byte
	 * copies was found to be faster than doing 128 and 32 byte copies as
	 * well.
	 */
	for ( ; n >= 256; n -= 256) {
		rte_mov256((uint8_t *)dst, (const uint8_t *)src);
		dst = (uint8_t *)dst + 256;
		src = (const uint8_t *)src + 256;
	}
	/*
	 * We split the remaining bytes (which will be less than 256) into
	 * 64-byte (2^6) chunks.
	 * Using incrementing integers in the case labels of a switch statement
	 * encourages the compiler to use a jump table. To get incrementing
	 * integers, we shift the 2 relevant bits to the LSB position to first
	 * get decrementing integers, and then subtract.
	 */
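	/*
	 * Worked example (illustrative): for n == 200, n >> 6 == 3, the
	 * switch value is 0 and all three 64-byte copies run, leaving
	 * n == 8; for n == 70, n >> 6 == 1, the value is 2 and only the
	 * last copy runs.
	 */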
	switch (3 - (n >> 6)) {
	case 0x00:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	case 0x01:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	case 0x02:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	default:
		break;
	}
	/*
	 * We split the remaining bytes (which will be less than 64) into
	 * 16-byte (2^4) chunks, using the same switch structure as above.
	 */
	switch (3 - (n >> 4)) {
	case 0x00:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	case 0x01:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	case 0x02:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	default:
		break;
	}
	/* Copy any remaining bytes, without going beyond end of buffers */
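	/*
	 * The tail copy below starts 16 - n bytes behind the current
	 * position, over bytes already written above (at least 128 bytes
	 * have been copied on this path), and ends exactly at the end of
	 * both buffers.
	 */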
	if (n != 0) {
		rte_mov16((uint8_t *)dst - 16 + n,
			(const uint8_t *)src - 16 + n);
	}
	return ret;
}
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_H_ */