/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMCPY_X86_64_H_
#define _RTE_MEMCPY_X86_64_H_

/**
 * @file
 *
 * Functions for SSE/AVX/AVX2 implementation of memcpy().
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <x86intrin.h>

#ifdef __cplusplus
extern "C" {
#endif
/**
 * Copy bytes from one location to another. The locations must not overlap.
 *
 * @note This is implemented as a macro, so its address should not be taken
 * and care is needed as parameter expressions may be evaluated multiple times.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 * @param n
 *   Number of bytes to copy.
 * @return
 *   Pointer to the destination data.
 */
static inline void *
rte_memcpy(void *dst, const void *src, size_t n) __attribute__((always_inline));
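
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * rte_memcpy() follows the standard memcpy() contract, so a caller can
 * write, for example:
 *
 *     uint8_t in[256], out[256];
 *     memset(in, 0xab, sizeof(in));
 *     rte_memcpy(out, in, sizeof(in));
 *
 * The two regions must not overlap; use memmove() when they might.
 */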

#ifdef RTE_MACHINE_CPUFLAG_AVX2

/**
 * AVX2 implementation below
 */

/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0;

	ymm0 = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dst, ymm0);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
}

/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
	rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);
	rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);
	rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);
	rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);
}

/**
 * Copy 64-byte blocks from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m256i ymm0, ymm1;

	while (n >= 64) {
		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
		n -= 64;
		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
		src = (const uint8_t *)src + 64;
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
		dst = (uint8_t *)dst + 64;
	}
}
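
/*
 * Note on the block-copy helpers above and below: within each loop
 * iteration every load is issued before any store, with the decrement of
 * n interleaved between the loads. The ordering is deliberate -- it gives
 * the compiler a fixed load/store schedule, which matters most when the
 * loads are unaligned (see the call sites in rte_memcpy() below).
 */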

/**
 * Copy 256-byte blocks from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov256blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;

	while (n >= 256) {
		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
		n -= 256;
		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
		ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
		ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
		ymm4 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 4 * 32));
		ymm5 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 5 * 32));
		ymm6 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 6 * 32));
		ymm7 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 7 * 32));
		src = (const uint8_t *)src + 256;
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 4 * 32), ymm4);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 5 * 32), ymm5);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 6 * 32), ymm6);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 7 * 32), ymm7);
		dst = (uint8_t *)dst + 256;
	}
}

static inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t bits;
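
	/*
	 * The branch below decomposes n < 16 into its binary digits: the
	 * 1-, 2-, 4- and 8-byte moves each fire when the corresponding bit
	 * of n is set, so every length in [0, 15] is covered without a loop.
	 */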
	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08) {
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		}
		return ret;
	}

	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
		return ret;
	}
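
	/*
	 * The paired moves above copy the head and the tail of the buffer;
	 * for the lengths handled there the two regions overlap as needed,
	 * which avoids per-byte tail handling. The same back-copy trick
	 * finishes the 65..512-byte path below.
	 */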
	if (n <= 512) {
		if (n >= 256) {
			n -= 256;
			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 256;
			dst = (uint8_t *)dst + 256;
		}
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
		if (n >= 64) {
			n -= 64;
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 64;
			dst = (uint8_t *)dst + 64;
		}
COPY_BLOCK_64_BACK31:
		if (n > 32) {
			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
			rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
			return ret;
		}
		if (n > 0) {
			rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
		}
		return ret;
	}

	/**
	 * Make store aligned when copy size exceeds 512 bytes
	 */
	dstofss = 32 - ((uintptr_t)dst & 0x1F);
	n -= dstofss;
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + dstofss;
	dst = (uint8_t *)dst + dstofss;
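
	/*
	 * dstofss is the distance from dst to the next 32-byte boundary
	 * (a full 32 bytes when dst is already aligned); the 32-byte move
	 * above covers exactly those head bytes, so every store address in
	 * the block loops below is 32-byte aligned.
	 */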

	/**
	 * Copy 256-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when load is unaligned.
	 */
	rte_mov256blocks((uint8_t *)dst, (const uint8_t *)src, n);
	bits = n;
	n = n & 255;
	bits -= n;
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;

	/**
	 * Copy 64-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when load is unaligned.
	 */
	if (n >= 64) {
		rte_mov64blocks((uint8_t *)dst, (const uint8_t *)src, n);
		bits = n;
		n = n & 63;
		bits -= n;
		src = (const uint8_t *)src + bits;
		dst = (uint8_t *)dst + bits;
	}

	/**
	 * Copy whatever left
	 */
	goto COPY_BLOCK_64_BACK31;
}

#else /* RTE_MACHINE_CPUFLAG_AVX2 */

/**
 * SSE & AVX implementation below
 */

/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
}

/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
}

/**
 * Macro for copying unaligned block from one location to another with constant load offset,
 * 47 bytes leftover maximum,
 * locations should not overlap.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be an immediate value within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 */
#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) \
({ \
	int tmp; \
	while (len >= 128 + 16 - offset) { \
		xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
		len -= 128; \
		xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
		xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
		xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16)); \
		xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16)); \
		xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16)); \
		xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16)); \
		xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16)); \
		xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16)); \
		src = (const uint8_t *)src + 128; \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
		dst = (uint8_t *)dst + 128; \
	} \
	tmp = len; \
	len = ((len - 16 + offset) & 127) + 16 - offset; \
	tmp -= len; \
	src = (const uint8_t *)src + tmp; \
	dst = (uint8_t *)dst + tmp; \
	if (len >= 32 + 16 - offset) { \
		while (len >= 32 + 16 - offset) { \
			xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
			len -= 32; \
			xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
			xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
			src = (const uint8_t *)src + 32; \
			_mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
			_mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
			dst = (uint8_t *)dst + 32; \
		} \
		tmp = len; \
		len = ((len - 16 + offset) & 31) + 16 - offset; \
		tmp -= len; \
		src = (const uint8_t *)src + tmp; \
		dst = (uint8_t *)dst + tmp; \
	} \
})
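
/*
 * How the macro above works: the load <offset> is subtracted from src so
 * that every 16-byte load starts at a 16-byte-aligned address, and
 * _mm_alignr_epi8(hi, lo, offset) then concatenates two adjacent loads
 * and shifts right by <offset> bytes, reassembling the original unaligned
 * byte stream for the aligned 16-byte stores.
 */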

/**
 * Macro for copying unaligned block from one location to another,
 * 47 bytes leftover maximum,
 * locations should not overlap.
 * Use switch here because the aligning instruction requires an immediate value for the shift count.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be pre-defined
 */
#define MOVEUNALIGNED_LEFT47(dst, src, len, offset) \
({ \
	switch (offset) { \
	case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x01); break; \
	case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x02); break; \
	case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x03); break; \
	case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x04); break; \
	case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x05); break; \
	case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x06); break; \
	case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x07); break; \
	case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x08); break; \
	case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x09); break; \
	case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0A); break; \
	case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0B); break; \
	case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0C); break; \
	case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0D); break; \
	case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0E); break; \
	case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0F); break; \
	default:; \
	} \
})
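
/*
 * The switch is needed because _mm_alignr_epi8() takes the byte shift as
 * an immediate operand; rte_memcpy() below invokes this as
 * MOVEUNALIGNED_LEFT47(dst, src, n, srcofs) once srcofs has been computed.
 */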

static inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
	__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t srcofs;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08) {
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		}
		return ret;
	}

	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 48) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 128) {
		goto COPY_BLOCK_128_BACK15;
	}
	if (n <= 512) {
		if (n >= 256) {
			n -= 256;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
			src = (const uint8_t *)src + 256;
			dst = (uint8_t *)dst + 256;
		}
COPY_BLOCK_255_BACK15:
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK15:
		if (n >= 64) {
			n -= 64;
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 64;
			dst = (uint8_t *)dst + 64;
		}
COPY_BLOCK_64_BACK15:
		if (n >= 32) {
			n -= 32;
			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 32;
			dst = (uint8_t *)dst + 32;
		}
		if (n > 16) {
			rte_mov16((uint8_t *)dst, (const uint8_t *)src);
			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
			return ret;
		}
		if (n > 0) {
			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		}
		return ret;
	}

	/**
	 * Make store aligned when copy size exceeds 512 bytes,
	 * and make sure the first 15 bytes are copied, because
	 * unaligned copy functions require up to 15 bytes
	 * backwards access.
	 */
	dstofss = 16 - ((uintptr_t)dst & 0x0F) + 16;
	n -= dstofss;
	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
	src = (const uint8_t *)src + dstofss;
	dst = (uint8_t *)dst + dstofss;
	srcofs = ((uintptr_t)src & 0x0F);
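
	/*
	 * dstofss lands in [17, 32]: it 16-byte aligns dst and always
	 * consumes at least 16 extra bytes, so the backwards loads at
	 * src - srcofs in the unaligned path can never read before the
	 * caller's original src.
	 */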

	/**
	 * For aligned copy
	 */
	if (srcofs == 0) {
		/**
		 * Copy 256-byte blocks
		 */
		for (; n >= 256; n -= 256) {
			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
			dst = (uint8_t *)dst + 256;
			src = (const uint8_t *)src + 256;
		}

		/**
		 * Copy whatever left
		 */
		goto COPY_BLOCK_255_BACK15;
	}

	/**
	 * For copy with unaligned load
	 */
	MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);

	/**
	 * Copy whatever left
	 */
	goto COPY_BLOCK_64_BACK15;
}

#endif /* RTE_MACHINE_CPUFLAG_AVX2 */

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_X86_64_H_ */