/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMCPY_X86_64_H_
#define _RTE_MEMCPY_X86_64_H_

/**
 * @file
 *
 * Functions for SSE/AVX/AVX2/AVX512 implementation of memcpy().
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <rte_vect.h>
#include <rte_common.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Copy bytes from one location to another. The locations must not overlap.
 *
 * @note This is implemented as a macro, so its address should not be taken
 * and care is needed as parameter expressions may be evaluated multiple times.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 * @param n
 *   Number of bytes to copy.
 * @return
 *   Pointer to the destination data.
 */
static inline void *
rte_memcpy(void *dst, const void *src, size_t n) __attribute__((always_inline));
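
/*
 * Usage sketch (illustrative only, not part of this header): rte_memcpy() is
 * called exactly like the libc memcpy() and may only be used on
 * non-overlapping buffers. The buffer names below are hypothetical.
 *
 *     uint8_t src_buf[256];
 *     uint8_t dst_buf[256];
 *
 *     memset(src_buf, 0xab, sizeof(src_buf));
 *     rte_memcpy(dst_buf, src_buf, sizeof(src_buf));
 */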

#ifdef RTE_MACHINE_CPUFLAG_AVX512F

#define ALIGNMENT_MASK 0x3F

/**
 * AVX512 implementation below
 */

/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
    __m128i xmm0;

    xmm0 = _mm_loadu_si128((const __m128i *)src);
    _mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
    __m256i ymm0;

    ymm0 = _mm256_loadu_si256((const __m256i *)src);
    _mm256_storeu_si256((__m256i *)dst, ymm0);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
    __m512i zmm0;

    zmm0 = _mm512_loadu_si512((const void *)src);
    _mm512_storeu_si512((void *)dst, zmm0);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
    rte_mov64(dst + 0 * 64, src + 0 * 64);
    rte_mov64(dst + 1 * 64, src + 1 * 64);
}

/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
    rte_mov64(dst + 0 * 64, src + 0 * 64);
    rte_mov64(dst + 1 * 64, src + 1 * 64);
    rte_mov64(dst + 2 * 64, src + 2 * 64);
    rte_mov64(dst + 3 * 64, src + 3 * 64);
}

/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
    __m512i zmm0, zmm1;

    while (n >= 128) {
        zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
        n -= 128;
        zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
        src = src + 128;
        _mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
        _mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
        dst = dst + 128;
    }
}

/**
 * Copy 512-byte blocks from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
    __m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;

    while (n >= 512) {
        zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
        n -= 512;
        zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
        zmm2 = _mm512_loadu_si512((const void *)(src + 2 * 64));
        zmm3 = _mm512_loadu_si512((const void *)(src + 3 * 64));
        zmm4 = _mm512_loadu_si512((const void *)(src + 4 * 64));
        zmm5 = _mm512_loadu_si512((const void *)(src + 5 * 64));
        zmm6 = _mm512_loadu_si512((const void *)(src + 6 * 64));
        zmm7 = _mm512_loadu_si512((const void *)(src + 7 * 64));
        src = src + 512;
        _mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
        _mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
        _mm512_storeu_si512((void *)(dst + 2 * 64), zmm2);
        _mm512_storeu_si512((void *)(dst + 3 * 64), zmm3);
        _mm512_storeu_si512((void *)(dst + 4 * 64), zmm4);
        _mm512_storeu_si512((void *)(dst + 5 * 64), zmm5);
        _mm512_storeu_si512((void *)(dst + 6 * 64), zmm6);
        _mm512_storeu_si512((void *)(dst + 7 * 64), zmm7);
        dst = dst + 512;
    }
}

static inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
    uintptr_t dstu = (uintptr_t)dst;
    uintptr_t srcu = (uintptr_t)src;
    void *ret = dst;
    size_t dstofss;
    size_t bits;

    /**
     * Copy less than 16 bytes
     */
    if (n < 16) {
        if (n & 0x01) {
            *(uint8_t *)dstu = *(const uint8_t *)srcu;
            srcu = (uintptr_t)((const uint8_t *)srcu + 1);
            dstu = (uintptr_t)((uint8_t *)dstu + 1);
        }
        if (n & 0x02) {
            *(uint16_t *)dstu = *(const uint16_t *)srcu;
            srcu = (uintptr_t)((const uint16_t *)srcu + 1);
            dstu = (uintptr_t)((uint16_t *)dstu + 1);
        }
        if (n & 0x04) {
            *(uint32_t *)dstu = *(const uint32_t *)srcu;
            srcu = (uintptr_t)((const uint32_t *)srcu + 1);
            dstu = (uintptr_t)((uint32_t *)dstu + 1);
        }
        if (n & 0x08)
            *(uint64_t *)dstu = *(const uint64_t *)srcu;
        return ret;
    }

    /**
     * Fast way when copy size doesn't exceed 512 bytes
     */
    if (n <= 32) {
        rte_mov16((uint8_t *)dst, (const uint8_t *)src);
        rte_mov16((uint8_t *)dst - 16 + n,
                  (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 64) {
        rte_mov32((uint8_t *)dst, (const uint8_t *)src);
        rte_mov32((uint8_t *)dst - 32 + n,
                  (const uint8_t *)src - 32 + n);
        return ret;
    }
    if (n <= 512) {
        if (n >= 256) {
            n -= 256;
            rte_mov256((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 256;
            dst = (uint8_t *)dst + 256;
        }
        if (n >= 128) {
            n -= 128;
            rte_mov128((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 128;
            dst = (uint8_t *)dst + 128;
        }
COPY_BLOCK_128_BACK63:
        if (n > 64) {
            rte_mov64((uint8_t *)dst, (const uint8_t *)src);
            rte_mov64((uint8_t *)dst - 64 + n,
                      (const uint8_t *)src - 64 + n);
            return ret;
        }
        if (n > 0)
            rte_mov64((uint8_t *)dst - 64 + n,
                      (const uint8_t *)src - 64 + n);
        return ret;
    }

    /**
     * Make store aligned when copy size exceeds 512 bytes
     */
    dstofss = ((uintptr_t)dst & 0x3F);
    if (dstofss > 0) {
        dstofss = 64 - dstofss;
        n -= dstofss;
        rte_mov64((uint8_t *)dst, (const uint8_t *)src);
        src = (const uint8_t *)src + dstofss;
        dst = (uint8_t *)dst + dstofss;
    }
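
    /*
     * Worked example (hypothetical address): if dst ends in 0x34, then
     * dstofss = 0x34 (52) and becomes 64 - 52 = 12. The rte_mov64() above
     * writes a full 64 bytes, but src/dst advance and n shrinks by only 12,
     * so dst is 64-byte aligned for the block copies below.
     */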

    /**
     * Copy 512-byte blocks.
     * Use copy block function for better instruction order control,
     * which is important when load is unaligned.
     */
    rte_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
    bits = n;
    n = n & 511;
    bits -= n;
    src = (const uint8_t *)src + bits;
    dst = (uint8_t *)dst + bits;

    /**
     * Copy 128-byte blocks.
     * Use copy block function for better instruction order control,
     * which is important when load is unaligned.
     */
    if (n >= 128) {
        rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
        bits = n;
        n = n & 127;
        bits -= n;
        src = (const uint8_t *)src + bits;
        dst = (uint8_t *)dst + bits;
    }

    /**
     * Copy whatever is left
     */
    goto COPY_BLOCK_128_BACK63;
}

#elif defined RTE_MACHINE_CPUFLAG_AVX2

#define ALIGNMENT_MASK 0x1F

/**
 * AVX2 implementation below
 */

/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
    __m128i xmm0;

    xmm0 = _mm_loadu_si128((const __m128i *)src);
    _mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
    __m256i ymm0;

    ymm0 = _mm256_loadu_si256((const __m256i *)src);
    _mm256_storeu_si256((__m256i *)dst, ymm0);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
    rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
    rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
    rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
    rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
    rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
    rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
}

/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
    __m256i ymm0, ymm1, ymm2, ymm3;

    while (n >= 128) {
        ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
        n -= 128;
        ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
        ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
        ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
        src = (const uint8_t *)src + 128;
        _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
        _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
        _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
        _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
        dst = (uint8_t *)dst + 128;
    }
}

static inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
    uintptr_t dstu = (uintptr_t)dst;
    uintptr_t srcu = (uintptr_t)src;
    void *ret = dst;
    size_t dstofss;
    size_t bits;

    /**
     * Copy less than 16 bytes
     */
    if (n < 16) {
        if (n & 0x01) {
            *(uint8_t *)dstu = *(const uint8_t *)srcu;
            srcu = (uintptr_t)((const uint8_t *)srcu + 1);
            dstu = (uintptr_t)((uint8_t *)dstu + 1);
        }
        if (n & 0x02) {
            *(uint16_t *)dstu = *(const uint16_t *)srcu;
            srcu = (uintptr_t)((const uint16_t *)srcu + 1);
            dstu = (uintptr_t)((uint16_t *)dstu + 1);
        }
        if (n & 0x04) {
            *(uint32_t *)dstu = *(const uint32_t *)srcu;
            srcu = (uintptr_t)((const uint32_t *)srcu + 1);
            dstu = (uintptr_t)((uint32_t *)dstu + 1);
        }
        if (n & 0x08)
            *(uint64_t *)dstu = *(const uint64_t *)srcu;
        return ret;
    }

    /**
     * Fast way when copy size doesn't exceed 256 bytes
     */
    if (n <= 32) {
        rte_mov16((uint8_t *)dst, (const uint8_t *)src);
        rte_mov16((uint8_t *)dst - 16 + n,
                  (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 48) {
        rte_mov16((uint8_t *)dst, (const uint8_t *)src);
        rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
        rte_mov16((uint8_t *)dst - 16 + n,
                  (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 64) {
        rte_mov32((uint8_t *)dst, (const uint8_t *)src);
        rte_mov32((uint8_t *)dst - 32 + n,
                  (const uint8_t *)src - 32 + n);
        return ret;
    }
    if (n <= 256) {
        if (n >= 128) {
            n -= 128;
            rte_mov128((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 128;
            dst = (uint8_t *)dst + 128;
        }
COPY_BLOCK_128_BACK31:
        if (n >= 64) {
            n -= 64;
            rte_mov64((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 64;
            dst = (uint8_t *)dst + 64;
        }
        if (n > 32) {
            rte_mov32((uint8_t *)dst, (const uint8_t *)src);
            rte_mov32((uint8_t *)dst - 32 + n,
                      (const uint8_t *)src - 32 + n);
            return ret;
        }
        if (n > 0) {
            rte_mov32((uint8_t *)dst - 32 + n,
                      (const uint8_t *)src - 32 + n);
        }
        return ret;
    }

    /**
     * Make store aligned when copy size exceeds 256 bytes
     */
    dstofss = (uintptr_t)dst & 0x1F;
    if (dstofss > 0) {
        dstofss = 32 - dstofss;
        n -= dstofss;
        rte_mov32((uint8_t *)dst, (const uint8_t *)src);
        src = (const uint8_t *)src + dstofss;
        dst = (uint8_t *)dst + dstofss;
    }

    /**
     * Copy 128-byte blocks
     */
    rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
    bits = n;
    n = n & 127;
    bits -= n;
    src = (const uint8_t *)src + bits;
    dst = (uint8_t *)dst + bits;

    /**
     * Copy whatever is left
     */
    goto COPY_BLOCK_128_BACK31;
}

#else /* RTE_MACHINE_CPUFLAG */

#define ALIGNMENT_MASK 0x0F

/**
 * SSE & AVX implementation below
 */

/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
    __m128i xmm0;

    xmm0 = _mm_loadu_si128((const __m128i *)src);
    _mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
    rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
    rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
    rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
    rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
    rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
    rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
    rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
    rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
    rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
    rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
    rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
    rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
    rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
    rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
}

/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
    rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
    rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
    rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
    rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
    rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
    rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
    rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
    rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
    rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
    rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
    rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
    rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
    rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
    rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
    rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
    rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
}

/**
 * Macro for copying an unaligned block from one location to another with a constant load offset,
 * 47 bytes leftover maximum,
 * locations should not overlap.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be an immediate value within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 * (See the note after this macro for a worked example of the byte-alignment step.)
 */
#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) \
__extension__ ({ \
    size_t tmp; \
    while (len >= 128 + 16 - offset) { \
        xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
        len -= 128; \
        xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
        xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
        xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16)); \
        xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16)); \
        xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16)); \
        xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16)); \
        xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16)); \
        xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16)); \
        src = (const uint8_t *)src + 128; \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
        dst = (uint8_t *)dst + 128; \
    } \
    tmp = len; \
    len = ((len - 16 + offset) & 127) + 16 - offset; \
    tmp -= len; \
    src = (const uint8_t *)src + tmp; \
    dst = (uint8_t *)dst + tmp; \
    if (len >= 32 + 16 - offset) { \
        while (len >= 32 + 16 - offset) { \
            xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
            len -= 32; \
            xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
            xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
            src = (const uint8_t *)src + 32; \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
            dst = (uint8_t *)dst + 32; \
        } \
        tmp = len; \
        len = ((len - 16 + offset) & 31) + 16 - offset; \
        tmp -= len; \
        src = (const uint8_t *)src + tmp; \
        dst = (uint8_t *)dst + tmp; \
    } \
})
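
/*
 * Note (illustrative): _mm_alignr_epi8(xmm1, xmm0, offset) concatenates
 * xmm1:xmm0 into a 32-byte value, shifts it right by <offset> bytes and
 * returns the low 16 bytes. With offset == 3, for example, the result is
 * bytes 3..15 of xmm0 followed by bytes 0..2 of xmm1, i.e. 16 contiguous
 * source bytes starting at the true copy position, which is why the loads
 * above read from (src - offset) while the stores hit the aligned destination.
 */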

/**
 * Macro for copying an unaligned block from one location to another,
 * 47 bytes leftover maximum,
 * locations should not overlap.
 * Use a switch here because the aligning instruction requires an immediate value for the shift count.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be pre-defined
 */
#define MOVEUNALIGNED_LEFT47(dst, src, len, offset) \
__extension__ ({ \
    switch (offset) { \
    case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x01); break; \
    case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x02); break; \
    case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x03); break; \
    case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x04); break; \
    case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x05); break; \
    case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x06); break; \
    case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x07); break; \
    case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x08); break; \
    case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x09); break; \
    case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0A); break; \
    case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0B); break; \
    case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0C); break; \
    case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0D); break; \
    case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0E); break; \
    case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0F); break; \
    default:; \
    } \
})

static inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
    uintptr_t dstu = (uintptr_t)dst;
    uintptr_t srcu = (uintptr_t)src;
    void *ret = dst;
    size_t dstofss;
    size_t srcofs;

    /**
     * Copy less than 16 bytes
     */
    if (n < 16) {
        if (n & 0x01) {
            *(uint8_t *)dstu = *(const uint8_t *)srcu;
            srcu = (uintptr_t)((const uint8_t *)srcu + 1);
            dstu = (uintptr_t)((uint8_t *)dstu + 1);
        }
        if (n & 0x02) {
            *(uint16_t *)dstu = *(const uint16_t *)srcu;
            srcu = (uintptr_t)((const uint16_t *)srcu + 1);
            dstu = (uintptr_t)((uint16_t *)dstu + 1);
        }
        if (n & 0x04) {
            *(uint32_t *)dstu = *(const uint32_t *)srcu;
            srcu = (uintptr_t)((const uint32_t *)srcu + 1);
            dstu = (uintptr_t)((uint32_t *)dstu + 1);
        }
        if (n & 0x08)
            *(uint64_t *)dstu = *(const uint64_t *)srcu;
        return ret;
    }

    /**
     * Fast way when copy size doesn't exceed 512 bytes
     */
    if (n <= 32) {
        rte_mov16((uint8_t *)dst, (const uint8_t *)src);
        rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 48) {
        rte_mov32((uint8_t *)dst, (const uint8_t *)src);
        rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 64) {
        rte_mov32((uint8_t *)dst, (const uint8_t *)src);
        rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
        rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        return ret;
    }
    if (n <= 128)
        goto COPY_BLOCK_128_BACK15;
    if (n <= 512) {
        if (n >= 256) {
            n -= 256;
            rte_mov128((uint8_t *)dst, (const uint8_t *)src);
            rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
            src = (const uint8_t *)src + 256;
            dst = (uint8_t *)dst + 256;
        }
COPY_BLOCK_255_BACK15:
        if (n >= 128) {
            n -= 128;
            rte_mov128((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 128;
            dst = (uint8_t *)dst + 128;
        }
COPY_BLOCK_128_BACK15:
        if (n >= 64) {
            n -= 64;
            rte_mov64((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 64;
            dst = (uint8_t *)dst + 64;
        }
COPY_BLOCK_64_BACK15:
        if (n >= 32) {
            n -= 32;
            rte_mov32((uint8_t *)dst, (const uint8_t *)src);
            src = (const uint8_t *)src + 32;
            dst = (uint8_t *)dst + 32;
        }
        if (n > 16) {
            rte_mov16((uint8_t *)dst, (const uint8_t *)src);
            rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
            return ret;
        }
        if (n > 0) {
            rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
        }
        return ret;
    }

    /**
     * Make store aligned when copy size exceeds 512 bytes,
     * and make sure the first 15 bytes are copied, because
     * unaligned copy functions require up to 15 bytes
     * backwards access.
     */
    dstofss = (uintptr_t)dst & 0x0F;
    if (dstofss > 0) {
        dstofss = 16 - dstofss + 16;
        n -= dstofss;
        rte_mov32((uint8_t *)dst, (const uint8_t *)src);
        src = (const uint8_t *)src + dstofss;
        dst = (uint8_t *)dst + dstofss;
    }
    srcofs = ((uintptr_t)src & 0x0F);
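
    /*
     * Worked example (hypothetical address): if dst ends in 0x0A, then
     * dstofss = 10 and becomes 16 - 10 + 16 = 22. The rte_mov32() above
     * writes 32 bytes, but the pointers advance by only 22; dst is now
     * 16-byte aligned, and because at least 16 bytes before the new src
     * have already been copied, the unaligned loop's reads at
     * (src - offset), offset <= 15, stay within copied territory.
     */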

    /**
     * For aligned copy
     */
    if (srcofs == 0) {
        /**
         * Copy 256-byte blocks
         */
        for (; n >= 256; n -= 256) {
            rte_mov256((uint8_t *)dst, (const uint8_t *)src);
            dst = (uint8_t *)dst + 256;
            src = (const uint8_t *)src + 256;
        }

        /**
         * Copy whatever is left
         */
        goto COPY_BLOCK_255_BACK15;
    }

    /**
     * For copy with unaligned load
     */
    MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);

    /**
     * Copy whatever is left
     */
    goto COPY_BLOCK_64_BACK15;
}

#endif /* RTE_MACHINE_CPUFLAG */

static inline void *
rte_memcpy_aligned(void *dst, const void *src, size_t n)
{
    void *ret = dst;

    /* Copy size <= 16 bytes */
    if (n < 16) {
        if (n & 0x01) {
            *(uint8_t *)dst = *(const uint8_t *)src;
            src = (const uint8_t *)src + 1;
            dst = (uint8_t *)dst + 1;
        }
        if (n & 0x02) {
            *(uint16_t *)dst = *(const uint16_t *)src;
            src = (const uint16_t *)src + 1;
            dst = (uint16_t *)dst + 1;
        }
        if (n & 0x04) {
            *(uint32_t *)dst = *(const uint32_t *)src;
            src = (const uint32_t *)src + 1;
            dst = (uint32_t *)dst + 1;
        }
        if (n & 0x08)
            *(uint64_t *)dst = *(const uint64_t *)src;

        return ret;
    }

    /* Copy 16 <= size <= 32 bytes */
    if (n <= 32) {
        rte_mov16((uint8_t *)dst, (const uint8_t *)src);
        rte_mov16((uint8_t *)dst - 16 + n,
                  (const uint8_t *)src - 16 + n);

        return ret;
    }

    /* Copy 32 < size <= 64 bytes */
    if (n <= 64) {
        rte_mov32((uint8_t *)dst, (const uint8_t *)src);
        rte_mov32((uint8_t *)dst - 32 + n,
                  (const uint8_t *)src - 32 + n);

        return ret;
    }

    /* Copy 64-byte blocks */
    for (; n >= 64; n -= 64) {
        rte_mov64((uint8_t *)dst, (const uint8_t *)src);
        dst = (uint8_t *)dst + 64;
        src = (const uint8_t *)src + 64;
    }

    /* Copy whatever is left */
    rte_mov64((uint8_t *)dst - 64 + n,
              (const uint8_t *)src - 64 + n);

    return ret;
}
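
/*
 * Worked example (hypothetical size): for n == 100 the loop above copies
 * bytes 0..63 and leaves n == 36; the final rte_mov64() then copies the last
 * 64 bytes of the region (bytes 36..99), rewriting bytes 36..63 that were
 * already copied. The overlap is harmless and avoids a variable-length tail.
 */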

static inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
    if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
        return rte_memcpy_aligned(dst, src, n);
    else
        return rte_memcpy_generic(dst, src, n);
}
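
/*
 * Dispatch note (illustrative): ALIGNMENT_MASK matches the widest vector of
 * the selected implementation (0x0F for SSE, 0x1F for AVX2, 0x3F for AVX512F).
 * For example, with AVX2 and dst ending in 0x40 and src ending in 0x80, the
 * OR of both addresses has no bits set under 0x1F, so the aligned fast path
 * is taken; otherwise rte_memcpy_generic() handles the unaligned case.
 */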

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_X86_64_H_ */