/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMCPY_X86_64_H_
#define _RTE_MEMCPY_X86_64_H_

/**
 * @file
 *
 * Functions for SSE/AVX/AVX2/AVX512 implementation of memcpy().
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <rte_vect.h>
#include <rte_common.h>

#ifdef __cplusplus
extern "C" {
#endif
/**
 * Copy bytes from one location to another. The locations must not overlap.
 *
 * @note This is implemented as a macro, so its address should not be taken
 * and care is needed as parameter expressions may be evaluated multiple times.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 * @param n
 *   Number of bytes to copy.
 * @return
 *   Pointer to the destination data.
 */
static __rte_always_inline void *
rte_memcpy(void *dst, const void *src, size_t n);
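/*
 * Usage is identical to memcpy() on non-overlapping buffers, for example:
 *
 *	uint8_t pkt[64];
 *	uint8_t copy[64];
 *
 *	rte_memcpy(copy, pkt, sizeof(pkt));
 */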
#ifdef RTE_MACHINE_CPUFLAG_AVX512F

#define ALIGNMENT_MASK 0x3F

/**
 * AVX512 implementation below
 */
/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}
/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0;

	ymm0 = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dst, ymm0);
}
/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	__m512i zmm0;

	zmm0 = _mm512_loadu_si512((const void *)src);
	_mm512_storeu_si512((void *)dst, zmm0);
}
/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov64(dst + 0 * 64, src + 0 * 64);
	rte_mov64(dst + 1 * 64, src + 1 * 64);
}
/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov64(dst + 0 * 64, src + 0 * 64);
	rte_mov64(dst + 1 * 64, src + 1 * 64);
	rte_mov64(dst + 2 * 64, src + 2 * 64);
	rte_mov64(dst + 3 * 64, src + 3 * 64);
}
/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m512i zmm0, zmm1;

	while (n >= 128) {
		zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
		n -= 128;
		zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
		src = src + 128;
		_mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
		_mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
		dst = dst + 128;
	}
}
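/*
 * Note: in the block-copy loops above and below, all loads of an iteration
 * are issued before any store, with the length and pointer updates placed
 * between them. Keeping the (possibly unaligned) loads ahead of the aligned
 * stores is the "instruction order control" referred to by the callers.
 */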
/**
 * Copy 512-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;

	while (n >= 512) {
		zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
		n -= 512;
		zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
		zmm2 = _mm512_loadu_si512((const void *)(src + 2 * 64));
		zmm3 = _mm512_loadu_si512((const void *)(src + 3 * 64));
		zmm4 = _mm512_loadu_si512((const void *)(src + 4 * 64));
		zmm5 = _mm512_loadu_si512((const void *)(src + 5 * 64));
		zmm6 = _mm512_loadu_si512((const void *)(src + 6 * 64));
		zmm7 = _mm512_loadu_si512((const void *)(src + 7 * 64));
		src = src + 512;
		_mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
		_mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
		_mm512_storeu_si512((void *)(dst + 2 * 64), zmm2);
		_mm512_storeu_si512((void *)(dst + 3 * 64), zmm3);
		_mm512_storeu_si512((void *)(dst + 4 * 64), zmm4);
		_mm512_storeu_si512((void *)(dst + 5 * 64), zmm5);
		_mm512_storeu_si512((void *)(dst + 6 * 64), zmm6);
		_mm512_storeu_si512((void *)(dst + 7 * 64), zmm7);
		dst = dst + 512;
	}
}
static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t bits;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08)
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		return ret;
	}
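	/*
	 * Each set bit in the low four bits of n selects exactly one of the
	 * stores above: e.g. n = 11 (binary 1011) is copied as one 1-byte,
	 * one 2-byte and one 8-byte store, with no branch on the exact size.
	 */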
	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
				(const uint8_t *)src - 32 + n);
		return ret;
	}
	if (n <= 512) {
		if (n >= 256) {
			n -= 256;
			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 256;
			dst = (uint8_t *)dst + 256;
		}
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK63:
		if (n > 64) {
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			rte_mov64((uint8_t *)dst - 64 + n,
					(const uint8_t *)src - 64 + n);
			return ret;
		}
		if (n > 0)
			rte_mov64((uint8_t *)dst - 64 + n,
					(const uint8_t *)src - 64 + n);
		return ret;
	}
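	/*
	 * The paired moves above copy the head and the tail of the region;
	 * when n is not a multiple of the move size the two copies overlap.
	 * E.g. for n = 100 at the label above, rte_mov64(dst, src) covers
	 * bytes 0..63 and rte_mov64(dst - 64 + n, ...) covers bytes 36..99,
	 * so bytes 36..63 are simply written twice with identical data.
	 */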
	/**
	 * Make store aligned when copy size exceeds 512 bytes
	 */
	dstofss = ((uintptr_t)dst & 0x3F);
	if (dstofss > 0) {
		dstofss = 64 - dstofss;
		n -= dstofss;
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		src = (const uint8_t *)src + dstofss;
		dst = (uint8_t *)dst + dstofss;
	}
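	/*
	 * E.g. if dst is 40 bytes past a 64-byte boundary, dstofss becomes
	 * 64 - 40 = 24: the rte_mov64() above copies a full 64 bytes, but
	 * the pointers advance by only 24, so the block copy below starts
	 * on a 64-byte-aligned destination (the extra bytes just written
	 * are overwritten again with the same data).
	 */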
	/**
	 * Copy 512-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when load is unaligned.
	 */
	rte_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
	bits = n;
	n = n & 511;
	bits -= n;
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;

	/**
	 * Copy 128-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when load is unaligned.
	 */
	if (n >= 128) {
		rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
		bits = n;
		n = n & 127;
		bits -= n;
		src = (const uint8_t *)src + bits;
		dst = (uint8_t *)dst + bits;
	}

	/**
	 * Copy whatever left
	 */
	goto COPY_BLOCK_128_BACK63;
}
#elif defined RTE_MACHINE_CPUFLAG_AVX2

#define ALIGNMENT_MASK 0x1F

/**
 * AVX2 implementation below
 */
/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}
/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0;

	ymm0 = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dst, ymm0);
}
/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
}
/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
}
/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m256i ymm0, ymm1, ymm2, ymm3;

	while (n >= 128) {
		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
		n -= 128;
		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
		ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
		ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
		src = (const uint8_t *)src + 128;
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
		dst = (uint8_t *)dst + 128;
	}
}
static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t bits;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08)
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		return ret;
	}

	/**
	 * Fast way when copy size doesn't exceed 256 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 48) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
				(const uint8_t *)src - 32 + n);
		return ret;
	}
	if (n <= 256) {
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK31:
		if (n >= 64) {
			n -= 64;
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 64;
			dst = (uint8_t *)dst + 64;
		}
		if (n > 32) {
			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
			rte_mov32((uint8_t *)dst - 32 + n,
					(const uint8_t *)src - 32 + n);
			return ret;
		}
		if (n > 0)
			rte_mov32((uint8_t *)dst - 32 + n,
					(const uint8_t *)src - 32 + n);
		return ret;
	}

	/**
	 * Make store aligned when copy size exceeds 256 bytes
	 */
	dstofss = (uintptr_t)dst & 0x1F;
	if (dstofss > 0) {
		dstofss = 32 - dstofss;
		n -= dstofss;
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		src = (const uint8_t *)src + dstofss;
		dst = (uint8_t *)dst + dstofss;
	}

	/**
	 * Copy 128-byte blocks
	 */
	rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
	bits = n;
	n = n & 127;
	bits -= n;
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;

	/**
	 * Copy whatever left
	 */
	goto COPY_BLOCK_128_BACK31;
}
#else /* RTE_MACHINE_CPUFLAG */

#define ALIGNMENT_MASK 0x0F

/**
 * SSE & AVX implementation below
 */
/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}
/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
}
/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
}
/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
}
/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
}
/**
 * Macro for copying unaligned block from one location to another with
 * constant load offset, 47 bytes leftover maximum,
 * locations should not overlap.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be immediate value within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes
 *   forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 */
#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset)                                                     \
__extension__ ({                                                                                            \
    size_t tmp;                                                                                             \
    while (len >= 128 + 16 - offset) {                                                                      \
        xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16));                  \
        len -= 128;                                                                                         \
        xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16));                  \
        xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16));                  \
        xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16));                  \
        xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16));                  \
        xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16));                  \
        xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16));                  \
        xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16));                  \
        xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16));                  \
        src = (const uint8_t *)src + 128;                                                                   \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));        \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));        \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset));        \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset));        \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset));        \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset));        \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset));        \
        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset));        \
        dst = (uint8_t *)dst + 128;                                                                         \
    }                                                                                                       \
    tmp = len;                                                                                              \
    len = ((len - 16 + offset) & 127) + 16 - offset;                                                        \
    tmp -= len;                                                                                             \
    src = (const uint8_t *)src + tmp;                                                                       \
    dst = (uint8_t *)dst + tmp;                                                                             \
    if (len >= 32 + 16 - offset) {                                                                          \
        while (len >= 32 + 16 - offset) {                                                                   \
            xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16));              \
            len -= 32;                                                                                      \
            xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16));              \
            xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16));              \
            src = (const uint8_t *)src + 32;                                                                \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));    \
            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));    \
            dst = (uint8_t *)dst + 32;                                                                      \
        }                                                                                                   \
        tmp = len;                                                                                          \
        len = ((len - 16 + offset) & 31) + 16 - offset;                                                     \
        tmp -= len;                                                                                         \
        src = (const uint8_t *)src + tmp;                                                                   \
        dst = (uint8_t *)dst + tmp;                                                                         \
    }                                                                                                       \
})
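/*
 * In the loops above every load is taken at (src - offset), which the
 * caller guarantees is 16-byte aligned, and _mm_alignr_epi8(hi, lo, offset)
 * shifts the concatenated 32-byte pair right by <offset> bytes to recover
 * the bytes actually requested. E.g. with offset = 5: xmm0 holds
 * src[-5..10], xmm1 holds src[11..26], and _mm_alignr_epi8(xmm1, xmm0, 5)
 * yields exactly src[0..15].
 */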
/**
 * Macro for copying unaligned block from one location to another,
 * 47 bytes leftover maximum,
 * locations should not overlap.
 * Use switch here because the aligning instruction requires an immediate
 * value for the shift count.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes
 *   forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be
 *   pre-defined
 */
#define MOVEUNALIGNED_LEFT47(dst, src, len, offset)                   \
__extension__ ({                                                      \
    switch (offset) {                                                 \
    case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x01); break;  \
    case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x02); break;  \
    case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x03); break;  \
    case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x04); break;  \
    case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x05); break;  \
    case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x06); break;  \
    case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x07); break;  \
    case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x08); break;  \
    case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x09); break;  \
    case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0A); break;  \
    case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0B); break;  \
    case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0C); break;  \
    case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0D); break;  \
    case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0E); break;  \
    case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0F); break;  \
    default:;                                                         \
    }                                                                 \
})
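/*
 * The switch exists because PALIGNR (_mm_alignr_epi8) encodes its byte-shift
 * count as an immediate in the instruction and cannot take a run-time value:
 * one loop body is instantiated per possible source misalignment (1..15) and
 * the right one is selected at run time.
 */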
static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t srcofs;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08)
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		return ret;
	}
	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 48) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 128)
		goto COPY_BLOCK_128_BACK15;
	if (n <= 512) {
		if (n >= 256) {
			n -= 256;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
			src = (const uint8_t *)src + 256;
			dst = (uint8_t *)dst + 256;
		}
COPY_BLOCK_255_BACK15:
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK15:
		if (n >= 64) {
			n -= 64;
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 64;
			dst = (uint8_t *)dst + 64;
		}
COPY_BLOCK_64_BACK15:
		if (n >= 32) {
			n -= 32;
			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 32;
			dst = (uint8_t *)dst + 32;
		}
		if (n > 16) {
			rte_mov16((uint8_t *)dst, (const uint8_t *)src);
			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
			return ret;
		}
		if (n > 0)
			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	/**
	 * Make store aligned when copy size exceeds 512 bytes,
	 * and make sure the first 15 bytes are copied, because
	 * unaligned copy functions require up to 15 bytes of
	 * backwards access.
	 */
	dstofss = (uintptr_t)dst & 0x0F;
	if (dstofss > 0) {
		dstofss = 16 - dstofss + 16;
		n -= dstofss;
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		src = (const uint8_t *)src + dstofss;
		dst = (uint8_t *)dst + dstofss;
	}
	srcofs = ((uintptr_t)src & 0x0F);
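	/*
	 * E.g. if dst is 5 bytes past a 16-byte boundary, dstofss is
	 * 16 - 5 + 16 = 27: rte_mov32() copies 32 bytes, the pointers
	 * advance by 27, dst becomes 16-byte aligned (5 + 27 = 32), and at
	 * least 16 source bytes now sit behind the new src, so the
	 * unaligned-load macros may safely read up to 15 bytes backwards.
	 */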
	/**
	 * For aligned copy
	 */
	if (srcofs == 0) {
		/**
		 * Copy 256-byte blocks
		 */
		for (; n >= 256; n -= 256) {
			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
			dst = (uint8_t *)dst + 256;
			src = (const uint8_t *)src + 256;
		}

		/**
		 * Copy whatever left
		 */
		goto COPY_BLOCK_255_BACK15;
	}

	/**
	 * For copy with unaligned load
	 */
	MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);

	/**
	 * Copy whatever left
	 */
	goto COPY_BLOCK_64_BACK15;
}
#endif /* RTE_MACHINE_CPUFLAG */
static __rte_always_inline void *
rte_memcpy_aligned(void *dst, const void *src, size_t n)
{
	void *ret = dst;

	/* Copy size <= 16 bytes */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dst = *(const uint8_t *)src;
			src = (const uint8_t *)src + 1;
			dst = (uint8_t *)dst + 1;
		}
		if (n & 0x02) {
			*(uint16_t *)dst = *(const uint16_t *)src;
			src = (const uint16_t *)src + 1;
			dst = (uint16_t *)dst + 1;
		}
		if (n & 0x04) {
			*(uint32_t *)dst = *(const uint32_t *)src;
			src = (const uint32_t *)src + 1;
			dst = (uint32_t *)dst + 1;
		}
		if (n & 0x08)
			*(uint64_t *)dst = *(const uint64_t *)src;

		return ret;
	}

	/* Copy 16 <= size <= 32 bytes */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);

		return ret;
	}

	/* Copy 32 < size <= 64 bytes */
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
				(const uint8_t *)src - 32 + n);

		return ret;
	}

	/* Copy 64-byte blocks */
	for (; n >= 64; n -= 64) {
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;
	}

	/* Copy whatever is left */
	rte_mov64((uint8_t *)dst - 64 + n,
			(const uint8_t *)src - 64 + n);

	return ret;
}
static __rte_always_inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
	if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
		return rte_memcpy_aligned(dst, src, n);
	else
		return rte_memcpy_generic(dst, src, n);
}
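/*
 * ORing the two addresses before masking tests both alignments at once:
 * the low bits of (dst | src) are all zero only if they are zero in each
 * pointer individually. E.g. with the AVX2 mask 0x1F, dst = 0x1040 and
 * src = 0x2080 both have their low five bits clear, so the aligned path
 * is taken.
 */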
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_X86_64_H_ */