/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_MEMCPY_X86_64_H_
#define _RTE_MEMCPY_X86_64_H_

/**
 * @file
 *
 * Functions for SSE/AVX/AVX2/AVX512 implementation of memcpy().
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <rte_vect.h>
#include <rte_common.h>
#include <rte_config.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Copy bytes from one location to another. The locations must not overlap.
 *
 * @note This is implemented as a macro, so its address should not be taken
 * and care is needed as parameter expressions may be evaluated multiple times.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 * @param n
 *   Number of bytes to copy.
 * @return
 *   Pointer to the destination data.
 */
static __rte_always_inline void *
rte_memcpy(void *dst, const void *src, size_t n);
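
/*
 * Illustrative usage (not part of this header; buffer names are hypothetical):
 * rte_memcpy() is a drop-in replacement for memcpy() on non-overlapping
 * buffers, e.g.
 *
 *	uint8_t pkt_copy[256];
 *	rte_memcpy(pkt_copy, pkt_data, sizeof(pkt_copy));  // returns pkt_copy, like memcpy()
 */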

#ifdef RTE_MACHINE_CPUFLAG_AVX512F
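
/*
 * ALIGNMENT_MASK (defined per ISA branch below) is used by rte_memcpy() at
 * the bottom of this file: only when both src and dst are aligned to the
 * widest store used by the build (64 bytes for AVX512) does the fast
 * rte_memcpy_aligned() path get taken.
 */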

#define ALIGNMENT_MASK 0x3F

/**
 * AVX512 implementation below
 */

/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0;

	ymm0 = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dst, ymm0);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	__m512i zmm0;

	zmm0 = _mm512_loadu_si512((const void *)src);
	_mm512_storeu_si512((void *)dst, zmm0);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov64(dst + 0 * 64, src + 0 * 64);
	rte_mov64(dst + 1 * 64, src + 1 * 64);
}

/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov64(dst + 0 * 64, src + 0 * 64);
	rte_mov64(dst + 1 * 64, src + 1 * 64);
	rte_mov64(dst + 2 * 64, src + 2 * 64);
	rte_mov64(dst + 3 * 64, src + 3 * 64);
}

/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m512i zmm0, zmm1;

	while (n >= 128) {
		zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
		n -= 128;
		zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
		src = src + 128;
		_mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
		_mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
		dst = dst + 128;
	}
}

/**
 * Copy 512-byte blocks from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;

	while (n >= 512) {
		zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
		n -= 512;
		zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
		zmm2 = _mm512_loadu_si512((const void *)(src + 2 * 64));
		zmm3 = _mm512_loadu_si512((const void *)(src + 3 * 64));
		zmm4 = _mm512_loadu_si512((const void *)(src + 4 * 64));
		zmm5 = _mm512_loadu_si512((const void *)(src + 5 * 64));
		zmm6 = _mm512_loadu_si512((const void *)(src + 6 * 64));
		zmm7 = _mm512_loadu_si512((const void *)(src + 7 * 64));
		src = src + 512;
		_mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
		_mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
		_mm512_storeu_si512((void *)(dst + 2 * 64), zmm2);
		_mm512_storeu_si512((void *)(dst + 3 * 64), zmm3);
		_mm512_storeu_si512((void *)(dst + 4 * 64), zmm4);
		_mm512_storeu_si512((void *)(dst + 5 * 64), zmm5);
		_mm512_storeu_si512((void *)(dst + 6 * 64), zmm6);
		_mm512_storeu_si512((void *)(dst + 7 * 64), zmm7);
		dst = dst + 512;
	}
}
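
/*
 * Note on the block-copy helpers above: the length decrement and the source
 * pointer bump are interleaved with the loads rather than grouped at the end
 * of the loop body. Mirroring the comments in rte_memcpy_generic(), this
 * appears intended to give the compiler a better instruction schedule when
 * the loads are unaligned.
 */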

static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t bits;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08)
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		return ret;
	}

	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
			  (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
			  (const uint8_t *)src - 32 + n);
		return ret;
	}
	if (n <= 512) {
		if (n >= 256) {
			n -= 256;
			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 256;
			dst = (uint8_t *)dst + 256;
		}
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK63:
		if (n > 64) {
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			rte_mov64((uint8_t *)dst - 64 + n,
				  (const uint8_t *)src - 64 + n);
			return ret;
		}
		if (n > 0)
			rte_mov64((uint8_t *)dst - 64 + n,
				  (const uint8_t *)src - 64 + n);
		return ret;
	}

	/**
	 * Make store aligned when copy size exceeds 512 bytes
	 */
	dstofss = ((uintptr_t)dst & 0x3F);
	if (dstofss > 0) {
		dstofss = 64 - dstofss;
		n -= dstofss;
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		src = (const uint8_t *)src + dstofss;
		dst = (uint8_t *)dst + dstofss;
	}

	/**
	 * Copy 512-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when load is unaligned.
	 */
	rte_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
	bits = n;
	n = n & 511;
	bits -= n;
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;

	/**
	 * Copy 128-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when load is unaligned.
	 */
	if (n >= 128) {
		rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
		bits = n;
		n = n & 127;
		bits -= n;
		src = (const uint8_t *)src + bits;
		dst = (uint8_t *)dst + bits;
	}

	/**
	 * Copy whatever left
	 */
	goto COPY_BLOCK_128_BACK63;
}
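
/*
 * Summary of the AVX512 generic path above: n < 16 uses scalar tail copies,
 * 16..512 bytes use overlapping vector copies from both ends of the buffer,
 * and larger copies first align the destination to 64 bytes, then stream
 * 512-byte and 128-byte blocks before finishing through the
 * COPY_BLOCK_128_BACK63 tail handling.
 */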

#elif defined RTE_MACHINE_CPUFLAG_AVX2

#define ALIGNMENT_MASK 0x1F

/**
 * AVX2 implementation below
 */

/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0;

	ymm0 = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dst, ymm0);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
}

/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m256i ymm0, ymm1, ymm2, ymm3;

	while (n >= 128) {
		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
		n -= 128;
		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
		ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
		ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
		src = (const uint8_t *)src + 128;
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
		dst = (uint8_t *)dst + 128;
	}
}

static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t bits;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08)
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		return ret;
	}

	/**
	 * Fast way when copy size doesn't exceed 256 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
			  (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 48) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
		rte_mov16((uint8_t *)dst - 16 + n,
			  (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
			  (const uint8_t *)src - 32 + n);
		return ret;
	}
	if (n <= 256) {
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK31:
		if (n >= 64) {
			n -= 64;
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 64;
			dst = (uint8_t *)dst + 64;
		}
		if (n > 32) {
			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
			rte_mov32((uint8_t *)dst - 32 + n,
				  (const uint8_t *)src - 32 + n);
			return ret;
		}
		if (n > 0) {
			rte_mov32((uint8_t *)dst - 32 + n,
				  (const uint8_t *)src - 32 + n);
		}
		return ret;
	}

	/**
	 * Make store aligned when copy size exceeds 256 bytes
	 */
	dstofss = (uintptr_t)dst & 0x1F;
	if (dstofss > 0) {
		dstofss = 32 - dstofss;
		n -= dstofss;
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		src = (const uint8_t *)src + dstofss;
		dst = (uint8_t *)dst + dstofss;
	}

	/**
	 * Copy 128-byte blocks
	 */
	rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
	bits = n;
	n = n & 127;
	bits -= n;
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;

	/**
	 * Copy whatever left
	 */
	goto COPY_BLOCK_128_BACK31;
}
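
/*
 * The AVX2 generic path mirrors the AVX512 path, just with 32-byte registers:
 * the inline fast path stops at 256 bytes, the destination is aligned to
 * 32 bytes for large copies, and bulk data then moves in 128-byte blocks.
 */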

#else /* RTE_MACHINE_CPUFLAG */

#define ALIGNMENT_MASK 0x0F

/**
 * SSE & AVX implementation below
 */

/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}

/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
}

/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
}

/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
}

/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
}

/**
 * Macro for copying unaligned block from one location to another with constant load offset,
 * 47 bytes leftover maximum,
 * locations should not overlap.
 * Requirements:
 * - Load offset is <offset>, which must be an immediate value within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 */
#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) \
__extension__ ({ \
	size_t tmp; \
	while (len >= 128 + 16 - offset) { \
		xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
		len -= 128; \
		xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
		xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
		xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16)); \
		xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16)); \
		xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16)); \
		xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16)); \
		xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16)); \
		xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16)); \
		src = (const uint8_t *)src + 128; \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
		dst = (uint8_t *)dst + 128; \
	} \
	tmp = len; \
	len = ((len - 16 + offset) & 127) + 16 - offset; \
	tmp -= len; \
	src = (const uint8_t *)src + tmp; \
	dst = (uint8_t *)dst + tmp; \
	if (len >= 32 + 16 - offset) { \
		while (len >= 32 + 16 - offset) { \
			xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
			len -= 32; \
			xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
			xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
			src = (const uint8_t *)src + 32; \
			_mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
			_mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
			dst = (uint8_t *)dst + 32; \
		} \
		tmp = len; \
		len = ((len - 16 + offset) & 31) + 16 - offset; \
		tmp -= len; \
		src = (const uint8_t *)src + tmp; \
		dst = (uint8_t *)dst + tmp; \
	} \
})
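
/*
 * Worked example (illustrative): with offset == 5, xmm0 holds bytes
 * src[-5..10] and xmm1 holds bytes src[11..26]. _mm_alignr_epi8(xmm1, xmm0, 5)
 * concatenates the two registers and shifts the 32-byte result right by
 * 5 bytes, so the store writes exactly src[0..15] even though every load is
 * taken at the fixed offset src - 5.
 */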

/**
 * Macro for copying unaligned block from one location to another,
 * 47 bytes leftover maximum,
 * locations should not overlap.
 * Use switch here because the aligning instruction requires immediate value for shift count.
 * Requirements:
 * - Load offset is <offset>, which must be within [1, 15]
 * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be pre-defined
 */
#define MOVEUNALIGNED_LEFT47(dst, src, len, offset) \
__extension__ ({ \
	switch (offset) { \
	case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x01); break; \
	case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x02); break; \
	case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x03); break; \
	case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x04); break; \
	case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x05); break; \
	case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x06); break; \
	case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x07); break; \
	case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x08); break; \
	case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x09); break; \
	case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0A); break; \
	case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0B); break; \
	case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0C); break; \
	case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0D); break; \
	case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0E); break; \
	case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0F); break; \
	default:; \
	} \
})

static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t srcofs;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08)
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		return ret;
	}

	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 48) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 128) {
		goto COPY_BLOCK_128_BACK15;
	}
	if (n <= 512) {
		if (n >= 256) {
			n -= 256;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
			src = (const uint8_t *)src + 256;
			dst = (uint8_t *)dst + 256;
		}
COPY_BLOCK_255_BACK15:
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK15:
		if (n >= 64) {
			n -= 64;
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 64;
			dst = (uint8_t *)dst + 64;
		}
COPY_BLOCK_64_BACK15:
		if (n >= 32) {
			n -= 32;
			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 32;
			dst = (uint8_t *)dst + 32;
		}
		if (n > 16) {
			rte_mov16((uint8_t *)dst, (const uint8_t *)src);
			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
			return ret;
		}
		if (n > 0) {
			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		}
		return ret;
	}

	/**
	 * Make store aligned when copy size exceeds 512 bytes,
	 * and make sure the first 15 bytes are copied, because
	 * unaligned copy functions require up to 15 bytes
	 * backwards access.
	 */
	dstofss = (uintptr_t)dst & 0x0F;
	if (dstofss > 0) {
		dstofss = 16 - dstofss + 16;
		n -= dstofss;
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		src = (const uint8_t *)src + dstofss;
		dst = (uint8_t *)dst + dstofss;
	}
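	/*
	 * Note: when dst is not 16-byte aligned, dstofss is 17..31 rather than
	 * 1..15. Copying a full 32 bytes and then skipping past 16..31 of them
	 * both aligns dst and ensures the first bytes of the buffer are already
	 * copied, matching the backwards-access requirement of the
	 * MOVEUNALIGNED_LEFT47 macros used below.
	 */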
	srcofs = ((uintptr_t)src & 0x0F);

	/**
	 * For aligned copy
	 */
	if (srcofs == 0) {
		/**
		 * Copy 256-byte blocks
		 */
		for (; n >= 256; n -= 256) {
			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
			dst = (uint8_t *)dst + 256;
			src = (const uint8_t *)src + 256;
		}

		/**
		 * Copy whatever left
		 */
		goto COPY_BLOCK_255_BACK15;
	}

	/**
	 * For copy with unaligned load
	 */
	MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);

	/**
	 * Copy whatever left
	 */
	goto COPY_BLOCK_64_BACK15;
}

#endif /* RTE_MACHINE_CPUFLAG */

static __rte_always_inline void *
rte_memcpy_aligned(void *dst, const void *src, size_t n)
{
	void *ret = dst;

	/* Copy size <= 16 bytes */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dst = *(const uint8_t *)src;
			src = (const uint8_t *)src + 1;
			dst = (uint8_t *)dst + 1;
		}
		if (n & 0x02) {
			*(uint16_t *)dst = *(const uint16_t *)src;
			src = (const uint16_t *)src + 1;
			dst = (uint16_t *)dst + 1;
		}
		if (n & 0x04) {
			*(uint32_t *)dst = *(const uint32_t *)src;
			src = (const uint32_t *)src + 1;
			dst = (uint32_t *)dst + 1;
		}
		if (n & 0x08)
			*(uint64_t *)dst = *(const uint64_t *)src;

		return ret;
	}

	/* Copy 16 <= size <= 32 bytes */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
			  (const uint8_t *)src - 16 + n);

		return ret;
	}

	/* Copy 32 < size <= 64 bytes */
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
			  (const uint8_t *)src - 32 + n);

		return ret;
	}

	/* Copy 64-byte blocks */
	for (; n >= 64; n -= 64) {
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;
	}

	/* Copy whatever left */
	rte_mov64((uint8_t *)dst - 64 + n,
		  (const uint8_t *)src - 64 + n);

	return ret;
}
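
/*
 * Dispatcher: when both pointers are aligned to (ALIGNMENT_MASK + 1) bytes,
 * take the simpler rte_memcpy_aligned() path; otherwise fall back to the
 * ISA-specific rte_memcpy_generic() selected above.
 */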
static __rte_always_inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
	if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
		return rte_memcpy_aligned(dst, src, n);
	else
		return rte_memcpy_generic(dst, src, n);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_X86_64_H_ */