/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_MEMCPY_X86_64_H_
#define _RTE_MEMCPY_X86_64_H_

/**
 * @file
 *
 * Functions for SSE/AVX/AVX2/AVX512 implementation of memcpy().
 */

#include <stdint.h>
#include <string.h>
#include <rte_vect.h>
#include <rte_common.h>
#include <rte_config.h>

#ifdef __cplusplus
extern "C" {
#endif

#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 100000)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif
/**
 * Copy bytes from one location to another. The locations must not overlap.
 *
 * @note This may be implemented as a macro on some platforms, so its address
 * should not be taken, and care is needed as parameter expressions may be
 * evaluated multiple times.
 *
 * @param dst
 *   Pointer to the destination of the data.
 * @param src
 *   Pointer to the source data.
 * @param n
 *   Number of bytes to copy.
 * @return
 *   Pointer to the destination data.
 */
static __rte_always_inline void *
rte_memcpy(void *dst, const void *src, size_t n);
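/*
 * Usage sketch (editorial addition; the buffer names are hypothetical).
 * rte_memcpy() is a drop-in replacement for memcpy() on non-overlapping
 * buffers and, like memcpy(), returns dst:
 *
 *	uint8_t pkt_copy[2048];
 *	rte_memcpy(pkt_copy, pkt_data, pkt_len);
 *
 * where pkt_data/pkt_len name the source buffer and its length.
 */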
#ifdef __AVX512F__

#define ALIGNMENT_MASK 0x3F

/**
 * AVX512 implementation below
 */
/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}
/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0;

	ymm0 = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dst, ymm0);
}
/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	__m512i zmm0;

	zmm0 = _mm512_loadu_si512((const void *)src);
	_mm512_storeu_si512((void *)dst, zmm0);
}
/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov64(dst + 0 * 64, src + 0 * 64);
	rte_mov64(dst + 1 * 64, src + 1 * 64);
}
/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov64(dst + 0 * 64, src + 0 * 64);
	rte_mov64(dst + 1 * 64, src + 1 * 64);
	rte_mov64(dst + 2 * 64, src + 2 * 64);
	rte_mov64(dst + 3 * 64, src + 3 * 64);
}
/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m512i zmm0, zmm1;

	while (n >= 128) {
		zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
		n -= 128;
		zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
		src = src + 128;
		_mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
		_mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
		dst = dst + 128;
	}
}
/**
 * Copy 512-byte blocks from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;

	while (n >= 512) {
		zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
		n -= 512;
		zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
		zmm2 = _mm512_loadu_si512((const void *)(src + 2 * 64));
		zmm3 = _mm512_loadu_si512((const void *)(src + 3 * 64));
		zmm4 = _mm512_loadu_si512((const void *)(src + 4 * 64));
		zmm5 = _mm512_loadu_si512((const void *)(src + 5 * 64));
		zmm6 = _mm512_loadu_si512((const void *)(src + 6 * 64));
		zmm7 = _mm512_loadu_si512((const void *)(src + 7 * 64));
		src = src + 512;
		_mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
		_mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
		_mm512_storeu_si512((void *)(dst + 2 * 64), zmm2);
		_mm512_storeu_si512((void *)(dst + 3 * 64), zmm3);
		_mm512_storeu_si512((void *)(dst + 4 * 64), zmm4);
		_mm512_storeu_si512((void *)(dst + 5 * 64), zmm5);
		_mm512_storeu_si512((void *)(dst + 6 * 64), zmm6);
		_mm512_storeu_si512((void *)(dst + 7 * 64), zmm7);
		dst = dst + 512;
	}
}
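/*
 * Editorial note: the eight loads above are deliberately issued before the
 * eight stores, so the (possibly unaligned) loads can overlap in flight
 * before the aligned stores drain them; this is the "instruction order
 * control" the comments in rte_memcpy_generic() refer to.
 */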
static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t bits;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08)
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		return ret;
	}
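	/*
	 * Editorial example: for n = 13 (binary 1101) the branches above copy
	 * 1, then 4, then 8 bytes, advancing dstu/srcu after each store, so
	 * every length below 16 is handled without a loop.
	 */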
	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
				(const uint8_t *)src - 32 + n);
		return ret;
	}
	if (n <= 512) {
		if (n >= 256) {
			n -= 256;
			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 256;
			dst = (uint8_t *)dst + 256;
		}
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK63:
		if (n > 64) {
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			rte_mov64((uint8_t *)dst - 64 + n,
					(const uint8_t *)src - 64 + n);
			return ret;
		}
		if (n > 0)
			rte_mov64((uint8_t *)dst - 64 + n,
					(const uint8_t *)src - 64 + n);
		return ret;
	}
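	/*
	 * Editorial note: the paired head/tail moves above intentionally
	 * overlap. For n = 40 in the n <= 64 branch, the first rte_mov32()
	 * writes bytes 0..31 and the second writes bytes 8..39; the
	 * double-written middle is harmless and cheaper than a
	 * variable-length copy.
	 */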
	/**
	 * Make store aligned when copy size exceeds 512 bytes
	 */
	dstofss = ((uintptr_t)dst & 0x3F);
	if (dstofss > 0) {
		dstofss = 64 - dstofss;
		n -= dstofss;
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		src = (const uint8_t *)src + dstofss;
		dst = (uint8_t *)dst + dstofss;
	}

	/**
	 * Copy 512-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when load is unaligned.
	 */
	rte_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
	bits = n;
	n = n & 511;
	bits -= n;
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;
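	/*
	 * Editorial example: for n = 1300, rte_mov512blocks() copies two
	 * 512-byte blocks; n & 511 leaves n = 276, bits = 1300 - 276 = 1024,
	 * and src/dst advance past the 1024 bytes already copied.
	 */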
	/**
	 * Copy 128-byte blocks.
	 * Use copy block function for better instruction order control,
	 * which is important when load is unaligned.
	 */
	if (n >= 128) {
		rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
		bits = n;
		n = n & 127;
		bits -= n;
		src = (const uint8_t *)src + bits;
		dst = (uint8_t *)dst + bits;
	}

	/**
	 * Copy whatever is left
	 */
	goto COPY_BLOCK_128_BACK63;
}
#elif defined __AVX2__

#define ALIGNMENT_MASK 0x1F

/**
 * AVX2 implementation below
 */
/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}
/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	__m256i ymm0;

	ymm0 = _mm256_loadu_si256((const __m256i *)src);
	_mm256_storeu_si256((__m256i *)dst, ymm0);
}
/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
}
/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
}
/**
 * Copy 128-byte blocks from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
	__m256i ymm0, ymm1, ymm2, ymm3;

	while (n >= 128) {
		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
		n -= 128;
		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
		ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
		ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
		src = (const uint8_t *)src + 128;
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
		dst = (uint8_t *)dst + 128;
	}
}
static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t bits;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08)
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		return ret;
	}
	/**
	 * Fast way when copy size doesn't exceed 256 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 48) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
				(const uint8_t *)src - 32 + n);
		return ret;
	}
	if (n <= 256) {
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK31:
		if (n >= 64) {
			n -= 64;
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 64;
			dst = (uint8_t *)dst + 64;
		}
		if (n > 32) {
			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
			rte_mov32((uint8_t *)dst - 32 + n,
					(const uint8_t *)src - 32 + n);
			return ret;
		}
		if (n > 0) {
			rte_mov32((uint8_t *)dst - 32 + n,
					(const uint8_t *)src - 32 + n);
		}
		return ret;
	}
	/**
	 * Make store aligned when copy size exceeds 256 bytes
	 */
	dstofss = (uintptr_t)dst & 0x1F;
	if (dstofss > 0) {
		dstofss = 32 - dstofss;
		n -= dstofss;
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		src = (const uint8_t *)src + dstofss;
		dst = (uint8_t *)dst + dstofss;
	}

	/**
	 * Copy 128-byte blocks
	 */
	rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
	bits = n;
	n = n & 127;
	bits -= n;
	src = (const uint8_t *)src + bits;
	dst = (uint8_t *)dst + bits;

	/**
	 * Copy whatever is left
	 */
	goto COPY_BLOCK_128_BACK31;
}
#else /* __AVX512F__ */

#define ALIGNMENT_MASK 0x0F

/**
 * SSE & AVX implementation below
 */
/**
 * Copy 16 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0;

	xmm0 = _mm_loadu_si128((const __m128i *)src);
	_mm_storeu_si128((__m128i *)dst, xmm0);
}
/**
 * Copy 32 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
}
/**
 * Copy 64 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
}
/**
 * Copy 128 bytes from one location to another,
 * locations should not overlap.
 */
static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
}
/**
 * Copy 256 bytes from one location to another,
 * locations should not overlap.
 */
static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
}
/**
 * Macro for copying an unaligned block from one location to another with a
 * constant load offset, 47 bytes leftover maximum,
 * locations should not overlap.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be an immediate value within [1, 15]
 * - For <src>, make sure <offset> bytes backwards and <16 - offset> bytes
 *   forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> must be pre-defined
 */
#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) \
__extension__ ({ \
	size_t tmp; \
	while (len >= 128 + 16 - offset) { \
		xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
		len -= 128; \
		xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
		xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
		xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16)); \
		xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16)); \
		xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16)); \
		xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16)); \
		xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16)); \
		xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16)); \
		src = (const uint8_t *)src + 128; \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
		_mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
		dst = (uint8_t *)dst + 128; \
	} \
	tmp = len; \
	len = ((len - 16 + offset) & 127) + 16 - offset; \
	tmp -= len; \
	src = (const uint8_t *)src + tmp; \
	dst = (uint8_t *)dst + tmp; \
	if (len >= 32 + 16 - offset) { \
		while (len >= 32 + 16 - offset) { \
			xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
			len -= 32; \
			xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16)); \
			xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16)); \
			src = (const uint8_t *)src + 32; \
			_mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
			_mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
			dst = (uint8_t *)dst + 32; \
		} \
		tmp = len; \
		len = ((len - 16 + offset) & 31) + 16 - offset; \
		tmp -= len; \
		src = (const uint8_t *)src + tmp; \
		dst = (uint8_t *)dst + tmp; \
	} \
})
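/*
 * Editorial note on the technique above: _mm_alignr_epi8(hi, lo, off)
 * concatenates <hi:lo> into a 32-byte value, shifts it right by <off> bytes
 * and keeps the low 16 bytes. Because each xmmN is loaded from
 * (src - offset + N * 16), _mm_alignr_epi8(xmm1, xmm0, offset) reconstructs
 * exactly src[0..15], so one unaligned 16-byte window is synthesized per
 * aligned store while every load is reused by two stores.
 */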
/**
 * Macro for copying an unaligned block from one location to another,
 * 47 bytes leftover maximum,
 * locations should not overlap.
 * Use switch here because the aligning instruction requires an immediate
 * value for the shift count.
 * Requirements:
 * - Store is aligned
 * - Load offset is <offset>, which must be within [1, 15]
 * - For <src>, make sure <offset> bytes backwards and <16 - offset> bytes
 *   forwards are available for loading
 * - <dst>, <src>, <len> must be variables
 * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be
 *   pre-defined
 */
#define MOVEUNALIGNED_LEFT47(dst, src, len, offset) \
__extension__ ({ \
	switch (offset) { \
	case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x01); break; \
	case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x02); break; \
	case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x03); break; \
	case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x04); break; \
	case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x05); break; \
	case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x06); break; \
	case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x07); break; \
	case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x08); break; \
	case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x09); break; \
	case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0A); break; \
	case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0B); break; \
	case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0C); break; \
	case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0D); break; \
	case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0E); break; \
	case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0F); break; \
	default:; \
	} \
})
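/*
 * Editorial illustration: a call such as MOVEUNALIGNED_LEFT47(dst, src, len, 5)
 * dispatches to MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x05), where the
 * shift count 5 is a literal. This satisfies the immediate-operand
 * requirement of the PALIGNR instruction behind _mm_alignr_epi8(), which
 * cannot take a runtime shift count.
 */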
static __rte_always_inline void *
rte_memcpy_generic(void *dst, const void *src, size_t n)
{
	__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
	uintptr_t dstu = (uintptr_t)dst;
	uintptr_t srcu = (uintptr_t)src;
	void *ret = dst;
	size_t dstofss;
	size_t srcofs;

	/**
	 * Copy less than 16 bytes
	 */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dstu = *(const uint8_t *)srcu;
			srcu = (uintptr_t)((const uint8_t *)srcu + 1);
			dstu = (uintptr_t)((uint8_t *)dstu + 1);
		}
		if (n & 0x02) {
			*(uint16_t *)dstu = *(const uint16_t *)srcu;
			srcu = (uintptr_t)((const uint16_t *)srcu + 1);
			dstu = (uintptr_t)((uint16_t *)dstu + 1);
		}
		if (n & 0x04) {
			*(uint32_t *)dstu = *(const uint32_t *)srcu;
			srcu = (uintptr_t)((const uint32_t *)srcu + 1);
			dstu = (uintptr_t)((uint32_t *)dstu + 1);
		}
		if (n & 0x08)
			*(uint64_t *)dstu = *(const uint64_t *)srcu;
		return ret;
	}
	/**
	 * Fast way when copy size doesn't exceed 512 bytes
	 */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 48) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		return ret;
	}
	if (n <= 128)
		goto COPY_BLOCK_128_BACK15;
	if (n <= 512) {
		if (n >= 256) {
			n -= 256;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
			src = (const uint8_t *)src + 256;
			dst = (uint8_t *)dst + 256;
		}
COPY_BLOCK_255_BACK15:
		if (n >= 128) {
			n -= 128;
			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 128;
			dst = (uint8_t *)dst + 128;
		}
COPY_BLOCK_128_BACK15:
		if (n >= 64) {
			n -= 64;
			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 64;
			dst = (uint8_t *)dst + 64;
		}
COPY_BLOCK_64_BACK15:
		if (n >= 32) {
			n -= 32;
			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
			src = (const uint8_t *)src + 32;
			dst = (uint8_t *)dst + 32;
		}
		if (n > 16) {
			rte_mov16((uint8_t *)dst, (const uint8_t *)src);
			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
			return ret;
		}
		if (n > 0) {
			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
		}
		return ret;
	}
	/**
	 * Make store aligned when copy size exceeds 512 bytes,
	 * and make sure the first 15 bytes are copied, because
	 * the unaligned copy functions require up to 15 bytes of
	 * backwards access.
	 */
	dstofss = (uintptr_t)dst & 0x0F;
	if (dstofss > 0) {
		dstofss = 16 - dstofss + 16;
		n -= dstofss;
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		src = (const uint8_t *)src + dstofss;
		dst = (uint8_t *)dst + dstofss;
	}
	srcofs = ((uintptr_t)src & 0x0F);
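	/*
	 * Editorial example: for dst == 0x1009, dstofss = 16 - 9 + 16 = 23,
	 * so the 32-byte head copy covers the first 23 bytes and leaves dst
	 * 16-byte aligned; advancing src by at least 16 also guarantees that
	 * the <offset>-byte backwards loads of MOVEUNALIGNED_LEFT47 never
	 * reach before the original src.
	 */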
	/**
	 * For aligned copy
	 */
	if (srcofs == 0) {
		/**
		 * Copy 256-byte blocks
		 */
		for (; n >= 256; n -= 256) {
			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
			dst = (uint8_t *)dst + 256;
			src = (const uint8_t *)src + 256;
		}

		/**
		 * Copy whatever is left
		 */
		goto COPY_BLOCK_255_BACK15;
	}

	/**
	 * For copy with unaligned load
	 */
	MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);
	/**
	 * Copy whatever is left
	 */
	goto COPY_BLOCK_64_BACK15;
}

#endif /* __AVX512F__ */
static __rte_always_inline void *
rte_memcpy_aligned(void *dst, const void *src, size_t n)
{
	void *ret = dst;

	/* Copy size <= 16 bytes */
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dst = *(const uint8_t *)src;
			src = (const uint8_t *)src + 1;
			dst = (uint8_t *)dst + 1;
		}
		if (n & 0x02) {
			*(uint16_t *)dst = *(const uint16_t *)src;
			src = (const uint16_t *)src + 1;
			dst = (uint16_t *)dst + 1;
		}
		if (n & 0x04) {
			*(uint32_t *)dst = *(const uint32_t *)src;
			src = (const uint32_t *)src + 1;
			dst = (uint32_t *)dst + 1;
		}
		if (n & 0x08)
			*(uint64_t *)dst = *(const uint64_t *)src;

		return ret;
	}

	/* Copy 16 <= size <= 32 bytes */
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);

		return ret;
	}

	/* Copy 32 < size <= 64 bytes */
	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
				(const uint8_t *)src - 32 + n);

		return ret;
	}

	/* Copy 64-byte blocks */
	for (; n >= 64; n -= 64) {
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;
	}

	/* Copy whatever is left */
	rte_mov64((uint8_t *)dst - 64 + n,
			(const uint8_t *)src - 64 + n);

	return ret;
}
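/*
 * Editorial example: in rte_memcpy_aligned() with n = 200, the loop above
 * copies three 64-byte blocks (192 bytes) and the final rte_mov64() copies
 * bytes 136..199, overlapping the last block instead of branching on the
 * remainder.
 */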
static __rte_always_inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
	if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
		return rte_memcpy_aligned(dst, src, n);
	else
		return rte_memcpy_generic(dst, src, n);
}
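/*
 * Editorial note: ALIGNMENT_MASK is the widest vector size of the compiled
 * path minus one (0x3F for AVX512, 0x1F for AVX2, 0x0F for SSE), so the
 * branch above takes the aligned fast path only when both dst and src are
 * aligned to that vector size.
 */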
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 100000)
#pragma GCC diagnostic pop
#endif

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_X86_64_H_ */