/*
 * Copyright(c) 2015 RehiveTech. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of RehiveTech nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMCPY_ARM32_H_
#define _RTE_MEMCPY_ARM32_H_

#include <stdint.h>
#include <string.h>

/* ARM NEON Intrinsics are used to copy data */
#include <arm_neon.h>

#ifdef __cplusplus
extern "C" {
#endif

#include "generic/rte_memcpy.h"
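
/*
 * Note: generic/rte_memcpy.h declares and documents the rte_memcpy() and
 * rte_mov*() interface; this header supplies the ARMv7/NEON
 * implementations of those functions.
 */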

static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
        vst1q_u8(dst, vld1q_u8(src));
}

static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
        asm volatile (
                "vld1.8 {d0-d3}, [%0]\n\t"
                "vst1.8 {d0-d3}, [%1]\n\t"
                : "+r" (src), "+r" (dst)
                : : "memory", "d0", "d1", "d2", "d3");
}

static inline void
rte_mov48(uint8_t *dst, const uint8_t *src)
{
        asm volatile (
                "vld1.8 {d0-d3}, [%0]!\n\t"
                "vld1.8 {d4-d5}, [%0]\n\t"
                "vst1.8 {d0-d3}, [%1]!\n\t"
                "vst1.8 {d4-d5}, [%1]\n\t"
                : "+r" (src), "+r" (dst)
                :
                : "memory", "d0", "d1", "d2", "d3", "d4", "d5");
}

static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
        asm volatile (
                "vld1.8 {d0-d3}, [%0]!\n\t"
                "vld1.8 {d4-d7}, [%0]\n\t"
                "vst1.8 {d0-d3}, [%1]!\n\t"
                "vst1.8 {d4-d7}, [%1]\n\t"
                : "+r" (src), "+r" (dst)
                :
                : "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7");
}

static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
        /* Prefetch the second 64-byte chunk of the source before copying. */
        asm volatile ("pld [%0, #64]" : : "r" (src));
        asm volatile (
                "vld1.8 {d0-d3},   [%0]!\n\t"
                "vld1.8 {d4-d7},   [%0]!\n\t"
                "vld1.8 {d8-d11},  [%0]!\n\t"
                "vld1.8 {d12-d15}, [%0]\n\t"
                "vst1.8 {d0-d3},   [%1]!\n\t"
                "vst1.8 {d4-d7},   [%1]!\n\t"
                "vst1.8 {d8-d11},  [%1]!\n\t"
                "vst1.8 {d12-d15}, [%1]\n\t"
                : "+r" (src), "+r" (dst)
                :
                : "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
                "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");
}

static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
        /* Prefetch the source at 64-byte strides, up to 448 bytes ahead,
         * before the copy starts.
         */
        asm volatile ("pld [%0, #64]" : : "r" (src));
        asm volatile ("pld [%0, #128]" : : "r" (src));
        asm volatile ("pld [%0, #192]" : : "r" (src));
        asm volatile ("pld [%0, #256]" : : "r" (src));
        asm volatile ("pld [%0, #320]" : : "r" (src));
        asm volatile ("pld [%0, #384]" : : "r" (src));
        asm volatile ("pld [%0, #448]" : : "r" (src));
        asm volatile (
                "vld1.8 {d0-d3},   [%0]!\n\t"
                "vld1.8 {d4-d7},   [%0]!\n\t"
                "vld1.8 {d8-d11},  [%0]!\n\t"
                "vld1.8 {d12-d15}, [%0]!\n\t"
                "vld1.8 {d16-d19}, [%0]!\n\t"
                "vld1.8 {d20-d23}, [%0]!\n\t"
                "vld1.8 {d24-d27}, [%0]!\n\t"
                "vld1.8 {d28-d31}, [%0]\n\t"
                "vst1.8 {d0-d3},   [%1]!\n\t"
                "vst1.8 {d4-d7},   [%1]!\n\t"
                "vst1.8 {d8-d11},  [%1]!\n\t"
                "vst1.8 {d12-d15}, [%1]!\n\t"
                "vst1.8 {d16-d19}, [%1]!\n\t"
                "vst1.8 {d20-d23}, [%1]!\n\t"
                "vst1.8 {d24-d27}, [%1]!\n\t"
                "vst1.8 {d28-d31}, [%1]!\n\t"
                : "+r" (src), "+r" (dst)
                :
                : "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
                "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
                "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
                "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31");
}

#define rte_memcpy(dst, src, n)                  \
        ({ (__builtin_constant_p(n)) ?           \
        memcpy((dst), (src), (n)) :              \
        rte_memcpy_func((dst), (src), (n)); })
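
/*
 * The macro above dispatches on whether the size is a compile-time
 * constant: for example, rte_memcpy(dst, src, 32) becomes a plain
 * memcpy() call the compiler can expand inline, while a runtime-sized
 * rte_memcpy(dst, src, len) goes through rte_memcpy_func() below.
 */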

static inline void *
rte_memcpy_func(void *dst, const void *src, size_t n)
{
        void *ret = dst;

        /* We can't copy < 16 bytes using NEON registers so do it manually. */
        if (n < 16) {
                if (n & 0x01) {
                        *(uint8_t *)dst = *(const uint8_t *)src;
                        dst = (uint8_t *)dst + 1;
                        src = (const uint8_t *)src + 1;
                }
                if (n & 0x02) {
                        *(uint16_t *)dst = *(const uint16_t *)src;
                        dst = (uint16_t *)dst + 1;
                        src = (const uint16_t *)src + 1;
                }
                if (n & 0x04) {
                        *(uint32_t *)dst = *(const uint32_t *)src;
                        dst = (uint32_t *)dst + 1;
                        src = (const uint32_t *)src + 1;
                }
                if (n & 0x08) {
                        /* ARMv7 cannot handle unaligned access to long long
                         * (uint64_t). Therefore two uint32_t operations are
                         * used.
                         */
                        *(uint32_t *)dst = *(const uint32_t *)src;
                        dst = (uint32_t *)dst + 1;
                        src = (const uint32_t *)src + 1;
                        *(uint32_t *)dst = *(const uint32_t *)src;
                }
                return ret;
        }

        /* Special fast cases for <= 128 bytes */
        if (n <= 32) {
                rte_mov16((uint8_t *)dst, (const uint8_t *)src);
                rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
                return ret;
        }

        if (n <= 64) {
                rte_mov32((uint8_t *)dst, (const uint8_t *)src);
                rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
                return ret;
        }

        if (n <= 128) {
                rte_mov64((uint8_t *)dst, (const uint8_t *)src);
                rte_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);
                return ret;
        }
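
        /*
         * Illustration of the fast paths above: for n = 40, the first
         * rte_mov32() copies bytes 0..31 and the second copies bytes
         * 8..39 (dst - 32 + n); the overlap in the middle is simply
         * rewritten with identical data, which is cheaper than branching
         * on the exact remainder.
         */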

        /*
         * For large copies > 128 bytes. This combination of 256, 64 and 16 byte
         * copies was found to be faster than doing 128 and 32 byte copies as
         * well.
         */
        for ( ; n >= 256; n -= 256) {
                rte_mov256((uint8_t *)dst, (const uint8_t *)src);
                dst = (uint8_t *)dst + 256;
                src = (const uint8_t *)src + 256;
        }

        /*
         * We split the remaining bytes (which will be less than 256) into
         * 64-byte (2^6) chunks.
         * Using incrementing integers in the case labels of a switch statement
         * encourages the compiler to use a jump table. To get incrementing
         * integers, we shift the 2 relevant bits to the LSB position to first
         * get decrementing integers, and then subtract (see the worked
         * example after this switch).
         */
        switch (3 - (n >> 6)) {
        case 0x00:
                rte_mov64((uint8_t *)dst, (const uint8_t *)src);
                n -= 64;
                dst = (uint8_t *)dst + 64;
                src = (const uint8_t *)src + 64;      /* fallthrough */
        case 0x01:
                rte_mov64((uint8_t *)dst, (const uint8_t *)src);
                n -= 64;
                dst = (uint8_t *)dst + 64;
                src = (const uint8_t *)src + 64;      /* fallthrough */
        case 0x02:
                rte_mov64((uint8_t *)dst, (const uint8_t *)src);
                n -= 64;
                dst = (uint8_t *)dst + 64;
                src = (const uint8_t *)src + 64;      /* fallthrough */
        default:
                break;
        }
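
        /*
         * Worked example for the switch above: after the 256-byte loop,
         * n < 256, so n >> 6 is 0..3 and 3 - (n >> 6) is 3..0. For
         * n = 200, n >> 6 = 3, the switch enters at case 0x00 and falls
         * through all three cases, copying 192 bytes and leaving n = 8.
         * For n = 70 it enters at case 0x02 and copies a single 64-byte
         * chunk, leaving n = 6.
         */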

        /*
         * We split the remaining bytes (which will be less than 64) into
         * 16-byte (2^4) chunks, using the same switch structure as above.
         */
        switch (3 - (n >> 4)) {
        case 0x00:
                rte_mov16((uint8_t *)dst, (const uint8_t *)src);
                n -= 16;
                dst = (uint8_t *)dst + 16;
                src = (const uint8_t *)src + 16;      /* fallthrough */
        case 0x01:
                rte_mov16((uint8_t *)dst, (const uint8_t *)src);
                n -= 16;
                dst = (uint8_t *)dst + 16;
                src = (const uint8_t *)src + 16;      /* fallthrough */
        case 0x02:
                rte_mov16((uint8_t *)dst, (const uint8_t *)src);
                n -= 16;
                dst = (uint8_t *)dst + 16;
                src = (const uint8_t *)src + 16;      /* fallthrough */
        default:
                break;
        }
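
        /*
         * At this point n is the leftover byte count (0..15). Because the
         * total size was larger than 128 on this path, backing up to
         * dst - 16 + n stays inside both buffers; for example, with n = 5
         * left the final rte_mov16() rewrites 11 already-copied bytes and
         * writes the last 5.
         */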
        /* Copy any remaining bytes, without going beyond end of buffers */
        if (n != 0)
                rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);

        return ret;
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_ARM32_H_ */