/*
 *   BSD LICENSE
 *
 *   Copyright (C) IBM Corporation 2014.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IBM Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_MEMCPY_PPC_64_H_
#define _RTE_MEMCPY_PPC_64_H_

#include <stdint.h>
#include <string.h>
/* To include altivec.h, GCC version must be >= 4.8 */
#include <altivec.h>

#ifdef __cplusplus
extern "C" {
#endif

#include "generic/rte_memcpy.h"

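/*
 * Fixed-size copy helpers: each rte_movNN() copies exactly NN bytes using
 * 16-byte VSX vector loads and stores (vec_vsx_ld/vec_vsx_st), which do not
 * require dst or src to be 16-byte aligned.
 */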
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
}

static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
}

static inline void
rte_mov48(uint8_t *dst, const uint8_t *src)
{
	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
	vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
}

static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
	vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
	vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
}

static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
	vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
	vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
	vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
	vec_vsx_st(vec_vsx_ld(64, src), 64, dst);
	vec_vsx_st(vec_vsx_ld(80, src), 80, dst);
	vec_vsx_st(vec_vsx_ld(96, src), 96, dst);
	vec_vsx_st(vec_vsx_ld(112, src), 112, dst);
}

static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	rte_mov128(dst, src);
	rte_mov128(dst + 128, src + 128);
}

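/*
 * When n is a compile-time constant, fall back to memcpy() so the compiler
 * can expand the copy inline; otherwise dispatch to rte_memcpy_func().
 */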
#define rte_memcpy(dst, src, n)              \
	({ (__builtin_constant_p(n)) ?       \
	memcpy((dst), (src), (n)) :          \
	rte_memcpy_func((dst), (src), (n)); })

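/*
 * Run-time sized copy: choose a strategy based on n, built from the
 * fixed-size VSX helpers above. Returns the original dst pointer, like
 * memcpy().
 */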
static inline void *
rte_memcpy_func(void *dst, const void *src, size_t n)
{
	void *ret = dst;

	/* We can't copy < 16 bytes using vector registers, so do it manually. */
	if (n < 16) {
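		/*
		 * Decompose n into its 8/4/2/1-byte components and do at most
		 * one copy of each size; e.g. n == 13 (binary 1101) copies
		 * 1 + 4 + 8 bytes.
		 */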
		if (n & 0x01) {
			*(uint8_t *)dst = *(const uint8_t *)src;
			dst = (uint8_t *)dst + 1;
			src = (const uint8_t *)src + 1;
		}
		if (n & 0x02) {
			*(uint16_t *)dst = *(const uint16_t *)src;
			dst = (uint16_t *)dst + 1;
			src = (const uint16_t *)src + 1;
		}
		if (n & 0x04) {
			*(uint32_t *)dst = *(const uint32_t *)src;
			dst = (uint32_t *)dst + 1;
			src = (const uint32_t *)src + 1;
		}
		if (n & 0x08)
			*(uint64_t *)dst = *(const uint64_t *)src;
		return ret;
	}

	/* Special fast cases for <= 128 bytes */
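	/*
	 * Each case copies the first 16/32/64 bytes from the start and another
	 * 16/32/64 bytes ending exactly at dst + n; the two copies may overlap
	 * in the middle (e.g. n == 40 writes bytes 0-31 and 8-39).
	 */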
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
			(const uint8_t *)src - 16 + n);
		return ret;
	}

	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
			(const uint8_t *)src - 32 + n);
		return ret;
	}

	if (n <= 128) {
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		rte_mov64((uint8_t *)dst - 64 + n,
			(const uint8_t *)src - 64 + n);
		return ret;
	}

	/*
	 * For large copies > 128 bytes. This combination of 256, 64 and 16 byte
	 * copies was found to be faster than doing 128 and 32 byte copies as
	 * well.
	 */
	for ( ; n >= 256; n -= 256) {
		rte_mov256((uint8_t *)dst, (const uint8_t *)src);
		dst = (uint8_t *)dst + 256;
		src = (const uint8_t *)src + 256;
	}

	/*
	 * We split the remaining bytes (which will be less than 256) into
	 * 64-byte (2^6) chunks.
	 * Using incrementing integers in the case labels of a switch statement
	 * encourages the compiler to use a jump table. To get incrementing
	 * integers, we shift the 2 relevant bits to the LSB position to first
	 * get decrementing integers, and then subtract.
	 */
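	/*
	 * For example, with n == 200 remaining: n >> 6 == 3, so the switch
	 * index is 3 - 3 == 0; case 0x00 runs and falls through cases 0x01
	 * and 0x02, copying 3 * 64 = 192 bytes and leaving n == 8.
	 */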
	switch (3 - (n >> 6)) {
	case 0x00:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	case 0x01:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	case 0x02:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	default:
		;
	}

	/*
	 * We split the remaining bytes (which will be less than 64) into
	 * 16-byte (2^4) chunks, using the same switch structure as above.
	 */
	switch (3 - (n >> 4)) {
	case 0x00:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	case 0x01:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	case 0x02:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	default:
		;
	}

	/* Copy any remaining bytes, without going beyond end of buffers */
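	/*
	 * At this point n < 16. The tail copy writes the last 16 bytes of the
	 * destination, re-writing up to 16 - n bytes that earlier copies
	 * already produced; this stays in bounds because at least 128 bytes
	 * were copied before reaching here.
	 */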
	if (n != 0)
		rte_mov16((uint8_t *)dst - 16 + n,
			(const uint8_t *)src - 16 + n);

	return ret;
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_PPC_64_H_ */