/*
 * BSD LICENSE
 *
 * Copyright(c) 2015 Cavium, Inc. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Cavium, Inc nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_VECT_ARM_H_
#define _RTE_VECT_ARM_H_

#include <stdint.h>
#include "generic/rte_vect.h"
#include "rte_debug.h"
#include "arm_neon.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef int32x4_t xmm_t;

#define XMM_SIZE	(sizeof(xmm_t))
#define XMM_MASK	(XMM_SIZE - 1)

typedef union rte_xmm {
	xmm_t    x;
	uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
	uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
	uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
	uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
	double   pd[XMM_SIZE / sizeof(double)];
} __attribute__((aligned(16))) rte_xmm_t;
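
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * the union gives lane-wise access to a vector register without casts.
 *
 *	xmm_t v = vdupq_n_s32(7);
 *	rte_xmm_t x;
 *
 *	x.x = v;
 *	// each of x.u32[0] .. x.u32[3] now reads back as 7
 */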

#ifdef RTE_ARCH_ARM
/* NEON intrinsic vqtbl1q_u8() is not supported in ARMv7-A(AArch32) */
static __inline uint8x16_t
vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
	uint8_t i, pos;
	rte_xmm_t rte_a, rte_b, rte_ret;

	vst1q_u8(rte_a.u8, a);
	vst1q_u8(rte_b.u8, b);

	for (i = 0; i < 16; i++) {
		pos = rte_b.u8[i];
		if (pos < 16)
			rte_ret.u8[i] = rte_a.u8[pos];
		else
			rte_ret.u8[i] = 0;
	}

	return vld1q_u8(rte_ret.u8);
}
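
/*
 * Usage sketch (illustrative addition): the fallback behaves like the
 * AArch64 TBL instruction - each output byte selects an input byte, and
 * indices >= 16 produce zero.
 *
 *	uint8x16_t table = vld1q_u8(buf);	// buf: any 16 readable bytes
 *	uint8x16_t idx = vdupq_n_u8(3);
 *	uint8x16_t out = vqtbl1q_u8(table, idx);	// broadcasts buf[3]
 */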

static inline uint16_t
vaddvq_u16(uint16x8_t a)
{
	uint32x4_t m = vpaddlq_u16(a);
	uint64x2_t n = vpaddlq_u32(m);
	uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);

	return vget_lane_u32((uint32x2_t)o, 0);
}

#endif
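
/*
 * Usage sketch (illustrative addition): horizontal sum across all eight
 * 16-bit lanes, matching the native AArch64 ADDV instruction.
 *
 *	uint16x8_t v = vdupq_n_u16(2);
 *	uint16_t sum = vaddvq_u16(v);	// sum == 16
 */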

#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70000)
/* NEON intrinsic vcopyq_laneq_u32() is supported since GCC version 7 */
static inline uint32x4_t
vcopyq_laneq_u32(uint32x4_t a, const int lane_a,
		 uint32x4_t b, const int lane_b)
{
	return vsetq_lane_u32(vgetq_lane_u32(b, lane_b), a, lane_a);
}
#endif
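
/*
 * Usage sketch (illustrative addition): copy one 32-bit lane of b into a
 * chosen lane of a; both lane arguments must be compile-time constants.
 *
 *	uint32x4_t a = vdupq_n_u32(0);
 *	uint32x4_t b = vdupq_n_u32(9);
 *	a = vcopyq_laneq_u32(a, 2, b, 0);	// a is now {0, 0, 9, 0}
 */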

#if defined(RTE_ARCH_ARM64)
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70000)

/* GCC versions older than 4.9 do not provide the NEON poly types */
#if (GCC_VERSION < 40900)
typedef uint64_t poly64_t;
typedef uint64x2_t poly64x2_t;
typedef uint8_t poly128_t __attribute__((vector_size(16), aligned(16)));
#endif

/* NEON intrinsic vreinterpretq_u64_p128() is supported since GCC version 7 */
static inline uint64x2_t
vreinterpretq_u64_p128(poly128_t x)
{
	return (uint64x2_t)x;
}

/* NEON intrinsic vreinterpretq_p64_u64() is supported since GCC version 7 */
static inline poly64x2_t
vreinterpretq_p64_u64(uint64x2_t x)
{
	return (poly64x2_t)x;
}

/* NEON intrinsic vgetq_lane_p64() is supported since GCC version 7 */
static inline poly64_t
vgetq_lane_p64(poly64x2_t x, const int lane)
{
	RTE_ASSERT(lane >= 0 && lane <= 1);

	poly64_t *p = (poly64_t *)&x;

	return p[lane];
}
#endif
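
/*
 * Usage sketch (illustrative addition): with these shims, polynomial-type
 * code written for GCC >= 7 compiles on older toolchains, e.g. extracting
 * a 64-bit polynomial lane that could feed vmull_p64() on CPUs with the
 * crypto extension.
 *
 *	uint64x2_t x = vdupq_n_u64(0x87);
 *	poly64_t lo = vgetq_lane_p64(vreinterpretq_p64_u64(x), 0);
 */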

/*
 * If (0 <= index <= 15), then call the ASIMD ext instruction on the
 * 128 bit regs v0 and v1 with the appropriate index.
 *
 * Else returns a zero vector.
 */
static inline uint8x16_t
vextract(uint8x16_t v0, uint8x16_t v1, const int index)
{
	switch (index) {
	case 0: return vextq_u8(v0, v1, 0);
	case 1: return vextq_u8(v0, v1, 1);
	case 2: return vextq_u8(v0, v1, 2);
	case 3: return vextq_u8(v0, v1, 3);
	case 4: return vextq_u8(v0, v1, 4);
	case 5: return vextq_u8(v0, v1, 5);
	case 6: return vextq_u8(v0, v1, 6);
	case 7: return vextq_u8(v0, v1, 7);
	case 8: return vextq_u8(v0, v1, 8);
	case 9: return vextq_u8(v0, v1, 9);
	case 10: return vextq_u8(v0, v1, 10);
	case 11: return vextq_u8(v0, v1, 11);
	case 12: return vextq_u8(v0, v1, 12);
	case 13: return vextq_u8(v0, v1, 13);
	case 14: return vextq_u8(v0, v1, 14);
	case 15: return vextq_u8(v0, v1, 15);
	}
	return vdupq_n_u8(0);
}
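
/*
 * Usage sketch (illustrative addition): vextract(v0, v1, n) mirrors the
 * EXT instruction on the 32-byte concatenation v0:v1.
 *
 *	// out[i] = v0[3 + i] for i < 13, then v1[0], v1[1], v1[2] on top
 *	uint8x16_t out = vextract(v0, v1, 3);
 */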

/**
 * Shifts right 128 bit register by specified number of bytes
 *
 * Value of shift parameter must be in range 0 - 16
 */
static inline uint64x2_t
vshift_bytes_right(uint64x2_t reg, const unsigned int shift)
{
	return vreinterpretq_u64_u8(vextract(
				vreinterpretq_u8_u64(reg),
				vdupq_n_u8(0),
				shift));
}
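
/*
 * Usage sketch (illustrative addition) for this helper and
 * vshift_bytes_left below: whole-byte shifts of a 128-bit value.
 *
 *	uint64x2_t r = vshift_bytes_right(reg, 8);
 *	// r = {reg[1], 0}: the high 64-bit lane moves down, zeros shift in
 */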

/**
 * Shifts left 128 bit register by specified number of bytes
 *
 * Value of shift parameter must be in range 0 - 16
 */
static inline uint64x2_t
vshift_bytes_left(uint64x2_t reg, const unsigned int shift)
{
	return vreinterpretq_u64_u8(vextract(
				vdupq_n_u8(0),
				vreinterpretq_u8_u64(reg),
				16 - shift));
}
#endif

#ifdef __cplusplus
}
#endif

#endif /* _RTE_VECT_ARM_H_ */