/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Cavium, Inc
 */

#ifndef _RTE_VECT_ARM_H_
#define _RTE_VECT_ARM_H_

#include <stdint.h>
#include "generic/rte_vect.h"
#include "rte_debug.h"
#include "arm_neon.h"

#ifdef __ARM_FEATURE_SVE
#include <arm_sve.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_VECT_DEFAULT_SIMD_BITWIDTH RTE_VECT_SIMD_MAX

typedef int32x4_t xmm_t;

#define XMM_SIZE	(sizeof(xmm_t))
#define XMM_MASK	(XMM_SIZE - 1)

typedef union rte_xmm {
	xmm_t    x;
	uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
	uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
	uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
	uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
	double   pd[XMM_SIZE / sizeof(double)];
} __rte_aligned(16) rte_xmm_t;
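
/*
 * Usage sketch (illustrative, not part of the original header): the union
 * gives lane-wise access to a NEON register through ordinary arrays.
 *
 *	rte_xmm_t v;
 *	vst1q_u32(v.u32, vdupq_n_u32(7));
 *	// v.u32[0] .. v.u32[3] now all read back as 7
 */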

#if defined(RTE_ARCH_ARM) && defined(RTE_ARCH_32)
/* NEON intrinsic vqtbl1q_u8() is not supported in ARMv7-A (AArch32) */
static __inline uint8x16_t
vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
	uint8_t i, pos;
	rte_xmm_t rte_a, rte_b, rte_ret;

	vst1q_u8(rte_a.u8, a);
	vst1q_u8(rte_b.u8, b);

	for (i = 0; i < 16; i++) {
		pos = rte_b.u8[i];
		if (pos < 16)
			rte_ret.u8[i] = rte_a.u8[pos];
		else
			rte_ret.u8[i] = 0;
	}

	return vld1q_u8(rte_ret.u8);
}
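
/*
 * Usage sketch (illustrative only): reverse the bytes of a vector via a
 * table lookup; indices outside 0-15 yield zero lanes, matching the
 * AArch64 TBL semantics emulated above.
 *
 *	static const uint8_t idx[16] = {
 *		15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
 *	};
 *	uint8x16_t rev = vqtbl1q_u8(data, vld1q_u8(idx));
 */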

static inline uint16_t
vaddvq_u16(uint16x8_t a)
{
	uint32x4_t m = vpaddlq_u16(a);
	uint64x2_t n = vpaddlq_u32(m);
	uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);

	return vget_lane_u32((uint32x2_t)o, 0);
}
#endif
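
/*
 * Illustrative sketch: the fallback reduces eight 16-bit lanes with two
 * pairwise widening adds (u16 -> u32 -> u64) and one final 64-bit add.
 *
 *	uint16_t sum = vaddvq_u16(vdupq_n_u16(1));	// sum == 8
 */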

#if (defined(RTE_ARCH_ARM) && defined(RTE_ARCH_32)) || \
	(defined(RTE_ARCH_ARM64) && RTE_CC_IS_GNU && (GCC_VERSION < 70000))
/* NEON intrinsic vcopyq_laneq_u32() is not supported in ARMv7-A (AArch32).
 * On AArch64, this intrinsic is supported since GCC version 7.
 */
static inline uint32x4_t
vcopyq_laneq_u32(uint32x4_t a, const int lane_a,
		 uint32x4_t b, const int lane_b)
{
	return vsetq_lane_u32(vgetq_lane_u32(b, lane_b), a, lane_a);
}
#endif
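
/*
 * Illustrative sketch: copy lane 3 of b into lane 0 of a; the other three
 * lanes of a are left untouched.
 *
 *	uint32x4_t r = vcopyq_laneq_u32(a, 0, b, 3);
 */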

#if defined(RTE_ARCH_ARM64)
#if RTE_CC_IS_GNU && (GCC_VERSION < 70000)

#if (GCC_VERSION < 40900)
typedef uint64_t poly64_t;
typedef uint64x2_t poly64x2_t;
typedef uint8_t poly128_t __attribute__((vector_size(16), aligned(16)));

static inline uint32x4_t
vceqzq_u32(uint32x4_t a)
{
	return vceqq_u32(a, vdupq_n_u32(0));
}
#endif

/* NEON intrinsic vreinterpretq_u64_p128() is supported since GCC version 7 */
static inline uint64x2_t
vreinterpretq_u64_p128(poly128_t x)
{
	return (uint64x2_t)x;
}

/* NEON intrinsic vreinterpretq_p64_u64() is supported since GCC version 7 */
static inline poly64x2_t
vreinterpretq_p64_u64(uint64x2_t x)
{
	return (poly64x2_t)x;
}

/* NEON intrinsic vgetq_lane_p64() is supported since GCC version 7 */
static inline poly64_t
vgetq_lane_p64(poly64x2_t x, const int lane)
{
	RTE_ASSERT(lane >= 0 && lane <= 1);

	poly64_t *p = (poly64_t *)&x;

	return p[lane];
}
#endif
#endif

/*
 * If (0 <= index <= 15), then call the ASIMD ext instruction on the
 * 128 bit regs v0 and v1 with the appropriate index.
 *
 * Else returns a zero vector.
 */
static inline uint8x16_t
vextract(uint8x16_t v0, uint8x16_t v1, const int index)
{
	switch (index) {
	case 0: return vextq_u8(v0, v1, 0);
	case 1: return vextq_u8(v0, v1, 1);
	case 2: return vextq_u8(v0, v1, 2);
	case 3: return vextq_u8(v0, v1, 3);
	case 4: return vextq_u8(v0, v1, 4);
	case 5: return vextq_u8(v0, v1, 5);
	case 6: return vextq_u8(v0, v1, 6);
	case 7: return vextq_u8(v0, v1, 7);
	case 8: return vextq_u8(v0, v1, 8);
	case 9: return vextq_u8(v0, v1, 9);
	case 10: return vextq_u8(v0, v1, 10);
	case 11: return vextq_u8(v0, v1, 11);
	case 12: return vextq_u8(v0, v1, 12);
	case 13: return vextq_u8(v0, v1, 13);
	case 14: return vextq_u8(v0, v1, 14);
	case 15: return vextq_u8(v0, v1, 15);
	}
	return vdupq_n_u8(0);
}
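
/*
 * Illustrative sketch: vextq_u8() requires a compile-time constant index,
 * so the switch above dispatches a runtime index to the matching EXT
 * instruction. For example, index 4 takes bytes 4..19 of the
 * concatenation v0:v1.
 *
 *	uint8x16_t r = vextract(v0, v1, 4);
 */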

/**
 * Shifts right 128 bit register by specified number of bytes
 *
 * Value of shift parameter must be in range 0 - 16
 */
static inline uint64x2_t
vshift_bytes_right(uint64x2_t reg, const unsigned int shift)
{
	return vreinterpretq_u64_u8(vextract(
				vreinterpretq_u8_u64(reg),
				vdupq_n_u8(0),
				shift));
}
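
/*
 * Illustrative sketch: shifting right by 8 bytes moves the upper 64-bit
 * lane into the lower one and zero-fills the upper lane.
 *
 *	uint64x2_t v = vcombine_u64(vdup_n_u64(1), vdup_n_u64(2));
 *	uint64x2_t r = vshift_bytes_right(v, 8);	// lanes: {2, 0}
 */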

/**
 * Shifts left 128 bit register by specified number of bytes
 *
 * Value of shift parameter must be in range 0 - 16
 */
static inline uint64x2_t
vshift_bytes_left(uint64x2_t reg, const unsigned int shift)
{
	return vreinterpretq_u64_u8(vextract(
				vdupq_n_u8(0),
				vreinterpretq_u8_u64(reg),
				16 - shift));
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_VECT_ARM_H_ */