1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015 Cavium, Inc
3 * Copyright(c) 2022 StarFive
4 * Copyright(c) 2022 SiFive
5 * Copyright(c) 2022 Semihalf
8 #ifndef _TEST_XMMT_OPS_H_
9 #define _TEST_XMMT_OPS_H_
13 #if defined(RTE_ARCH_ARM)
15 /* vect_* abstraction implementation using NEON */
/* loads the xmm_t value from address p (does not need to be 16-byte aligned) */
/*
 * (p) must be parenthesized: a cast binds tighter than binary operators, so
 * with p == base + off the old expansion "(const int32_t *)base + off" did
 * pointer arithmetic in int32_t units instead of casting the full expression.
 */
#define vect_loadu_sil128(p) vld1q_s32((const int32_t *)(p))
/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
static __rte_always_inline xmm_t
vect_set_epi32(int i3, int i2, int i1, int i0)
/* i0 lands in lane 0 (least significant), i3 in lane 3 — the same argument
 * order as the x86 _mm_set_epi32() mapping used elsewhere in this header. */
int32_t data[4] = {i0, i1, i2, i3};
return vld1q_s32(data);
29 #elif defined(RTE_ARCH_X86)
31 /* vect_* abstraction implementation using SSE */
/* loads the xmm_t value from address p (does not need to be 16-byte aligned) */
/* parameter parenthesized per macro hygiene convention (CERT PRE01-C) */
#define vect_loadu_sil128(p) _mm_loadu_si128((p))
/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
/* i0 is the least-significant lane; parameters parenthesized (CERT PRE01-C) */
#define vect_set_epi32(i3, i2, i1, i0) _mm_set_epi32((i3), (i2), (i1), (i0))
39 #elif defined(RTE_ARCH_PPC_64)
41 /* vect_* abstraction implementation using ALTIVEC */
/* loads the xmm_t value from address p (does not need to be 16-byte aligned) */
/*
 * NOTE(review): vec_ld() truncates the effective address to a 16-byte
 * boundary, so unlike the other arch variants this one silently DOES require
 * p to be 16-byte aligned; vec_vsx_ld(0, p) would be a true unaligned load.
 * Left unchanged pending confirmation that all callers pass aligned pointers.
 * Parameter parenthesized per macro hygiene convention (CERT PRE01-C).
 */
#define vect_loadu_sil128(p) vec_ld(0, (p))
/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
static __rte_always_inline xmm_t
vect_set_epi32(int i3, int i2, int i1, int i0)
/* i0 is element 0 (least significant), i3 element 3 — same lane order as the
 * x86 _mm_set_epi32() mapping used elsewhere in this header. */
xmm_t data = (xmm_t){i0, i1, i2, i3};
55 #elif defined(RTE_ARCH_RISCV)
/* loads the xmm_t value from address p (does not need to be 16-byte aligned) */
/* parameter parenthesized per macro hygiene convention (CERT PRE01-C) */
#define vect_loadu_sil128(p) vect_load_128((p))
/* sets the 4 signed 32-bit integer values and returns the xmm_t variable */
static __rte_always_inline xmm_t
vect_set_epi32(int i3, int i2, int i1, int i0)
/* i0 is element 0 (least significant), i3 element 3 — same lane order as the
 * x86 _mm_set_epi32() mapping used elsewhere in this header. */
xmm_t data = (xmm_t){i0, i1, i2, i3};
70 #endif /* _TEST_XMMT_OPS_H_ */