From b56f46c327034e403af20a543b13aad7b09d5653 Mon Sep 17 00:00:00 2001
From: Chao Zhu
Date: Tue, 28 Oct 2014 13:50:50 +0100
Subject: [PATCH] eal: split byte order operations to architecture specific

This patch splits the byte order operations out of the common DPDK
headers and pushes them into architecture-specific arch directories, so
that support for other processor architectures can be added easily.

Signed-off-by: Chao Zhu
Signed-off-by: David Marchand
Acked-by: Thomas Monjalon
---
 lib/librte_eal/common/Makefile                    |   4 +-
 .../common/include/arch/i686/rte_byteorder.h      | 129 +++++++++++++++++
 .../include/arch/x86_64/rte_byteorder.h           | 130 +++++++++++++++++
 .../include/{ => generic}/rte_byteorder.h         | 135 ++++--------------
 4 files changed, 288 insertions(+), 110 deletions(-)
 create mode 100644 lib/librte_eal/common/include/arch/i686/rte_byteorder.h
 create mode 100644 lib/librte_eal/common/include/arch/x86_64/rte_byteorder.h
 rename lib/librte_eal/common/include/{ => generic}/rte_byteorder.h (63%)
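As a usage reference for reviewers: the public entry points are unchanged
by this split. A minimal caller, illustrative only and not part of the
diff, assuming the usual <rte_byteorder.h> include path:

	#include <stdio.h>
	#include <stdint.h>
	#include <rte_byteorder.h>

	int main(void)
	{
		uint32_t host = 0x11223344;
		/* Identity on big-endian CPUs; a byte swap on x86. */
		uint32_t be = rte_cpu_to_be_32(host);

		printf("host=0x%08x be=0x%08x swapped=0x%08x\n",
		       host, be, rte_bswap32(host));
		return 0;
	}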
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index 8ab363b9fe..62a39cd86b 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -31,7 +31,7 @@
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
-INC := rte_branch_prediction.h rte_byteorder.h rte_common.h
+INC := rte_branch_prediction.h rte_common.h
 INC += rte_cycles.h rte_debug.h rte_eal.h rte_errno.h rte_launch.h rte_lcore.h
 INC += rte_log.h rte_memcpy.h rte_memory.h rte_memzone.h rte_pci.h
 INC += rte_pci_dev_ids.h rte_per_lcore.h rte_prefetch.h rte_random.h
@@ -46,7 +46,7 @@ ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
 INC += rte_warnings.h
 endif
 
-GENERIC_INC := rte_atomic.h
+GENERIC_INC := rte_atomic.h rte_byteorder.h
 ARCH_INC := $(GENERIC_INC)
 
 SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC))
diff --git a/lib/librte_eal/common/include/arch/i686/rte_byteorder.h b/lib/librte_eal/common/include/arch/i686/rte_byteorder.h
new file mode 100644
index 0000000000..6d5b23e851
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/i686/rte_byteorder.h
@@ -0,0 +1,129 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BYTEORDER_I686_H_
+#define _RTE_BYTEORDER_I686_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_byteorder.h"
+
+/*
+ * An architecture-optimized byte swap for a 16-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap16().
+ */
+static inline uint16_t rte_arch_bswap16(uint16_t _x)
+{
+	register uint16_t x = _x;
+	asm volatile ("xchgb %b[x1],%h[x2]"
+		      : [x1] "=Q" (x)
+		      : [x2] "0" (x)
+		      );
+	return x;
+}
+
+/*
+ * An architecture-optimized byte swap for a 32-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap32().
+ */
+static inline uint32_t rte_arch_bswap32(uint32_t _x)
+{
+	register uint32_t x = _x;
+	asm volatile ("bswap %[x]"
+		      : [x] "+r" (x)
+		      );
+	return x;
+}
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+/* Compat./Leg. mode */
+static inline uint64_t rte_arch_bswap64(uint64_t x)
+{
+	uint64_t ret = 0;
+	ret |= ((uint64_t)rte_arch_bswap32(x & 0xffffffffUL) << 32);
+	ret |= ((uint64_t)rte_arch_bswap32((x >> 32) & 0xffffffffUL));
+	return ret;
+}
+
+#ifndef RTE_FORCE_INTRINSICS
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?		\
+				   rte_constant_bswap16(x) :		\
+				   rte_arch_bswap16(x)))
+
+#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?		\
+				   rte_constant_bswap32(x) :		\
+				   rte_arch_bswap32(x)))
+
+#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ?		\
+				   rte_constant_bswap64(x) :		\
+				   rte_arch_bswap64(x)))
+#else
+/*
+ * __builtin_bswap16 is only available in gcc 4.8 and upwards
+ */
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?		\
+				   rte_constant_bswap16(x) :		\
+				   rte_arch_bswap16(x)))
+#endif
+#endif
+
+#define rte_cpu_to_le_16(x) (x)
+#define rte_cpu_to_le_32(x) (x)
+#define rte_cpu_to_le_64(x) (x)
+
+#define rte_cpu_to_be_16(x) rte_bswap16(x)
+#define rte_cpu_to_be_32(x) rte_bswap32(x)
+#define rte_cpu_to_be_64(x) rte_bswap64(x)
+
+#define rte_le_to_cpu_16(x) (x)
+#define rte_le_to_cpu_32(x) (x)
+#define rte_le_to_cpu_64(x) (x)
+
+#define rte_be_to_cpu_16(x) rte_bswap16(x)
+#define rte_be_to_cpu_32(x) rte_bswap32(x)
+#define rte_be_to_cpu_64(x) rte_bswap64(x)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BYTEORDER_I686_H_ */
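A note on the compat-mode rte_arch_bswap64() above: i686 has no 64-bit
bswap instruction, so the swap is composed from two 32-bit swaps with the
halves exchanged. A standalone sketch of the same idea, with a
hypothetical name (bswap64_from_32), not part of the patch:

	#include <stdint.h>
	#include <assert.h>

	/* Portable equivalent of the compat-mode 64-bit swap. */
	static uint64_t bswap64_from_32(uint64_t x)
	{
		uint32_t lo = (uint32_t)(x & 0xffffffffUL);
		uint32_t hi = (uint32_t)(x >> 32);

		/* Swap each 32-bit half, then exchange the halves. */
		return ((uint64_t)__builtin_bswap32(lo) << 32) |
		       (uint64_t)__builtin_bswap32(hi);
	}

	int main(void)
	{
		assert(bswap64_from_32(0x0102030405060708ULL) ==
		       0x0807060504030201ULL);
		return 0;
	}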
diff --git a/lib/librte_eal/common/include/arch/x86_64/rte_byteorder.h b/lib/librte_eal/common/include/arch/x86_64/rte_byteorder.h
new file mode 100644
index 0000000000..825e5763ba
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/x86_64/rte_byteorder.h
@@ -0,0 +1,130 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BYTEORDER_X86_64_H_
+#define _RTE_BYTEORDER_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_byteorder.h"
+
+/*
+ * An architecture-optimized byte swap for a 16-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap16().
+ */
+static inline uint16_t rte_arch_bswap16(uint16_t _x)
+{
+	register uint16_t x = _x;
+	asm volatile ("xchgb %b[x1],%h[x2]"
+		      : [x1] "=Q" (x)
+		      : [x2] "0" (x)
+		      );
+	return x;
+}
+
+/*
+ * An architecture-optimized byte swap for a 32-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap32().
+ */
+static inline uint32_t rte_arch_bswap32(uint32_t _x)
+{
+	register uint32_t x = _x;
+	asm volatile ("bswap %[x]"
+		      : [x] "+r" (x)
+		      );
+	return x;
+}
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+/* 64-bit mode */
+static inline uint64_t rte_arch_bswap64(uint64_t _x)
+{
+	register uint64_t x = _x;
+	asm volatile ("bswap %[x]"
+		      : [x] "+r" (x)
+		      );
+	return x;
+}
+
+#ifndef RTE_FORCE_INTRINSICS
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?		\
+				   rte_constant_bswap16(x) :		\
+				   rte_arch_bswap16(x)))
+
+#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?		\
+				   rte_constant_bswap32(x) :		\
+				   rte_arch_bswap32(x)))
+
+#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ?		\
+				   rte_constant_bswap64(x) :		\
+				   rte_arch_bswap64(x)))
+#else
+/*
+ * __builtin_bswap16 is only available in gcc 4.8 and upwards
+ */
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?		\
+				   rte_constant_bswap16(x) :		\
+				   rte_arch_bswap16(x)))
+#endif
+#endif
+
+#define rte_cpu_to_le_16(x) (x)
+#define rte_cpu_to_le_32(x) (x)
+#define rte_cpu_to_le_64(x) (x)
+
+#define rte_cpu_to_be_16(x) rte_bswap16(x)
+#define rte_cpu_to_be_32(x) rte_bswap32(x)
+#define rte_cpu_to_be_64(x) rte_bswap64(x)
+
+#define rte_le_to_cpu_16(x) (x)
+#define rte_le_to_cpu_32(x) (x)
+#define rte_le_to_cpu_64(x) (x)
+
+#define rte_be_to_cpu_16(x) rte_bswap16(x)
+#define rte_be_to_cpu_32(x) rte_bswap32(x)
+#define rte_be_to_cpu_64(x) rte_bswap64(x)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BYTEORDER_X86_64_H_ */
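Both arch headers dispatch rte_bswap*() through __builtin_constant_p(), so
constant arguments fold away at compile time via rte_constant_bswap*()
while runtime values reach the inline asm. A reduced, self-contained
illustration of the pattern, with hypothetical my_* names, not part of the
patch:

	#include <stdint.h>

	static inline uint16_t my_constant_bswap16(uint16_t x)
	{
		return (uint16_t)(((x & 0x00ffU) << 8) |
				  ((x & 0xff00U) >> 8));
	}

	static inline uint16_t my_arch_bswap16(uint16_t x)
	{
		/* Stand-in for the "xchgb" inline asm. */
		return (uint16_t)((uint16_t)(x << 8) | (x >> 8));
	}

	/* Constants fold at compile time; runtime values take the
	 * architecture-optimized path. */
	#define my_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?	\
					  my_constant_bswap16(x) :	\
					  my_arch_bswap16(x)))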
diff --git a/lib/librte_eal/common/include/rte_byteorder.h b/lib/librte_eal/common/include/generic/rte_byteorder.h
similarity index 63%
rename from lib/librte_eal/common/include/rte_byteorder.h
rename to lib/librte_eal/common/include/generic/rte_byteorder.h
index 30fbd56394..9358136ac0 100644
--- a/lib/librte_eal/common/include/rte_byteorder.h
+++ b/lib/librte_eal/common/include/generic/rte_byteorder.h
@@ -43,10 +43,6 @@
  * the implementation is architecture-specific.
  */
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #include <stdint.h>
 
 /*
@@ -96,175 +92,98 @@ rte_constant_bswap64(uint64_t x)
 		((x & 0xff00000000000000ULL) >> 56);
 }
 
-/*
- * An architecture-optimized byte swap for a 16-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap16().
- */
-static inline uint16_t rte_arch_bswap16(uint16_t _x)
-{
-	register uint16_t x = _x;
-	asm volatile ("xchgb %b[x1],%h[x2]"
-		      : [x1] "=Q" (x)
-		      : [x2] "0" (x)
-		      );
-	return x;
-}
-
-/*
- * An architecture-optimized byte swap for a 32-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap32().
- */
-static inline uint32_t rte_arch_bswap32(uint32_t _x)
-{
-	register uint32_t x = _x;
-	asm volatile ("bswap %[x]"
-		      : [x] "+r" (x)
-		      );
-	return x;
-}
-
-/*
- * An architecture-optimized byte swap for a 64-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap64().
- */
-#ifdef RTE_ARCH_X86_64
-/* 64-bit mode */
-static inline uint64_t rte_arch_bswap64(uint64_t _x)
-{
-	register uint64_t x = _x;
-	asm volatile ("bswap %[x]"
-		      : [x] "+r" (x)
-		      );
-	return x;
-}
-#else /* ! RTE_ARCH_X86_64 */
-/* Compat./Leg. mode */
-static inline uint64_t rte_arch_bswap64(uint64_t x)
-{
-	uint64_t ret = 0;
-	ret |= ((uint64_t)rte_arch_bswap32(x & 0xffffffffUL) << 32);
-	ret |= ((uint64_t)rte_arch_bswap32((x >> 32) & 0xffffffffUL));
-	return ret;
-}
-#endif /* RTE_ARCH_X86_64 */
+#ifdef __DOXYGEN__
 
-#ifndef RTE_FORCE_INTRINSICS
 /**
  * Swap bytes in a 16-bit value.
  */
-#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?		\
-				   rte_constant_bswap16(x) :		\
-				   rte_arch_bswap16(x)))
+static uint16_t rte_bswap16(uint16_t _x);
 
 /**
  * Swap bytes in a 32-bit value.
  */
-#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?		\
-				   rte_constant_bswap32(x) :		\
-				   rte_arch_bswap32(x)))
+static uint32_t rte_bswap32(uint32_t x);
 
 /**
  * Swap bytes in a 64-bit value.
  */
-#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ?		\
-				   rte_constant_bswap64(x) :		\
-				   rte_arch_bswap64(x)))
-
-#else
-
-/**
- * Swap bytes in a 16-bit value.
- * __builtin_bswap16 is only available gcc 4.8 and upwards
- */
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
-#define rte_bswap16(x) __builtin_bswap16(x)
-#else
-#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?		\
-				   rte_constant_bswap16(x) :		\
-				   rte_arch_bswap16(x)))
-#endif
-
-/**
- * Swap bytes in a 32-bit value.
- */
-#define rte_bswap32(x) __builtin_bswap32(x)
-
-/**
- * Swap bytes in a 64-bit value.
- */
-#define rte_bswap64(x) __builtin_bswap64(x)
-
-#endif
+static uint64_t rte_bswap64(uint64_t x);
 
 /**
  * Convert a 16-bit value from CPU order to little endian.
  */
-#define rte_cpu_to_le_16(x) (x)
+static uint16_t rte_cpu_to_le_16(uint16_t x);
 
 /**
  * Convert a 32-bit value from CPU order to little endian.
  */
-#define rte_cpu_to_le_32(x) (x)
+static uint32_t rte_cpu_to_le_32(uint32_t x);
 
 /**
  * Convert a 64-bit value from CPU order to little endian.
  */
-#define rte_cpu_to_le_64(x) (x)
+static uint64_t rte_cpu_to_le_64(uint64_t x);
 
 /**
  * Convert a 16-bit value from CPU order to big endian.
  */
-#define rte_cpu_to_be_16(x) rte_bswap16(x)
+static uint16_t rte_cpu_to_be_16(uint16_t x);
 
 /**
  * Convert a 32-bit value from CPU order to big endian.
  */
-#define rte_cpu_to_be_32(x) rte_bswap32(x)
+static uint32_t rte_cpu_to_be_32(uint32_t x);
 
 /**
  * Convert a 64-bit value from CPU order to big endian.
  */
-#define rte_cpu_to_be_64(x) rte_bswap64(x)
+static uint64_t rte_cpu_to_be_64(uint64_t x);
 
 /**
  * Convert a 16-bit value from little endian to CPU order.
  */
-#define rte_le_to_cpu_16(x) (x)
+static uint16_t rte_le_to_cpu_16(uint16_t x);
 
 /**
  * Convert a 32-bit value from little endian to CPU order.
  */
-#define rte_le_to_cpu_32(x) (x)
+static uint32_t rte_le_to_cpu_32(uint32_t x);
 
 /**
  * Convert a 64-bit value from little endian to CPU order.
  */
-#define rte_le_to_cpu_64(x) (x)
+static uint64_t rte_le_to_cpu_64(uint64_t x);
 
 /**
  * Convert a 16-bit value from big endian to CPU order.
  */
-#define rte_be_to_cpu_16(x) rte_bswap16(x)
+static uint16_t rte_be_to_cpu_16(uint16_t x);
 
 /**
  * Convert a 32-bit value from big endian to CPU order.
  */
-#define rte_be_to_cpu_32(x) rte_bswap32(x)
+static uint32_t rte_be_to_cpu_32(uint32_t x);
 
 /**
  * Convert a 64-bit value from big endian to CPU order.
  */
-#define rte_be_to_cpu_64(x) rte_bswap64(x)
+static uint64_t rte_be_to_cpu_64(uint64_t x);
+
+#endif /* __DOXYGEN__ */
+
+#ifdef RTE_FORCE_INTRINSICS
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+#define rte_bswap16(x) __builtin_bswap16(x)
+#endif
+
+#define rte_bswap32(x) __builtin_bswap32(x)
+
+#define rte_bswap64(x) __builtin_bswap64(x)
 
-#ifdef __cplusplus
-}
 #endif
 
 #endif /* _RTE_BYTEORDER_H_ */
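The generic header now carries documentation-only prototypes behind
__DOXYGEN__; the real definitions live in each arch header, which includes
the generic one. A minimal sketch of that layering, with hypothetical file
and function names (generic.h, arch.h, my_op), not the actual DPDK files:

	/* generic.h: the documented contract, seen only by Doxygen. */
	#include <stdint.h>

	#ifdef __DOXYGEN__
	/** Documented here; the real definition is per-architecture. */
	static uint16_t my_op(uint16_t x);
	#endif

	/* arch.h: pulls in the contract, then supplies the definition. */
	#include "generic.h"
	#define my_op(x) ((uint16_t)((x) + 1))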
-- 
2.20.1