eal: move arch-specific header files
author Thomas Monjalon <thomas@monjalon.net>
Fri, 27 Mar 2020 01:15:36 +0000 (02:15 +0100)
committer David Marchand <david.marchand@redhat.com>
Tue, 31 Mar 2020 11:08:55 +0000 (13:08 +0200)
The arch-specific directories arm, ppc and x86 in common/include/arch/
are moved to become include/ sub-directories of their respective arch directories:
- arm/include/
- ppc/include/
- x86/include/
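
The installed header layout is unchanged: the headers keep their names and are
still installed flat under the configured arch include directory (see the
install_headers() calls in the new meson.build files below), so application
code keeps compiling as-is. A minimal sketch, assuming a regular DPDK build
environment:

	/* Includes resolve exactly as before the move; only the in-tree
	 * location of the headers changed, not the installed layout. */
	#include <rte_atomic.h>
	#include <rte_memcpy.h>

	int main(void)
	{
		char dst[32], src[32] = "include paths unchanged";

		rte_mb();                          /* arch-specific full barrier */
		rte_memcpy(dst, src, sizeof(src)); /* arch-specific copy */
		return 0;
	}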

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: David Christensen <drc@linux.vnet.ibm.com>
Acked-by: David Marchand <david.marchand@redhat.com>
131 files changed:
MAINTAINERS
config/arm/meson.build
config/common_armv8a_linux
devtools/build-tags.sh
lib/librte_eal/arm/include/meson.build [new file with mode: 0644]
lib/librte_eal/arm/include/rte_atomic.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_atomic_32.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_atomic_64.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_byteorder.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_cpuflags.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_cpuflags_32.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_cpuflags_64.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_cycles.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_cycles_32.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_cycles_64.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_io.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_io_64.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_mcslock.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_memcpy.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_memcpy_32.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_memcpy_64.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_pause.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_pause_32.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_pause_64.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_prefetch.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_prefetch_32.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_prefetch_64.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_rwlock.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_spinlock.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_ticketlock.h [new file with mode: 0644]
lib/librte_eal/arm/include/rte_vect.h [new file with mode: 0644]
lib/librte_eal/arm/meson.build
lib/librte_eal/common/Makefile
lib/librte_eal/common/include/arch/arm/meson.build [deleted file]
lib/librte_eal/common/include/arch/arm/rte_atomic.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_atomic_32.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_atomic_64.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_byteorder.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_cpuflags.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_cycles.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_cycles_32.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_cycles_64.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_io.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_io_64.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_mcslock.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_memcpy.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_pause.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_pause_32.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_pause_64.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_prefetch.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_prefetch_32.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_prefetch_64.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_rwlock.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_spinlock.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_ticketlock.h [deleted file]
lib/librte_eal/common/include/arch/arm/rte_vect.h [deleted file]
lib/librte_eal/common/include/arch/ppc [deleted symlink]
lib/librte_eal/common/include/arch/ppc_64/meson.build [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_byteorder.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_cycles.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_io.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_mcslock.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_pause.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_prefetch.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_rwlock.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_spinlock.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_ticketlock.h [deleted file]
lib/librte_eal/common/include/arch/ppc_64/rte_vect.h [deleted file]
lib/librte_eal/common/include/arch/x86/meson.build [deleted file]
lib/librte_eal/common/include/arch/x86/rte_atomic.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_atomic_32.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_atomic_64.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_byteorder.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_byteorder_32.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_byteorder_64.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_cpuflags.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_cycles.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_io.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_mcslock.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_memcpy.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_pause.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_prefetch.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_rtm.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_rwlock.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_spinlock.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_ticketlock.h [deleted file]
lib/librte_eal/common/include/arch/x86/rte_vect.h [deleted file]
lib/librte_eal/common/meson.build
lib/librte_eal/meson.build
lib/librte_eal/ppc/include/meson.build [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_atomic.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_byteorder.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_cpuflags.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_cycles.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_io.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_mcslock.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_memcpy.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_pause.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_prefetch.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_rwlock.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_spinlock.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_ticketlock.h [new file with mode: 0644]
lib/librte_eal/ppc/include/rte_vect.h [new file with mode: 0644]
lib/librte_eal/ppc/meson.build
lib/librte_eal/x86/include/meson.build [new file with mode: 0644]
lib/librte_eal/x86/include/rte_atomic.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_atomic_32.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_atomic_64.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_byteorder.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_byteorder_32.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_byteorder_64.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_cpuflags.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_cycles.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_io.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_mcslock.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_memcpy.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_pause.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_prefetch.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_rtm.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_rwlock.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_spinlock.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_ticketlock.h [new file with mode: 0644]
lib/librte_eal/x86/include/rte_vect.h [new file with mode: 0644]
lib/librte_eal/x86/meson.build

index b373efc..840be6f 100644 (file)
@@ -168,9 +168,8 @@ Environment Abstraction Layer
 T: git://dpdk.org/dpdk
 
 EAL API and common code
-F: lib/librte_eal/common/*
-F: lib/librte_eal/common/include/*
-F: lib/librte_eal/common/include/generic/
+F: lib/librte_eal/common/
+F: lib/librte_eal/common/include/
 F: lib/librte_eal/rte_eal_version.map
 F: doc/guides/prog_guide/env_abstraction_layer.rst
 F: app/test/test_alarm.c
@@ -262,12 +261,11 @@ ARM v7
 M: Jan Viktorin <viktorin@rehivetech.com>
 M: Gavin Hu <gavin.hu@arm.com>
 F: lib/librte_eal/arm/
-F: lib/librte_eal/common/include/arch/arm/
 
 ARM v8
 M: Jerin Jacob <jerinj@marvell.com>
 M: Gavin Hu <gavin.hu@arm.com>
-F: lib/librte_eal/common/include/arch/arm/*_64.h
+F: lib/librte_eal/arm/include/*_64.h
 F: lib/librte_net/net_crc_neon.h
 F: lib/librte_acl/acl_run_neon.*
 F: lib/librte_bpf/bpf_jit_arm64.c
@@ -282,7 +280,6 @@ F: drivers/net/virtio/virtio_rxtx_simple_neon.c
 IBM POWER (alpha)
 M: David Christensen <drc@linux.vnet.ibm.com>
 F: lib/librte_eal/ppc/
-F: lib/librte_eal/common/include/arch/ppc_64/
 F: drivers/net/i40e/i40e_rxtx_vec_altivec.c
 F: examples/l3fwd/*altivec.h
 
@@ -290,7 +287,6 @@ Intel x86
 M: Bruce Richardson <bruce.richardson@intel.com>
 M: Konstantin Ananyev <konstantin.ananyev@intel.com>
 F: lib/librte_eal/x86/
-F: lib/librte_eal/common/include/arch/x86/
 
 Linux EAL (with overlaps)
 F: lib/librte_eal/linux/Makefile
index 7e22358..6e75e6d 100644 (file)
@@ -11,7 +11,7 @@ arm_force_default_march = (machine == 'default')
 flags_common_default = [
       # Accelerate rte_memcpy. Be sure to run unit test (memcpy_perf_autotest)
        # to determine the best threshold in code. Refer to notes in source file
-       # (lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h) for more info.
+       # (lib/librte_eal/arm/include/rte_memcpy_64.h) for more info.
        ['RTE_ARCH_ARM64_MEMCPY', false],
        #       ['RTE_ARM64_MEMCPY_ALIGNED_THRESHOLD', 2048],
        #       ['RTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD', 512],
index 020f8f0..e942657 100644 (file)
@@ -21,7 +21,7 @@ CONFIG_RTE_USE_C11_MEM_MODEL=y
 
 # Accelerate rte_memcpy. Be sure to run unit test (memcpy_perf_autotest)
 # to determine the best threshold in code. Refer to notes in source file
-# (lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h) for more info.
+# (lib/librte_eal/arm/include/rte_memcpy_64.h) for more info.
 CONFIG_RTE_ARCH_ARM64_MEMCPY=n
 #CONFIG_RTE_ARM64_MEMCPY_ALIGNED_THRESHOLD=2048
 #CONFIG_RTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD=512
index 8221f90..276fff6 100755 (executable)
@@ -79,28 +79,26 @@ bsd_sources()
 
 arm_common()
 {
-       find_sources "lib/librte_eal/arm" '*.[chS]'
        find_sources "$source_dirs" '*neon*.[chS]'
 }
 
 arm_32_sources()
 {
        arm_common
-       find_sources "lib/librte_eal/common/include/arch/arm" '*.[chS]' \
+       find_sources "lib/librte_eal/arm" '*.[chS]' \
                                        "$skip_64b_files"
 }
 
 arm_64_sources()
 {
        arm_common
-       find_sources "lib/librte_eal/common/include/arch/arm" '*.[chS]' \
+       find_sources "lib/librte_eal/arm" '*.[chS]' \
                                         "$skip_32b_files"
        find_sources "$source_dirs" '*arm64.[chS]'
 }
 
 x86_common()
 {
-       find_sources "lib/librte_eal/x86" '*.[chS]'
        find_sources "examples/performance-thread/common/arch/x86" '*.[chS]'
        find_sources "$source_dirs" '*_sse*.[chS]'
        find_sources "$source_dirs" '*_avx*.[chS]'
@@ -110,21 +108,20 @@ x86_common()
 x86_32_sources()
 {
        x86_common
-       find_sources "lib/librte_eal/common/include/arch/x86" '*.[chS]' \
+       find_sources "lib/librte_eal/x86" '*.[chS]' \
                                        "$skip_64b_files"
 }
 
 x86_64_sources()
 {
        x86_common
-       find_sources "lib/librte_eal/common/include/arch/x86" '*.[chS]' \
+       find_sources "lib/librte_eal/x86" '*.[chS]' \
                                        "$skip_32b_files"
 }
 
 ppc_64_sources()
 {
        find_sources "lib/librte_eal/ppc" '*.[chS]'
-       find_sources "lib/librte_eal/common/include/arch/ppc_64" '*.[chS]'
        find_sources "$source_dirs" '*altivec*.[chS]'
 }
 
diff --git a/lib/librte_eal/arm/include/meson.build b/lib/librte_eal/arm/include/meson.build
new file mode 100644 (file)
index 0000000..1721743
--- /dev/null
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation.
+
+includes += include_directories('.')
+
+arch_headers = files(
+       'rte_atomic_32.h',
+       'rte_atomic_64.h',
+       'rte_atomic.h',
+       'rte_byteorder.h',
+       'rte_cpuflags_32.h',
+       'rte_cpuflags_64.h',
+       'rte_cpuflags.h',
+       'rte_cycles_32.h',
+       'rte_cycles_64.h',
+       'rte_cycles.h',
+       'rte_io_64.h',
+       'rte_io.h',
+       'rte_memcpy_32.h',
+       'rte_memcpy_64.h',
+       'rte_memcpy.h',
+       'rte_pause_32.h',
+       'rte_pause_64.h',
+       'rte_pause.h',
+       'rte_prefetch_32.h',
+       'rte_prefetch_64.h',
+       'rte_prefetch.h',
+       'rte_rwlock.h',
+       'rte_spinlock.h',
+       'rte_vect.h',
+)
+install_headers(arch_headers, subdir: get_option('include_subdir_arch'))
diff --git a/lib/librte_eal/arm/include/rte_atomic.h b/lib/librte_eal/arm/include/rte_atomic.h
new file mode 100644 (file)
index 0000000..40e14e5
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_ARM_H_
+#define _RTE_ATOMIC_ARM_H_
+
+#ifdef RTE_ARCH_64
+#include <rte_atomic_64.h>
+#else
+#include <rte_atomic_32.h>
+#endif
+
+#endif /* _RTE_ATOMIC_ARM_H_ */
diff --git a/lib/librte_eal/arm/include/rte_atomic_32.h b/lib/librte_eal/arm/include/rte_atomic_32.h
new file mode 100644 (file)
index 0000000..7dc0d06
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_ARM32_H_
+#define _RTE_ATOMIC_ARM32_H_
+
+#ifndef RTE_FORCE_INTRINSICS
+#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_atomic.h"
+
+#define        rte_mb()  __sync_synchronize()
+
+#define        rte_wmb() do { asm volatile ("dmb st" : : : "memory"); } while (0)
+
+#define        rte_rmb() __sync_synchronize()
+
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_wmb()
+
+#define rte_smp_rmb() rte_rmb()
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_wmb()
+
+#define rte_io_rmb() rte_rmb()
+
+#define rte_cio_wmb() rte_wmb()
+
+#define rte_cio_rmb() rte_rmb()
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_ARM32_H_ */
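
Not part of the patch: a minimal sketch of how the SMP barrier pair defined
above is used, assuming a hypothetical single-producer/single-consumer flag
protocol.

	#include <stdint.h>
	#include <rte_atomic.h>

	static uint32_t payload;
	static volatile uint32_t ready;

	static void producer(void)
	{
		payload = 42;
		rte_smp_wmb();  /* order the payload store before the flag store */
		ready = 1;
	}

	static int consumer(uint32_t *out)
	{
		if (!ready)
			return 0;
		rte_smp_rmb();  /* order the flag load before the payload load */
		*out = payload;
		return 1;
	}
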
diff --git a/lib/librte_eal/arm/include/rte_atomic_64.h b/lib/librte_eal/arm/include/rte_atomic_64.h
new file mode 100644 (file)
index 0000000..7b7099c
--- /dev/null
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Cavium, Inc
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_ATOMIC_ARM64_H_
+#define _RTE_ATOMIC_ARM64_H_
+
+#ifndef RTE_FORCE_INTRINSICS
+#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_atomic.h"
+#include <rte_branch_prediction.h>
+#include <rte_compat.h>
+#include <rte_debug.h>
+
+#define rte_mb() asm volatile("dsb sy" : : : "memory")
+
+#define rte_wmb() asm volatile("dsb st" : : : "memory")
+
+#define rte_rmb() asm volatile("dsb ld" : : : "memory")
+
+#define rte_smp_mb() asm volatile("dmb ish" : : : "memory")
+
+#define rte_smp_wmb() asm volatile("dmb ishst" : : : "memory")
+
+#define rte_smp_rmb() asm volatile("dmb ishld" : : : "memory")
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_wmb()
+
+#define rte_io_rmb() rte_rmb()
+
+#define rte_cio_wmb() asm volatile("dmb oshst" : : : "memory")
+
+#define rte_cio_rmb() asm volatile("dmb oshld" : : : "memory")
+
+/*------------------------ 128 bit atomic operations -------------------------*/
+
+#if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS)
+#define __ATOMIC128_CAS_OP(cas_op_name, op_string)                          \
+static __rte_noinline rte_int128_t                                          \
+cas_op_name(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated)      \
+{                                                                           \
+       /* caspX instructions register pair must start from even-numbered
+        * register at operand 1.
+        * So, specify registers for local variables here.
+        */                                                                 \
+       register uint64_t x0 __asm("x0") = (uint64_t)old.val[0];            \
+       register uint64_t x1 __asm("x1") = (uint64_t)old.val[1];            \
+       register uint64_t x2 __asm("x2") = (uint64_t)updated.val[0];        \
+       register uint64_t x3 __asm("x3") = (uint64_t)updated.val[1];        \
+       asm volatile(                                                       \
+               op_string " %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"   \
+               : [old0] "+r" (x0),                                         \
+               [old1] "+r" (x1)                                            \
+               : [upd0] "r" (x2),                                          \
+               [upd1] "r" (x3),                                            \
+               [dst] "r" (dst)                                             \
+               : "memory");                                                \
+       old.val[0] = x0;                                                    \
+       old.val[1] = x1;                                                    \
+       return old;                                                         \
+}
+
+__ATOMIC128_CAS_OP(__cas_128_relaxed, "casp")
+__ATOMIC128_CAS_OP(__cas_128_acquire, "caspa")
+__ATOMIC128_CAS_OP(__cas_128_release, "caspl")
+__ATOMIC128_CAS_OP(__cas_128_acq_rel, "caspal")
+
+#undef __ATOMIC128_CAS_OP
+
+#endif
+
+__rte_experimental
+static inline int
+rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp,
+               const rte_int128_t *src, unsigned int weak, int success,
+               int failure)
+{
+       /* Always do strong CAS */
+       RTE_SET_USED(weak);
+       /* The memory ordering for failure is ignored; the memory
+        * ordering for success must be stronger or equal.
+        */
+       RTE_SET_USED(failure);
+       /* Find invalid memory order */
+       RTE_ASSERT(success == __ATOMIC_RELAXED ||
+               success == __ATOMIC_ACQUIRE ||
+               success == __ATOMIC_RELEASE ||
+               success == __ATOMIC_ACQ_REL ||
+               success == __ATOMIC_SEQ_CST);
+
+       rte_int128_t expected = *exp;
+       rte_int128_t desired = *src;
+       rte_int128_t old;
+
+#if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS)
+       if (success == __ATOMIC_RELAXED)
+               old = __cas_128_relaxed(dst, expected, desired);
+       else if (success == __ATOMIC_ACQUIRE)
+               old = __cas_128_acquire(dst, expected, desired);
+       else if (success == __ATOMIC_RELEASE)
+               old = __cas_128_release(dst, expected, desired);
+       else
+               old = __cas_128_acq_rel(dst, expected, desired);
+#else
+#define __HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE)
+#define __HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || (mo) == __ATOMIC_ACQ_REL || \
+               (mo) == __ATOMIC_SEQ_CST)
+
+       int ldx_mo = __HAS_ACQ(success) ? __ATOMIC_ACQUIRE : __ATOMIC_RELAXED;
+       int stx_mo = __HAS_RLS(success) ? __ATOMIC_RELEASE : __ATOMIC_RELAXED;
+
+#undef __HAS_ACQ
+#undef __HAS_RLS
+
+       uint32_t ret = 1;
+
+       /* ldx128 cannot guarantee an atomic load on its own; src or old
+        * must be written back with the paired store-exclusive to verify
+        * that the ldx128 read was atomic.
+        */
+       do {
+
+#define __LOAD_128(op_string, src, dst) { \
+       asm volatile(                     \
+               op_string " %0, %1, %2"   \
+               : "=&r" (dst.val[0]),     \
+                 "=&r" (dst.val[1])      \
+               : "Q" (src->val[0])       \
+               : "memory"); }
+
+               if (ldx_mo == __ATOMIC_RELAXED)
+                       __LOAD_128("ldxp", dst, old)
+               else
+                       __LOAD_128("ldaxp", dst, old)
+
+#undef __LOAD_128
+
+#define __STORE_128(op_string, dst, src, ret) { \
+       asm volatile(                           \
+               op_string " %w0, %1, %2, %3"    \
+               : "=&r" (ret)                   \
+               : "r" (src.val[0]),             \
+                 "r" (src.val[1]),             \
+                 "Q" (dst->val[0])             \
+               : "memory"); }
+
+               if (likely(old.int128 == expected.int128)) {
+                       if (stx_mo == __ATOMIC_RELAXED)
+                               __STORE_128("stxp", dst, desired, ret)
+                       else
+                               __STORE_128("stlxp", dst, desired, ret)
+               } else {
+                       /* In the failure case (since 'weak' is ignored and only
+                        * weak == 0 is implemented), expected should contain
+                        * the atomically read value of dst. This means, 'old'
+                        * needs to be stored back to ensure it was read
+                        * atomically.
+                        */
+                       if (stx_mo == __ATOMIC_RELAXED)
+                               __STORE_128("stxp", dst, old, ret)
+                       else
+                               __STORE_128("stlxp", dst, old, ret)
+               }
+
+#undef __STORE_128
+
+       } while (unlikely(ret));
+#endif
+
+       /* Unconditionally updating expected removes an 'if' statement.
+        * expected should already be in register if not in the cache.
+        */
+       *exp = old;
+
+       return (old.int128 == expected.int128);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_ARM64_H_ */
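
Not part of the patch: a usage sketch for the 128-bit CAS above.
rte_atomic128_cmp_exchange() is experimental, so this assumes a build with
ALLOW_EXPERIMENTAL_API; the retry loop relies on *exp being refreshed with the
observed value on failure, exactly as implemented above.

	#include <rte_atomic.h>

	/* Atomically increment both 64-bit halves of a 128-bit pair. */
	static void
	pair_increment(rte_int128_t *dst)
	{
		rte_int128_t exp = *dst; /* plain snapshot; the CAS corrects races */
		rte_int128_t des;

		do {
			des.val[0] = exp.val[0] + 1;
			des.val[1] = exp.val[1] + 1;
			/* On failure, exp is refreshed with the value seen in *dst. */
		} while (!rte_atomic128_cmp_exchange(dst, &exp, &des, 0,
				__ATOMIC_ACQ_REL, __ATOMIC_RELAXED));
	}
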
diff --git a/lib/librte_eal/arm/include/rte_byteorder.h b/lib/librte_eal/arm/include/rte_byteorder.h
new file mode 100644 (file)
index 0000000..9ec4a97
--- /dev/null
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_BYTEORDER_ARM_H_
+#define _RTE_BYTEORDER_ARM_H_
+
+#ifndef RTE_FORCE_INTRINSICS
+#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_common.h>
+#include "generic/rte_byteorder.h"
+
+/* fix missing __builtin_bswap16 for gcc older than 4.8 */
+#if !(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
+
+static inline uint16_t rte_arch_bswap16(uint16_t _x)
+{
+       uint16_t x = _x;
+
+       asm volatile ("rev16 %w0,%w1"
+                     : "=r" (x)
+                     : "r" (x)
+                     );
+       return x;
+}
+
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \
+                                  rte_constant_bswap16(x) : \
+                                  rte_arch_bswap16(x)))
+#endif
+
+/* ARM architecture is bi-endian (both big and little). */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define rte_cpu_to_le_16(x) (x)
+#define rte_cpu_to_le_32(x) (x)
+#define rte_cpu_to_le_64(x) (x)
+
+#define rte_cpu_to_be_16(x) rte_bswap16(x)
+#define rte_cpu_to_be_32(x) rte_bswap32(x)
+#define rte_cpu_to_be_64(x) rte_bswap64(x)
+
+#define rte_le_to_cpu_16(x) (x)
+#define rte_le_to_cpu_32(x) (x)
+#define rte_le_to_cpu_64(x) (x)
+
+#define rte_be_to_cpu_16(x) rte_bswap16(x)
+#define rte_be_to_cpu_32(x) rte_bswap32(x)
+#define rte_be_to_cpu_64(x) rte_bswap64(x)
+
+#else /* RTE_BIG_ENDIAN */
+
+#define rte_cpu_to_le_16(x) rte_bswap16(x)
+#define rte_cpu_to_le_32(x) rte_bswap32(x)
+#define rte_cpu_to_le_64(x) rte_bswap64(x)
+
+#define rte_cpu_to_be_16(x) (x)
+#define rte_cpu_to_be_32(x) (x)
+#define rte_cpu_to_be_64(x) (x)
+
+#define rte_le_to_cpu_16(x) rte_bswap16(x)
+#define rte_le_to_cpu_32(x) rte_bswap32(x)
+#define rte_le_to_cpu_64(x) rte_bswap64(x)
+
+#define rte_be_to_cpu_16(x) (x)
+#define rte_be_to_cpu_32(x) (x)
+#define rte_be_to_cpu_64(x) (x)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BYTEORDER_ARM_H_ */
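
Not part of the patch: a small sketch using the conversion macros above to
build a network-order (big-endian) field; the IPv4-address helper is
hypothetical.

	#include <stdint.h>
	#include <rte_byteorder.h>

	/* Pack four octets into a big-endian 32-bit value. On little-endian
	 * ARM this byte-swaps; on big-endian ARM it is a no-op. */
	static uint32_t
	make_be_ipv4(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
	{
		uint32_t host = ((uint32_t)a << 24) | ((uint32_t)b << 16) |
				((uint32_t)c << 8) | (uint32_t)d;

		return rte_cpu_to_be_32(host);
	}
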
diff --git a/lib/librte_eal/arm/include/rte_cpuflags.h b/lib/librte_eal/arm/include/rte_cpuflags.h
new file mode 100644 (file)
index 0000000..022e7da
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_CPUFLAGS_ARM_H_
+#define _RTE_CPUFLAGS_ARM_H_
+
+#ifdef RTE_ARCH_64
+#include <rte_cpuflags_64.h>
+#else
+#include <rte_cpuflags_32.h>
+#endif
+
+#endif /* _RTE_CPUFLAGS_ARM_H_ */
diff --git a/lib/librte_eal/arm/include/rte_cpuflags_32.h b/lib/librte_eal/arm/include/rte_cpuflags_32.h
new file mode 100644 (file)
index 0000000..b5347be
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_CPUFLAGS_ARM32_H_
+#define _RTE_CPUFLAGS_ARM32_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Enumeration of all CPU features supported
+ */
+enum rte_cpu_flag_t {
+       RTE_CPUFLAG_SWP = 0,
+       RTE_CPUFLAG_HALF,
+       RTE_CPUFLAG_THUMB,
+       RTE_CPUFLAG_A26BIT,
+       RTE_CPUFLAG_FAST_MULT,
+       RTE_CPUFLAG_FPA,
+       RTE_CPUFLAG_VFP,
+       RTE_CPUFLAG_EDSP,
+       RTE_CPUFLAG_JAVA,
+       RTE_CPUFLAG_IWMMXT,
+       RTE_CPUFLAG_CRUNCH,
+       RTE_CPUFLAG_THUMBEE,
+       RTE_CPUFLAG_NEON,
+       RTE_CPUFLAG_VFPv3,
+       RTE_CPUFLAG_VFPv3D16,
+       RTE_CPUFLAG_TLS,
+       RTE_CPUFLAG_VFPv4,
+       RTE_CPUFLAG_IDIVA,
+       RTE_CPUFLAG_IDIVT,
+       RTE_CPUFLAG_VFPD32,
+       RTE_CPUFLAG_LPAE,
+       RTE_CPUFLAG_EVTSTRM,
+       RTE_CPUFLAG_AES,
+       RTE_CPUFLAG_PMULL,
+       RTE_CPUFLAG_SHA1,
+       RTE_CPUFLAG_SHA2,
+       RTE_CPUFLAG_CRC32,
+       RTE_CPUFLAG_V7L,
+       /* The last item */
+       RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */
+};
+
+#include "generic/rte_cpuflags.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CPUFLAGS_ARM32_H_ */
diff --git a/lib/librte_eal/arm/include/rte_cpuflags_64.h b/lib/librte_eal/arm/include/rte_cpuflags_64.h
new file mode 100644 (file)
index 0000000..95cc014
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Cavium, Inc
+ */
+
+#ifndef _RTE_CPUFLAGS_ARM64_H_
+#define _RTE_CPUFLAGS_ARM64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Enumeration of all CPU features supported
+ */
+enum rte_cpu_flag_t {
+       RTE_CPUFLAG_FP = 0,
+       RTE_CPUFLAG_NEON,
+       RTE_CPUFLAG_EVTSTRM,
+       RTE_CPUFLAG_AES,
+       RTE_CPUFLAG_PMULL,
+       RTE_CPUFLAG_SHA1,
+       RTE_CPUFLAG_SHA2,
+       RTE_CPUFLAG_CRC32,
+       RTE_CPUFLAG_ATOMICS,
+       RTE_CPUFLAG_AARCH64,
+       /* The last item */
+       RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */
+};
+
+#include "generic/rte_cpuflags.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CPUFLAGS_ARM64_H_ */
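
Not part of the patch: a sketch of runtime feature detection against the flags
enumerated above, using rte_cpu_get_flag_enabled() from the generic header
included at the end of the file; the helper name is hypothetical.

	#include <rte_cpuflags.h>

	/* Take an accelerated crypto path only when the CPU reports both
	 * the AES and PMULL extensions. */
	static int
	crypto_accel_available(void)
	{
		return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES) &&
		       rte_cpu_get_flag_enabled(RTE_CPUFLAG_PMULL);
	}
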
diff --git a/lib/librte_eal/arm/include/rte_cycles.h b/lib/librte_eal/arm/include/rte_cycles.h
new file mode 100644 (file)
index 0000000..e8ffa89
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_CYCLES_ARM_H_
+#define _RTE_CYCLES_ARM_H_
+
+#ifdef RTE_ARCH_64
+#include <rte_cycles_64.h>
+#else
+#include <rte_cycles_32.h>
+#endif
+
+#endif /* _RTE_CYCLES_ARM_H_ */
diff --git a/lib/librte_eal/arm/include/rte_cycles_32.h b/lib/librte_eal/arm/include/rte_cycles_32.h
new file mode 100644 (file)
index 0000000..859b097
--- /dev/null
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_CYCLES_ARM32_H_
+#define _RTE_CYCLES_ARM32_H_
+
+/* ARMv7 does not have a suitable source of clock signals. The only clock
+   counter available in the core is 32 bits wide, which makes it unsuitable:
+   it overflows every few seconds and is probably not accessible by userspace
+   programs. Therefore we use clock_gettime(CLOCK_MONOTONIC_RAW) to simulate
+   a counter running at 1 GHz.
+*/
+
+#include <time.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_cycles.h"
+
+/**
+ * Read the time base register.
+ *
+ * @return
+ *   The time base for this lcore.
+ */
+#ifndef RTE_ARM_EAL_RDTSC_USE_PMU
+
+/**
+ * This call is easily portable to any architecture; however, it may
+ * require a system call and be imprecise for some tasks.
+ */
+static inline uint64_t
+__rte_rdtsc_syscall(void)
+{
+       struct timespec val;
+       uint64_t v;
+
+       while (clock_gettime(CLOCK_MONOTONIC_RAW, &val) != 0)
+               /* no body */;
+
+       v  = (uint64_t) val.tv_sec * 1000000000LL;
+       v += (uint64_t) val.tv_nsec;
+       return v;
+}
+#define rte_rdtsc __rte_rdtsc_syscall
+
+#else
+
+/**
+ * This function requires the PMCCNTR to be configured and userspace
+ * access to it enabled:
+ *
+ *      asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r"(1));
+ *      asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(29));
+ *      asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r"(0x8000000f));
+ *
+ * which is possible only from privileged mode (kernel space).
+ */
+static inline uint64_t
+__rte_rdtsc_pmccntr(void)
+{
+       unsigned tsc;
+       uint64_t final_tsc;
+
+       /* Read PMCCNTR */
+       asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(tsc));
+       /* 1 tick = 64 clocks */
+       final_tsc = ((uint64_t)tsc) << 6;
+
+       return (uint64_t)final_tsc;
+}
+#define rte_rdtsc __rte_rdtsc_pmccntr
+
+#endif /* RTE_ARM_EAL_RDTSC_USE_PMU */
+
+static inline uint64_t
+rte_rdtsc_precise(void)
+{
+       rte_mb();
+       return rte_rdtsc();
+}
+
+static inline uint64_t
+rte_get_tsc_cycles(void) { return rte_rdtsc(); }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CYCLES_ARM32_H_ */
diff --git a/lib/librte_eal/arm/include/rte_cycles_64.h b/lib/librte_eal/arm/include/rte_cycles_64.h
new file mode 100644 (file)
index 0000000..da557b6
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Cavium, Inc
+ */
+
+#ifndef _RTE_CYCLES_ARM64_H_
+#define _RTE_CYCLES_ARM64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_cycles.h"
+
+/**
+ * Read the time base register.
+ *
+ * @return
+ *   The time base for this lcore.
+ */
+#ifndef RTE_ARM_EAL_RDTSC_USE_PMU
+/**
+ * This call is portable to any ARMv8 architecture; however, cntvct_el0
+ * typically runs at <= 100 MHz and may be imprecise for some tasks.
+ */
+static inline uint64_t
+rte_rdtsc(void)
+{
+       uint64_t tsc;
+
+       asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
+       return tsc;
+}
+#else
+/**
+ * This is an alternative method to enable rte_rdtsc() with the high
+ * resolution PMU cycle counter. The cycle counter runs at the CPU frequency
+ * and this scheme uses the ARMv8 PMU subsystem to read it from userspace.
+ * However, userspace access to the PMU cycle counter is not enabled by
+ * default in the arm64 Linux kernel. It can be enabled by configuring the
+ * PMU from privileged mode (kernel space):
+ *
+ * asm volatile("msr pmintenset_el1, %0" : : "r" ((u64)(0 << 31)));
+ * asm volatile("msr pmcntenset_el0, %0" :: "r" BIT(31));
+ * asm volatile("msr pmuserenr_el0, %0" : : "r"(BIT(0) | BIT(2)));
+ * asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+ * val |= (BIT(0) | BIT(2));
+ * isb();
+ * asm volatile("msr pmcr_el0, %0" : : "r" (val));
+ *
+ */
+static inline uint64_t
+rte_rdtsc(void)
+{
+       uint64_t tsc;
+
+       asm volatile("mrs %0, pmccntr_el0" : "=r"(tsc));
+       return tsc;
+}
+#endif
+
+static inline uint64_t
+rte_rdtsc_precise(void)
+{
+       asm volatile("isb" : : : "memory");
+       return rte_rdtsc();
+}
+
+static inline uint64_t
+rte_get_tsc_cycles(void) { return rte_rdtsc(); }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CYCLES_ARM64_H_ */
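
Not part of the patch: a sketch of timing a code section with the counters
above. By default the value is in generic-timer ticks (cntvct_el0); with
RTE_ARM_EAL_RDTSC_USE_PMU it is in CPU cycles.

	#include <stdint.h>
	#include <rte_cycles.h>

	static uint64_t
	measure_ticks(void (*fn)(void))
	{
		uint64_t start = rte_rdtsc_precise(); /* isb-serialized read */

		fn();
		return rte_rdtsc_precise() - start;
	}
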
diff --git a/lib/librte_eal/arm/include/rte_io.h b/lib/librte_eal/arm/include/rte_io.h
new file mode 100644 (file)
index 0000000..f4e66e6
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _RTE_IO_ARM_H_
+#define _RTE_IO_ARM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef RTE_ARCH_64
+#include "rte_io_64.h"
+#else
+#include "generic/rte_io.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IO_ARM_H_ */
diff --git a/lib/librte_eal/arm/include/rte_io_64.h b/lib/librte_eal/arm/include/rte_io_64.h
new file mode 100644 (file)
index 0000000..e534624
--- /dev/null
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _RTE_IO_ARM64_H_
+#define _RTE_IO_ARM64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#define RTE_OVERRIDE_IO_H
+
+#include "generic/rte_io.h"
+#include "rte_atomic_64.h"
+
+static __rte_always_inline uint8_t
+rte_read8_relaxed(const volatile void *addr)
+{
+       uint8_t val;
+
+       asm volatile(
+                   "ldrb %w[val], [%x[addr]]"
+                   : [val] "=r" (val)
+                   : [addr] "r" (addr));
+       return val;
+}
+
+static __rte_always_inline uint16_t
+rte_read16_relaxed(const volatile void *addr)
+{
+       uint16_t val;
+
+       asm volatile(
+                   "ldrh %w[val], [%x[addr]]"
+                   : [val] "=r" (val)
+                   : [addr] "r" (addr));
+       return val;
+}
+
+static __rte_always_inline uint32_t
+rte_read32_relaxed(const volatile void *addr)
+{
+       uint32_t val;
+
+       asm volatile(
+                   "ldr %w[val], [%x[addr]]"
+                   : [val] "=r" (val)
+                   : [addr] "r" (addr));
+       return val;
+}
+
+static __rte_always_inline uint64_t
+rte_read64_relaxed(const volatile void *addr)
+{
+       uint64_t val;
+
+       asm volatile(
+                   "ldr %x[val], [%x[addr]]"
+                   : [val] "=r" (val)
+                   : [addr] "r" (addr));
+       return val;
+}
+
+static __rte_always_inline void
+rte_write8_relaxed(uint8_t val, volatile void *addr)
+{
+       asm volatile(
+                   "strb %w[val], [%x[addr]]"
+                   :
+                   : [val] "r" (val), [addr] "r" (addr));
+}
+
+static __rte_always_inline void
+rte_write16_relaxed(uint16_t val, volatile void *addr)
+{
+       asm volatile(
+                   "strh %w[val], [%x[addr]]"
+                   :
+                   : [val] "r" (val), [addr] "r" (addr));
+}
+
+static __rte_always_inline void
+rte_write32_relaxed(uint32_t val, volatile void *addr)
+{
+       asm volatile(
+                   "str %w[val], [%x[addr]]"
+                   :
+                   : [val] "r" (val), [addr] "r" (addr));
+}
+
+static __rte_always_inline void
+rte_write64_relaxed(uint64_t val, volatile void *addr)
+{
+       asm volatile(
+                   "str %x[val], [%x[addr]]"
+                   :
+                   : [val] "r" (val), [addr] "r" (addr));
+}
+
+static __rte_always_inline uint8_t
+rte_read8(const volatile void *addr)
+{
+       uint8_t val;
+       val = rte_read8_relaxed(addr);
+       rte_io_rmb();
+       return val;
+}
+
+static __rte_always_inline uint16_t
+rte_read16(const volatile void *addr)
+{
+       uint16_t val;
+       val = rte_read16_relaxed(addr);
+       rte_io_rmb();
+       return val;
+}
+
+static __rte_always_inline uint32_t
+rte_read32(const volatile void *addr)
+{
+       uint32_t val;
+       val = rte_read32_relaxed(addr);
+       rte_io_rmb();
+       return val;
+}
+
+static __rte_always_inline uint64_t
+rte_read64(const volatile void *addr)
+{
+       uint64_t val;
+       val = rte_read64_relaxed(addr);
+       rte_io_rmb();
+       return val;
+}
+
+static __rte_always_inline void
+rte_write8(uint8_t value, volatile void *addr)
+{
+       rte_io_wmb();
+       rte_write8_relaxed(value, addr);
+}
+
+static __rte_always_inline void
+rte_write16(uint16_t value, volatile void *addr)
+{
+       rte_io_wmb();
+       rte_write16_relaxed(value, addr);
+}
+
+static __rte_always_inline void
+rte_write32(uint32_t value, volatile void *addr)
+{
+       rte_io_wmb();
+       rte_write32_relaxed(value, addr);
+}
+
+static __rte_always_inline void
+rte_write64(uint64_t value, volatile void *addr)
+{
+       rte_io_wmb();
+       rte_write64_relaxed(value, addr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IO_ARM64_H_ */
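
Not part of the patch: an MMIO sketch built on the accessors above. The
register offsets and the mapped BAR pointer are hypothetical; the point is
that rte_write32()/rte_read32() wrap the relaxed variants with the I/O
barriers, as defined above.

	#include <stdint.h>
	#include <rte_io.h>

	#define HYP_CTRL_REG 0x00 /* hypothetical control register offset */
	#define HYP_STAT_REG 0x04 /* hypothetical status register offset */

	static int
	start_device(volatile uint8_t *bar) /* BAR mapped by the bus driver */
	{
		/* rte_write32() issues rte_io_wmb() before the relaxed store. */
		rte_write32(1, bar + HYP_CTRL_REG);
		/* rte_read32() issues rte_io_rmb() after the relaxed load. */
		return (int)(rte_read32(bar + HYP_STAT_REG) & 0x1);
	}
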
diff --git a/lib/librte_eal/arm/include/rte_mcslock.h b/lib/librte_eal/arm/include/rte_mcslock.h
new file mode 100644 (file)
index 0000000..dd1fe13
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_MCSLOCK_ARM_H_
+#define _RTE_MCSLOCK_ARM_H_
+
+#ifndef RTE_FORCE_INTRINSICS
+#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_mcslock.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MCSLOCK_ARM_H_ */
diff --git a/lib/librte_eal/arm/include/rte_memcpy.h b/lib/librte_eal/arm/include/rte_memcpy.h
new file mode 100644 (file)
index 0000000..47dea9a
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_MEMCPY_ARM_H_
+#define _RTE_MEMCPY_ARM_H_
+
+#ifdef RTE_ARCH_64
+#include <rte_memcpy_64.h>
+#else
+#include <rte_memcpy_32.h>
+#endif
+
+#endif /* _RTE_MEMCPY_ARM_H_ */
diff --git a/lib/librte_eal/arm/include/rte_memcpy_32.h b/lib/librte_eal/arm/include/rte_memcpy_32.h
new file mode 100644 (file)
index 0000000..eb02c3b
--- /dev/null
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_MEMCPY_ARM32_H_
+#define _RTE_MEMCPY_ARM32_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_memcpy.h"
+
+#ifdef RTE_ARCH_ARM_NEON_MEMCPY
+
+#ifndef RTE_MACHINE_CPUFLAG_NEON
+#error "Cannot optimize memcpy with NEON as the CPU does not seem to support it"
+#endif
+
+/* ARM NEON Intrinsics are used to copy data */
+#include <arm_neon.h>
+
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+       vst1q_u8(dst, vld1q_u8(src));
+}
+
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile (
+               "vld1.8 {d0-d3}, [%0]\n\t"
+               "vst1.8 {d0-d3}, [%1]\n\t"
+               : "+r" (src), "+r" (dst)
+               : : "memory", "d0", "d1", "d2", "d3");
+}
+
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile (
+               "vld1.8 {d0-d3}, [%0]!\n\t"
+               "vld1.8 {d4-d5}, [%0]\n\t"
+               "vst1.8 {d0-d3}, [%1]!\n\t"
+               "vst1.8 {d4-d5}, [%1]\n\t"
+               : "+r" (src), "+r" (dst)
+               :
+               : "memory", "d0", "d1", "d2", "d3", "d4", "d5");
+}
+
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile (
+               "vld1.8 {d0-d3}, [%0]!\n\t"
+               "vld1.8 {d4-d7}, [%0]\n\t"
+               "vst1.8 {d0-d3}, [%1]!\n\t"
+               "vst1.8 {d4-d7}, [%1]\n\t"
+               : "+r" (src), "+r" (dst)
+               :
+               : "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7");
+}
+
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile ("pld [%0, #64]" : : "r" (src));
+       asm volatile (
+               "vld1.8 {d0-d3},   [%0]!\n\t"
+               "vld1.8 {d4-d7},   [%0]!\n\t"
+               "vld1.8 {d8-d11},  [%0]!\n\t"
+               "vld1.8 {d12-d15}, [%0]\n\t"
+               "vst1.8 {d0-d3},   [%1]!\n\t"
+               "vst1.8 {d4-d7},   [%1]!\n\t"
+               "vst1.8 {d8-d11},  [%1]!\n\t"
+               "vst1.8 {d12-d15}, [%1]\n\t"
+               : "+r" (src), "+r" (dst)
+               :
+               : "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+               "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");
+}
+
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile ("pld [%0,  #64]" : : "r" (src));
+       asm volatile ("pld [%0, #128]" : : "r" (src));
+       asm volatile ("pld [%0, #192]" : : "r" (src));
+       asm volatile ("pld [%0, #256]" : : "r" (src));
+       asm volatile ("pld [%0, #320]" : : "r" (src));
+       asm volatile ("pld [%0, #384]" : : "r" (src));
+       asm volatile ("pld [%0, #448]" : : "r" (src));
+       asm volatile (
+               "vld1.8 {d0-d3},   [%0]!\n\t"
+               "vld1.8 {d4-d7},   [%0]!\n\t"
+               "vld1.8 {d8-d11},  [%0]!\n\t"
+               "vld1.8 {d12-d15}, [%0]!\n\t"
+               "vld1.8 {d16-d19}, [%0]!\n\t"
+               "vld1.8 {d20-d23}, [%0]!\n\t"
+               "vld1.8 {d24-d27}, [%0]!\n\t"
+               "vld1.8 {d28-d31}, [%0]\n\t"
+               "vst1.8 {d0-d3},   [%1]!\n\t"
+               "vst1.8 {d4-d7},   [%1]!\n\t"
+               "vst1.8 {d8-d11},  [%1]!\n\t"
+               "vst1.8 {d12-d15}, [%1]!\n\t"
+               "vst1.8 {d16-d19}, [%1]!\n\t"
+               "vst1.8 {d20-d23}, [%1]!\n\t"
+               "vst1.8 {d24-d27}, [%1]!\n\t"
+               "vst1.8 {d28-d31}, [%1]!\n\t"
+               : "+r" (src), "+r" (dst)
+               :
+               : "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+               "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+               "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+               "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31");
+}
+
+#define rte_memcpy(dst, src, n)              \
+       __extension__ ({                     \
+       (__builtin_constant_p(n)) ?          \
+       memcpy((dst), (src), (n)) :          \
+       rte_memcpy_func((dst), (src), (n)); })
+
+static inline void *
+rte_memcpy_func(void *dst, const void *src, size_t n)
+{
+       void *ret = dst;
+
+       /* We can't copy < 16 bytes using NEON registers so do it manually. */
+       if (n < 16) {
+               if (n & 0x01) {
+                       *(uint8_t *)dst = *(const uint8_t *)src;
+                       dst = (uint8_t *)dst + 1;
+                       src = (const uint8_t *)src + 1;
+               }
+               if (n & 0x02) {
+                       *(uint16_t *)dst = *(const uint16_t *)src;
+                       dst = (uint16_t *)dst + 1;
+                       src = (const uint16_t *)src + 1;
+               }
+               if (n & 0x04) {
+                       *(uint32_t *)dst = *(const uint32_t *)src;
+                       dst = (uint32_t *)dst + 1;
+                       src = (const uint32_t *)src + 1;
+               }
+               if (n & 0x08) {
+                       /* ARMv7 cannot handle unaligned access to long long
+                        * (uint64_t). Therefore two uint32_t operations are
+                        * used.
+                        */
+                       *(uint32_t *)dst = *(const uint32_t *)src;
+                       dst = (uint32_t *)dst + 1;
+                       src = (const uint32_t *)src + 1;
+                       *(uint32_t *)dst = *(const uint32_t *)src;
+               }
+               return ret;
+       }
+
+       /* Special fast cases for <= 128 bytes */
+       if (n <= 32) {
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst - 16 + n,
+                       (const uint8_t *)src - 16 + n);
+               return ret;
+       }
+
+       if (n <= 64) {
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov32((uint8_t *)dst - 32 + n,
+                       (const uint8_t *)src - 32 + n);
+               return ret;
+       }
+
+       if (n <= 128) {
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov64((uint8_t *)dst - 64 + n,
+                       (const uint8_t *)src - 64 + n);
+               return ret;
+       }
+
+       /*
+        * For large copies of more than 128 bytes, this combination of 256,
+        * 64 and 16 byte copies was found to be faster than using 128 and
+        * 32 byte copies.
+        */
+       for ( ; n >= 256; n -= 256) {
+               rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+               dst = (uint8_t *)dst + 256;
+               src = (const uint8_t *)src + 256;
+       }
+
+       /*
+        * We split the remaining bytes (which will be less than 256) into
+        * 64byte (2^6) chunks.
+        * Using incrementing integers in the case labels of a switch statement
+        * encourages the compiler to use a jump table. To get incrementing
+        * integers, we shift the 2 relevant bits to the LSB position to first
+        * get decrementing integers, and then subtract.
+        */
+       switch (3 - (n >> 6)) {
+       case 0x00:
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               n -= 64;
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;      /* fallthrough */
+       case 0x01:
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               n -= 64;
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;      /* fallthrough */
+       case 0x02:
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               n -= 64;
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;      /* fallthrough */
+       default:
+               break;
+       }
+
+       /*
+        * We split the remaining bytes (which will be less than 64) into
+        * 16byte (2^4) chunks, using the same switch structure as above.
+        */
+       switch (3 - (n >> 4)) {
+       case 0x00:
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               n -= 16;
+               dst = (uint8_t *)dst + 16;
+               src = (const uint8_t *)src + 16;      /* fallthrough */
+       case 0x01:
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               n -= 16;
+               dst = (uint8_t *)dst + 16;
+               src = (const uint8_t *)src + 16;      /* fallthrough */
+       case 0x02:
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               n -= 16;
+               dst = (uint8_t *)dst + 16;
+               src = (const uint8_t *)src + 16;      /* fallthrough */
+       default:
+               break;
+       }
+
+       /* Copy any remaining bytes, without going beyond end of buffers */
+       if (n != 0)
+               rte_mov16((uint8_t *)dst - 16 + n,
+                       (const uint8_t *)src - 16 + n);
+       return ret;
+}
+
+#else
+
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 16);
+}
+
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 32);
+}
+
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 48);
+}
+
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 64);
+}
+
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 128);
+}
+
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 256);
+}
+
+static inline void *
+rte_memcpy(void *dst, const void *src, size_t n)
+{
+       return memcpy(dst, src, n);
+}
+
+#endif /* RTE_ARCH_ARM_NEON_MEMCPY */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_ARM32_H_ */
diff --git a/lib/librte_eal/arm/include/rte_memcpy_64.h b/lib/librte_eal/arm/include/rte_memcpy_64.h
new file mode 100644 (file)
index 0000000..85ad587
--- /dev/null
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Cavium, Inc
+ */
+
+#ifndef _RTE_MEMCPY_ARM64_H_
+#define _RTE_MEMCPY_ARM64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <string.h>
+
+#include "generic/rte_memcpy.h"
+
+#ifdef RTE_ARCH_ARM64_MEMCPY
+#include <rte_common.h>
+#include <rte_branch_prediction.h>
+
+/*
+ * Memory copy performance differs across AArch64 micro-architectures, and
+ * recent glibc versions (e.g. 2.23 or later) provide better memcpy()
+ * performance than older ones. Using a more recent glibc is always
+ * suggested when possible, as the entire system benefits from it.
+ *
+ * This implementation improves memory copy on some AArch64
+ * micro-architectures when an old glibc (e.g. 2.19, 2.17...) is being used.
+ * It is disabled by default and needs "RTE_ARCH_ARM64_MEMCPY" defined to be
+ * activated. It does not always provide better performance than memcpy(), so
+ * users need to run the unit test "memcpy_perf_autotest" and customize the
+ * parameters in the customization section below for best performance.
+ *
+ * The compiler version also impacts rte_memcpy() performance: on some
+ * platforms and with the same code, GCC 7.2.0 compiled binaries have been
+ * observed to outperform GCC 4.8.5 compiled binaries.
+ */
+
+/**************************************
+ * Beginning of customization section
+ **************************************/
+#ifndef RTE_ARM64_MEMCPY_ALIGN_MASK
+#define RTE_ARM64_MEMCPY_ALIGN_MASK ((RTE_CACHE_LINE_SIZE >> 3) - 1)
+#endif
+
+#ifndef RTE_ARM64_MEMCPY_STRICT_ALIGN
+/* Only src unalignment will be treated as unaligned copy */
+#define RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src) \
+       ((uintptr_t)(src) & RTE_ARM64_MEMCPY_ALIGN_MASK)
+#else
+/* Both dst and src unalignment will be treated as unaligned copy */
+#define RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src) \
+       (((uintptr_t)(dst) | (uintptr_t)(src)) & RTE_ARM64_MEMCPY_ALIGN_MASK)
+#endif
+
+
+/*
+ * If the copy size is larger than the threshold, memcpy() will be used.
+ * Run "memcpy_perf_autotest" to determine the proper threshold.
+ */
+#ifdef RTE_ARM64_MEMCPY_ALIGNED_THRESHOLD
+#define USE_ALIGNED_RTE_MEMCPY(dst, src, n) \
+(!RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src) && \
+n <= (size_t)RTE_ARM64_MEMCPY_ALIGNED_THRESHOLD)
+#else
+#define USE_ALIGNED_RTE_MEMCPY(dst, src, n) \
+(!RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src))
+#endif
+#ifdef RTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD
+#define USE_UNALIGNED_RTE_MEMCPY(dst, src, n) \
+(RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src) && \
+n <= (size_t)RTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD)
+#else
+#define USE_UNALIGNED_RTE_MEMCPY(dst, src, n) \
+(RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src))
+#endif
+/*
+ * The logic of USE_RTE_MEMCPY() can also be modified to best fit the platform.
+ */
+#if defined(RTE_ARM64_MEMCPY_ALIGNED_THRESHOLD) \
+|| defined(RTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD)
+#define USE_RTE_MEMCPY(dst, src, n) \
+(USE_ALIGNED_RTE_MEMCPY(dst, src, n) || USE_UNALIGNED_RTE_MEMCPY(dst, src, n))
+#else
+#define USE_RTE_MEMCPY(dst, src, n) (1)
+#endif
+/**************************************
+ * End of customization section
+ **************************************/
+
+
+#if RTE_CC_IS_GNU && !defined RTE_ARM64_MEMCPY_SKIP_GCC_VER_CHECK
+#if (GCC_VERSION < 50400)
+#warning "The GCC version is quite old, which may result in sub-optimal \
+performance of the compiled code. It is suggested that at least GCC 5.4.0 \
+be used."
+#endif
+#endif
+
+static __rte_always_inline
+void rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+       __uint128_t *dst128 = (__uint128_t *)dst;
+       const __uint128_t *src128 = (const __uint128_t *)src;
+       *dst128 = *src128;
+}
+
+static __rte_always_inline
+void rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+       __uint128_t *dst128 = (__uint128_t *)dst;
+       const __uint128_t *src128 = (const __uint128_t *)src;
+       const __uint128_t x0 = src128[0], x1 = src128[1];
+       dst128[0] = x0;
+       dst128[1] = x1;
+}
+
+static __rte_always_inline
+void rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+       __uint128_t *dst128 = (__uint128_t *)dst;
+       const __uint128_t *src128 = (const __uint128_t *)src;
+       const __uint128_t x0 = src128[0], x1 = src128[1], x2 = src128[2];
+       dst128[0] = x0;
+       dst128[1] = x1;
+       dst128[2] = x2;
+}
+
+static __rte_always_inline
+void rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+       __uint128_t *dst128 = (__uint128_t *)dst;
+       const __uint128_t *src128 = (const __uint128_t *)src;
+       const __uint128_t
+               x0 = src128[0], x1 = src128[1], x2 = src128[2], x3 = src128[3];
+       dst128[0] = x0;
+       dst128[1] = x1;
+       dst128[2] = x2;
+       dst128[3] = x3;
+}
+
+static __rte_always_inline
+void rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+       __uint128_t *dst128 = (__uint128_t *)dst;
+       const __uint128_t *src128 = (const __uint128_t *)src;
+       /* Keep the declaration & copy sequence below for optimized instruction scheduling */
+       const __uint128_t
+               x0 = src128[0], x1 = src128[1], x2 = src128[2], x3 = src128[3];
+       dst128[0] = x0;
+       __uint128_t x4 = src128[4];
+       dst128[1] = x1;
+       __uint128_t x5 = src128[5];
+       dst128[2] = x2;
+       __uint128_t x6 = src128[6];
+       dst128[3] = x3;
+       __uint128_t x7 = src128[7];
+       dst128[4] = x4;
+       dst128[5] = x5;
+       dst128[6] = x6;
+       dst128[7] = x7;
+}
+
+static __rte_always_inline
+void rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov128(dst, src);
+       rte_mov128(dst + 128, src + 128);
+}
+
+static __rte_always_inline void
+rte_memcpy_lt16(uint8_t *dst, const uint8_t *src, size_t n)
+{
+       if (n & 0x08) {
+               /* copy 8 ~ 15 bytes */
+               *(uint64_t *)dst = *(const uint64_t *)src;
+               *(uint64_t *)(dst - 8 + n) = *(const uint64_t *)(src - 8 + n);
+       } else if (n & 0x04) {
+               /* copy 4 ~ 7 bytes */
+               *(uint32_t *)dst = *(const uint32_t *)src;
+               *(uint32_t *)(dst - 4 + n) = *(const uint32_t *)(src - 4 + n);
+       } else if (n & 0x02) {
+               /* copy 2 ~ 3 bytes */
+               *(uint16_t *)dst = *(const uint16_t *)src;
+               *(uint16_t *)(dst - 2 + n) = *(const uint16_t *)(src - 2 + n);
+       } else if (n & 0x01) {
+               /* copy 1 byte */
+               *dst = *src;
+       }
+}
+
+static __rte_always_inline
+void rte_memcpy_ge16_lt128(uint8_t *dst, const uint8_t *src, size_t n)
+{
+       if (n < 64) {
+               if (n == 16) {
+                       rte_mov16(dst, src);
+               } else if (n <= 32) {
+                       rte_mov16(dst, src);
+                       rte_mov16(dst - 16 + n, src - 16 + n);
+               } else if (n <= 48) {
+                       rte_mov32(dst, src);
+                       rte_mov16(dst - 16 + n, src - 16 + n);
+               } else {
+                       rte_mov48(dst, src);
+                       rte_mov16(dst - 16 + n, src - 16 + n);
+               }
+       } else {
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               if (n > 48 + 64)
+                       rte_mov64(dst - 64 + n, src - 64 + n);
+               else if (n > 32 + 64)
+                       rte_mov48(dst - 48 + n, src - 48 + n);
+               else if (n > 16 + 64)
+                       rte_mov32(dst - 32 + n, src - 32 + n);
+               else if (n > 64)
+                       rte_mov16(dst - 16 + n, src - 16 + n);
+       }
+}
+
+static __rte_always_inline
+void rte_memcpy_ge128(uint8_t *dst, const uint8_t *src, size_t n)
+{
+       do {
+               rte_mov128(dst, src);
+               src += 128;
+               dst += 128;
+               n -= 128;
+       } while (likely(n >= 128));
+
+       if (likely(n)) {
+               if (n <= 16)
+                       rte_mov16(dst - 16 + n, src - 16 + n);
+               else if (n <= 32)
+                       rte_mov32(dst - 32 + n, src - 32 + n);
+               else if (n <= 48)
+                       rte_mov48(dst - 48 + n, src - 48 + n);
+               else if (n <= 64)
+                       rte_mov64(dst - 64 + n, src - 64 + n);
+               else
+                       rte_memcpy_ge16_lt128(dst, src, n);
+       }
+}
+
+static __rte_always_inline
+void rte_memcpy_ge16_lt64(uint8_t *dst, const uint8_t *src, size_t n)
+{
+       if (n == 16) {
+               rte_mov16(dst, src);
+       } else if (n <= 32) {
+               rte_mov16(dst, src);
+               rte_mov16(dst - 16 + n, src - 16 + n);
+       } else if (n <= 48) {
+               rte_mov32(dst, src);
+               rte_mov16(dst - 16 + n, src - 16 + n);
+       } else {
+               rte_mov48(dst, src);
+               rte_mov16(dst - 16 + n, src - 16 + n);
+       }
+}
+
+static __rte_always_inline
+void rte_memcpy_ge64(uint8_t *dst, const uint8_t *src, size_t n)
+{
+       do {
+               rte_mov64(dst, src);
+               src += 64;
+               dst += 64;
+               n -= 64;
+       } while (likely(n >= 64));
+
+       if (likely(n)) {
+               if (n <= 16)
+                       rte_mov16(dst - 16 + n, src - 16 + n);
+               else if (n <= 32)
+                       rte_mov32(dst - 32 + n, src - 32 + n);
+               else if (n <= 48)
+                       rte_mov48(dst - 48 + n, src - 48 + n);
+               else
+                       rte_mov64(dst - 64 + n, src - 64 + n);
+       }
+}
+
+#if RTE_CACHE_LINE_SIZE >= 128
+static __rte_always_inline
+void *rte_memcpy(void *dst, const void *src, size_t n)
+{
+       if (n < 16) {
+               rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
+               return dst;
+       }
+       if (n < 128) {
+               rte_memcpy_ge16_lt128((uint8_t *)dst, (const uint8_t *)src, n);
+               return dst;
+       }
+       __builtin_prefetch(src, 0, 0);
+       __builtin_prefetch(dst, 1, 0);
+       if (likely(USE_RTE_MEMCPY(dst, src, n))) {
+               rte_memcpy_ge128((uint8_t *)dst, (const uint8_t *)src, n);
+               return dst;
+       } else
+               return memcpy(dst, src, n);
+}
+
+#else
+static __rte_always_inline
+void *rte_memcpy(void *dst, const void *src, size_t n)
+{
+       if (n < 16) {
+               rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
+               return dst;
+       }
+       if (n < 64) {
+               rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src, n);
+               return dst;
+       }
+       __builtin_prefetch(src, 0, 0);
+       __builtin_prefetch(dst, 1, 0);
+       if (likely(USE_RTE_MEMCPY(dst, src, n))) {
+               rte_memcpy_ge64((uint8_t *)dst, (const uint8_t *)src, n);
+               return dst;
+       } else
+               return memcpy(dst, src, n);
+}
+#endif /* RTE_CACHE_LINE_SIZE >= 128 */
+
+#else
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 16);
+}
+
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 32);
+}
+
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 48);
+}
+
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 64);
+}
+
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 128);
+}
+
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+       memcpy(dst, src, 256);
+}
+
+#define rte_memcpy(d, s, n)    memcpy((d), (s), (n))
+
+#endif /* RTE_ARCH_ARM64_MEMCPY */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_ARM_64_H_ */
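
The routines above size-class the copy: fixed-width block moves built on __uint128_t loads and stores, tail handling done with one overlapping move instead of a byte loop, and a prefetched main loop for large sizes. A minimal usage sketch, not part of this patch (the function name, buffers and the 200-byte size are illustrative):

        #include <stdint.h>
        #include <rte_memcpy.h>

        /* A 200-byte copy runs the large-copy loop and finishes with a
         * single overlapping tail move; no per-byte loop is executed.
         */
        static void
        copy_record(uint8_t *dst, const uint8_t *src)
        {
                rte_memcpy(dst, src, 200);
        }
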
diff --git a/lib/librte_eal/arm/include/rte_pause.h b/lib/librte_eal/arm/include/rte_pause.h
new file mode 100644 (file)
index 0000000..6c7002a
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _RTE_PAUSE_ARM_H_
+#define _RTE_PAUSE_ARM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef RTE_ARCH_64
+#include <rte_pause_64.h>
+#else
+#include <rte_pause_32.h>
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_ARM_H_ */
diff --git a/lib/librte_eal/arm/include/rte_pause_32.h b/lib/librte_eal/arm/include/rte_pause_32.h
new file mode 100644 (file)
index 0000000..d4768c7
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _RTE_PAUSE_ARM32_H_
+#define _RTE_PAUSE_ARM32_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_pause.h"
+
+static inline void rte_pause(void)
+{
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_ARM32_H_ */
diff --git a/lib/librte_eal/arm/include/rte_pause_64.h b/lib/librte_eal/arm/include/rte_pause_64.h
new file mode 100644 (file)
index 0000000..e87d10b
--- /dev/null
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_PAUSE_ARM64_H_
+#define _RTE_PAUSE_ARM64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+#ifdef RTE_ARM_USE_WFE
+#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
+#endif
+
+#include "generic/rte_pause.h"
+
+static inline void rte_pause(void)
+{
+       asm volatile("yield" ::: "memory");
+}
+
+#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
+
+/* Send an event to quit WFE. */
+#define __SEVL() { asm volatile("sevl" : : : "memory"); }
+
+/* Put processor into low power WFE(Wait For Event) state. */
+#define __WFE() { asm volatile("wfe" : : : "memory"); }
+
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+               int memorder)
+{
+       uint16_t value;
+
+       assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+       /*
+        * Atomic exclusive load from addr: it returns the 16-bit content of
+        * *addr while making it 'monitored'. When it is written by someone
+        * else, the 'monitored' state is cleared and an event is generated
+        * implicitly to exit WFE.
+        */
+#define __LOAD_EXC_16(src, dst, memorder) {               \
+       if (memorder == __ATOMIC_RELAXED) {               \
+               asm volatile("ldxrh %w[tmp], [%x[addr]]"  \
+                       : [tmp] "=&r" (dst)               \
+                       : [addr] "r"(src)                 \
+                       : "memory");                      \
+       } else {                                          \
+               asm volatile("ldaxrh %w[tmp], [%x[addr]]" \
+                       : [tmp] "=&r" (dst)               \
+                       : [addr] "r"(src)                 \
+                       : "memory");                      \
+       } }
+
+       __LOAD_EXC_16(addr, value, memorder)
+       if (value != expected) {
+               __SEVL()
+               do {
+                       __WFE()
+                       __LOAD_EXC_16(addr, value, memorder)
+               } while (value != expected);
+       }
+#undef __LOAD_EXC_16
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
+               int memorder)
+{
+       uint32_t value;
+
+       assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+       /*
+        * Atomic exclusive load from addr: it returns the 32-bit content of
+        * *addr while making it 'monitored'. When it is written by someone
+        * else, the 'monitored' state is cleared and an event is generated
+        * implicitly to exit WFE.
+        */
+#define __LOAD_EXC_32(src, dst, memorder) {              \
+       if (memorder == __ATOMIC_RELAXED) {              \
+               asm volatile("ldxr %w[tmp], [%x[addr]]"  \
+                       : [tmp] "=&r" (dst)              \
+                       : [addr] "r"(src)                \
+                       : "memory");                     \
+       } else {                                         \
+               asm volatile("ldaxr %w[tmp], [%x[addr]]" \
+                       : [tmp] "=&r" (dst)              \
+                       : [addr] "r"(src)                \
+                       : "memory");                     \
+       } }
+
+       __LOAD_EXC_32(addr, value, memorder)
+       if (value != expected) {
+               __SEVL()
+               do {
+                       __WFE()
+                       __LOAD_EXC_32(addr, value, memorder)
+               } while (value != expected);
+       }
+#undef __LOAD_EXC_32
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
+               int memorder)
+{
+       uint64_t value;
+
+       assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+       /*
+        * Atomic exclusive load from addr: it returns the 64-bit content of
+        * *addr while making it 'monitored'. When it is written by someone
+        * else, the 'monitored' state is cleared and an event is generated
+        * implicitly to exit WFE.
+        */
+#define __LOAD_EXC_64(src, dst, memorder) {              \
+       if (memorder == __ATOMIC_RELAXED) {              \
+               asm volatile("ldxr %x[tmp], [%x[addr]]"  \
+                       : [tmp] "=&r" (dst)              \
+                       : [addr] "r"(src)                \
+                       : "memory");                     \
+       } else {                                         \
+               asm volatile("ldaxr %x[tmp], [%x[addr]]" \
+                       : [tmp] "=&r" (dst)              \
+                       : [addr] "r"(src)                \
+                       : "memory");                     \
+       } }
+
+       __LOAD_EXC_64(addr, value, memorder)
+       if (value != expected) {
+               __SEVL()
+               do {
+                       __WFE()
+                       __LOAD_EXC_64(addr, value, memorder)
+               } while (value != expected);
+       }
+#undef __LOAD_EXC_64
+}
+
+#undef __SEVL
+#undef __WFE
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_ARM64_H_ */
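
The helpers above pair an exclusive load, which puts the address into the 'monitored' state, with WFE, so a polling core can sleep until the writer's store clears the monitor. A hedged usage sketch, assuming a flag released by another lcore (names are illustrative; the API was still experimental at this point):

        #include <rte_pause.h>

        static volatile uint32_t ready;         /* set to 1 by another lcore */

        static void
        wait_ready(void)
        {
                /* Sleeps in WFE (with RTE_ARM_USE_WFE) or spins until
                 * ready == 1; acquire pairs with the writer's release store.
                 */
                rte_wait_until_equal_32(&ready, 1, __ATOMIC_ACQUIRE);
        }
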
diff --git a/lib/librte_eal/arm/include/rte_prefetch.h b/lib/librte_eal/arm/include/rte_prefetch.h
new file mode 100644 (file)
index 0000000..27870c2
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_PREFETCH_ARM_H_
+#define _RTE_PREFETCH_ARM_H_
+
+#ifdef RTE_ARCH_64
+#include <rte_prefetch_64.h>
+#else
+#include <rte_prefetch_32.h>
+#endif
+
+#endif /* _RTE_PREFETCH_ARM_H_ */
diff --git a/lib/librte_eal/arm/include/rte_prefetch_32.h b/lib/librte_eal/arm/include/rte_prefetch_32.h
new file mode 100644 (file)
index 0000000..e53420a
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_PREFETCH_ARM32_H_
+#define _RTE_PREFETCH_ARM32_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_prefetch.h"
+
+static inline void rte_prefetch0(const volatile void *p)
+{
+       asm volatile ("pld [%0]" : : "r" (p));
+}
+
+static inline void rte_prefetch1(const volatile void *p)
+{
+       asm volatile ("pld [%0]" : : "r" (p));
+}
+
+static inline void rte_prefetch2(const volatile void *p)
+{
+       asm volatile ("pld [%0]" : : "r" (p));
+}
+
+static inline void rte_prefetch_non_temporal(const volatile void *p)
+{
+       /* non-temporal version not available, fall back to rte_prefetch0 */
+       rte_prefetch0(p);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PREFETCH_ARM32_H_ */
diff --git a/lib/librte_eal/arm/include/rte_prefetch_64.h b/lib/librte_eal/arm/include/rte_prefetch_64.h
new file mode 100644 (file)
index 0000000..fc2b391
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Cavium, Inc
+ */
+
+#ifndef _RTE_PREFETCH_ARM_64_H_
+#define _RTE_PREFETCH_ARM_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_prefetch.h"
+
+static inline void rte_prefetch0(const volatile void *p)
+{
+       asm volatile ("PRFM PLDL1KEEP, [%0]" : : "r" (p));
+}
+
+static inline void rte_prefetch1(const volatile void *p)
+{
+       asm volatile ("PRFM PLDL2KEEP, [%0]" : : "r" (p));
+}
+
+static inline void rte_prefetch2(const volatile void *p)
+{
+       asm volatile ("PRFM PLDL3KEEP, [%0]" : : "r" (p));
+}
+
+static inline void rte_prefetch_non_temporal(const volatile void *p)
+{
+       asm volatile ("PRFM PLDL1STRM, [%0]" : : "r" (p));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PREFETCH_ARM_64_H_ */
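
rte_prefetch0/1/2 map to PRFM PLDL1/L2/L3KEEP hints and the non-temporal variant to the L1 streaming hint, pulling data toward the core ahead of use. The typical pattern, sketched (the lookahead distance of 8 elements is illustrative and workload-dependent):

        #include <stddef.h>
        #include <stdint.h>
        #include <rte_prefetch.h>

        static uint64_t
        sum_u64(const uint64_t *a, size_t n)
        {
                uint64_t sum = 0;
                size_t i;

                for (i = 0; i < n; i++) {
                        if (i + 8 < n)
                                rte_prefetch0(&a[i + 8]); /* warm L1 ahead */
                        sum += a[i];
                }
                return sum;
        }
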
diff --git a/lib/librte_eal/arm/include/rte_rwlock.h b/lib/librte_eal/arm/include/rte_rwlock.h
new file mode 100644 (file)
index 0000000..18bb37b
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ */
+/* copied from ppc_64 */
+
+#ifndef _RTE_RWLOCK_ARM_H_
+#define _RTE_RWLOCK_ARM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_rwlock.h"
+
+static inline void
+rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+{
+       rte_rwlock_read_lock(rwl);
+}
+
+static inline void
+rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+{
+       rte_rwlock_read_unlock(rwl);
+}
+
+static inline void
+rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+{
+       rte_rwlock_write_lock(rwl);
+}
+
+static inline void
+rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+{
+       rte_rwlock_write_unlock(rwl);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RWLOCK_ARM_H_ */
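
With no hardware transactional memory on this architecture, the _tm variants simply alias the plain rwlock operations, keeping code written against the transactional API portable. A minimal sketch (the protected counter is illustrative):

        #include <stdint.h>
        #include <rte_rwlock.h>

        static rte_rwlock_t stats_lock = RTE_RWLOCK_INITIALIZER;
        static uint64_t stats;

        static uint64_t
        read_stats(void)
        {
                uint64_t v;

                rte_rwlock_read_lock_tm(&stats_lock); /* plain read lock here */
                v = stats;
                rte_rwlock_read_unlock_tm(&stats_lock);
                return v;
        }
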
diff --git a/lib/librte_eal/arm/include/rte_spinlock.h b/lib/librte_eal/arm/include/rte_spinlock.h
new file mode 100644 (file)
index 0000000..1a6916b
--- /dev/null
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
+ */
+
+#ifndef _RTE_SPINLOCK_ARM_H_
+#define _RTE_SPINLOCK_ARM_H_
+
+#ifndef RTE_FORCE_INTRINSICS
+#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_spinlock.h"
+
+static inline int rte_tm_supported(void)
+{
+       return 0;
+}
+
+static inline void
+rte_spinlock_lock_tm(rte_spinlock_t *sl)
+{
+       rte_spinlock_lock(sl); /* fall-back */
+}
+
+static inline int
+rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+{
+       return rte_spinlock_trylock(sl);
+}
+
+static inline void
+rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+{
+       rte_spinlock_unlock(sl);
+}
+
+static inline void
+rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
+{
+       rte_spinlock_recursive_lock(slr); /* fall-back */
+}
+
+static inline void
+rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
+{
+       rte_spinlock_recursive_unlock(slr);
+}
+
+static inline int
+rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
+{
+       return rte_spinlock_recursive_trylock(slr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SPINLOCK_ARM_H_ */
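
Likewise for spinlocks: rte_tm_supported() reports 0 and every _tm entry point falls back to the ordinary lock, whereas an HTM-capable target could elide it. A sketch under those assumptions (the critical section contents are illustrative):

        #include <rte_spinlock.h>

        static rte_spinlock_t sl = RTE_SPINLOCK_INITIALIZER;

        static void
        update_shared_state(void)
        {
                rte_spinlock_lock_tm(&sl);  /* plain rte_spinlock_lock() here */
                /* ... critical section ... */
                rte_spinlock_unlock_tm(&sl);
        }
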
diff --git a/lib/librte_eal/arm/include/rte_ticketlock.h b/lib/librte_eal/arm/include/rte_ticketlock.h
new file mode 100644 (file)
index 0000000..e09fbd6
--- /dev/null
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_TICKETLOCK_ARM_H_
+#define _RTE_TICKETLOCK_ARM_H_
+
+#ifndef RTE_FORCE_INTRINSICS
+#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_ticketlock.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TICKETLOCK_ARM_H_ */
diff --git a/lib/librte_eal/arm/include/rte_vect.h b/lib/librte_eal/arm/include/rte_vect.h
new file mode 100644 (file)
index 0000000..454ac7e
--- /dev/null
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Cavium, Inc
+ */
+
+#ifndef _RTE_VECT_ARM_H_
+#define _RTE_VECT_ARM_H_
+
+#include <stdint.h>
+#include "generic/rte_vect.h"
+#include "rte_debug.h"
+#include "arm_neon.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int32x4_t xmm_t;
+
+#define        XMM_SIZE        (sizeof(xmm_t))
+#define        XMM_MASK        (XMM_SIZE - 1)
+
+typedef union rte_xmm {
+       xmm_t    x;
+       uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
+       uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
+       uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
+       uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
+       double   pd[XMM_SIZE / sizeof(double)];
+} __attribute__((aligned(16))) rte_xmm_t;
+
+#ifdef RTE_ARCH_ARM
+/* NEON intrinsic vqtbl1q_u8() is not supported in ARMv7-A (AArch32) */
+static inline uint8x16_t
+vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
+{
+       uint8_t i, pos;
+       rte_xmm_t rte_a, rte_b, rte_ret;
+
+       vst1q_u8(rte_a.u8, a);
+       vst1q_u8(rte_b.u8, b);
+
+       for (i = 0; i < 16; i++) {
+               pos = rte_b.u8[i];
+               if (pos < 16)
+                       rte_ret.u8[i] = rte_a.u8[pos];
+               else
+                       rte_ret.u8[i] = 0;
+       }
+
+       return vld1q_u8(rte_ret.u8);
+}
+
+static inline uint16_t
+vaddvq_u16(uint16x8_t a)
+{
+       uint32x4_t m = vpaddlq_u16(a);
+       uint64x2_t n = vpaddlq_u32(m);
+       uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);
+
+       return vget_lane_u32((uint32x2_t)o, 0);
+}
+
+#endif
+
+#if RTE_CC_IS_GNU && (GCC_VERSION < 70000)
+static inline uint32x4_t
+vcopyq_laneq_u32(uint32x4_t a, const int lane_a,
+                uint32x4_t b, const int lane_b)
+{
+       return vsetq_lane_u32(vgetq_lane_u32(b, lane_b), a, lane_a);
+}
+#endif
+
+#if defined(RTE_ARCH_ARM64)
+#if RTE_CC_IS_GNU && (GCC_VERSION < 70000)
+
+#if (GCC_VERSION < 40900)
+typedef uint64_t poly64_t;
+typedef uint64x2_t poly64x2_t;
+typedef uint8_t poly128_t __attribute__((vector_size(16), aligned(16)));
+
+static inline uint32x4_t
+vceqzq_u32(uint32x4_t a)
+{
+       return (a == 0);
+}
+#endif
+
+/* NEON intrinsic vreinterpretq_u64_p128() is supported since GCC version 7 */
+static inline uint64x2_t
+vreinterpretq_u64_p128(poly128_t x)
+{
+       return (uint64x2_t)x;
+}
+
+/* NEON intrinsic vreinterpretq_p64_u64() is supported since GCC version 7 */
+static inline poly64x2_t
+vreinterpretq_p64_u64(uint64x2_t x)
+{
+       return (poly64x2_t)x;
+}
+
+/* NEON intrinsic vgetq_lane_p64() is supported since GCC version 7 */
+static inline poly64_t
+vgetq_lane_p64(poly64x2_t x, const int lane)
+{
+       RTE_ASSERT(lane >= 0 && lane <= 1);
+
+       poly64_t *p = (poly64_t *)&x;
+
+       return p[lane];
+}
+#endif
+#endif
+
+/*
+ * If 0 <= index <= 15, call the ASIMD EXT instruction on the
+ * 128-bit registers v0 and v1 with the given index.
+ *
+ * Otherwise, return a zero vector.
+ */
+static inline uint8x16_t
+vextract(uint8x16_t v0, uint8x16_t v1, const int index)
+{
+       switch (index) {
+       case 0: return vextq_u8(v0, v1, 0);
+       case 1: return vextq_u8(v0, v1, 1);
+       case 2: return vextq_u8(v0, v1, 2);
+       case 3: return vextq_u8(v0, v1, 3);
+       case 4: return vextq_u8(v0, v1, 4);
+       case 5: return vextq_u8(v0, v1, 5);
+       case 6: return vextq_u8(v0, v1, 6);
+       case 7: return vextq_u8(v0, v1, 7);
+       case 8: return vextq_u8(v0, v1, 8);
+       case 9: return vextq_u8(v0, v1, 9);
+       case 10: return vextq_u8(v0, v1, 10);
+       case 11: return vextq_u8(v0, v1, 11);
+       case 12: return vextq_u8(v0, v1, 12);
+       case 13: return vextq_u8(v0, v1, 13);
+       case 14: return vextq_u8(v0, v1, 14);
+       case 15: return vextq_u8(v0, v1, 15);
+       }
+       return vdupq_n_u8(0);
+}
+
+/**
+ * Shift a 128-bit register right by the specified number of bytes
+ *
+ * The shift parameter must be in the range 0 - 16
+ */
+static inline uint64x2_t
+vshift_bytes_right(uint64x2_t reg, const unsigned int shift)
+{
+       return vreinterpretq_u64_u8(vextract(
+                               vreinterpretq_u8_u64(reg),
+                               vdupq_n_u8(0),
+                               shift));
+}
+
+/**
+ * Shift a 128-bit register left by the specified number of bytes
+ *
+ * The shift parameter must be in the range 0 - 16
+ */
+static inline uint64x2_t
+vshift_bytes_left(uint64x2_t reg, const unsigned int shift)
+{
+       return vreinterpretq_u64_u8(vextract(
+                               vdupq_n_u8(0),
+                               vreinterpretq_u8_u64(reg),
+                               16 - shift));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
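
Because vextq_u8() requires a compile-time immediate, vextract() expands the runtime index through a switch so every case carries a constant, and the byte-shift helpers then express shifts as an extraction against a zero vector. A hedged sketch of using them (the two-byte shift is illustrative):

        #include <rte_vect.h>

        static uint64x2_t
        strip_two_bytes(uint64x2_t reg)
        {
                /* 128-bit value shifted right by two bytes; the vacated
                 * top bytes are filled from the zero vector.
                 */
                return vshift_bytes_right(reg, 2);
        }
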
diff --git a/lib/librte_eal/arm/meson.build b/lib/librte_eal/arm/meson.build
index f8f7dea..d62875e 100644 (file)
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017 Intel Corporation.
 
+subdir('include')
+
 sources += files(
        'rte_cpuflags.c',
        'rte_cycles.c',
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index c2c6d92..6c52f50 100644 (file)
@@ -27,11 +27,11 @@ GENERIC_INC += rte_vect.h rte_pause.h rte_io.h
 
 # defined in mk/arch/$(RTE_ARCH)/rte.vars.mk
 ARCH_DIR ?= $(RTE_ARCH)
-ARCH_INC := $(sort $(notdir $(wildcard $(RTE_SDK)/lib/librte_eal/common/include/arch/$(ARCH_DIR)/*.h)))
+ARCH_INC := $(sort $(notdir $(wildcard $(RTE_SDK)/lib/librte_eal/$(ARCH_DIR)/include/*.h)))
 
 SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC))
 SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include += \
-       $(addprefix include/arch/$(ARCH_DIR)/,$(ARCH_INC))
+       $(addprefix ../$(ARCH_DIR)/include/,$(ARCH_INC))
 SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/generic := \
        $(addprefix include/generic/,$(GENERIC_INC))
 
diff --git a/lib/librte_eal/common/include/arch/arm/meson.build b/lib/librte_eal/common/include/arch/arm/meson.build
deleted file mode 100644 (file)
index 77893fa..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation.
-
-install_headers(
-       'rte_atomic_32.h',
-       'rte_atomic_64.h',
-       'rte_atomic.h',
-       'rte_byteorder.h',
-       'rte_cpuflags_32.h',
-       'rte_cpuflags_64.h',
-       'rte_cpuflags.h',
-       'rte_cycles_32.h',
-       'rte_cycles_64.h',
-       'rte_cycles.h',
-       'rte_io_64.h',
-       'rte_io.h',
-       'rte_memcpy_32.h',
-       'rte_memcpy_64.h',
-       'rte_memcpy.h',
-       'rte_pause_32.h',
-       'rte_pause_64.h',
-       'rte_pause.h',
-       'rte_prefetch_32.h',
-       'rte_prefetch_64.h',
-       'rte_prefetch.h',
-       'rte_rwlock.h',
-       'rte_spinlock.h',
-       'rte_vect.h',
-       subdir: get_option('include_subdir_arch'))
diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic.h b/lib/librte_eal/common/include/arch/arm/rte_atomic.h
deleted file mode 100644 (file)
index 40e14e5..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_ATOMIC_ARM_H_
-#define _RTE_ATOMIC_ARM_H_
-
-#ifdef RTE_ARCH_64
-#include <rte_atomic_64.h>
-#else
-#include <rte_atomic_32.h>
-#endif
-
-#endif /* _RTE_ATOMIC_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h b/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h
deleted file mode 100644 (file)
index 7dc0d06..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_ATOMIC_ARM32_H_
-#define _RTE_ATOMIC_ARM32_H_
-
-#ifndef RTE_FORCE_INTRINSICS
-#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_atomic.h"
-
-#define        rte_mb()  __sync_synchronize()
-
-#define        rte_wmb() do { asm volatile ("dmb st" : : : "memory"); } while (0)
-
-#define        rte_rmb() __sync_synchronize()
-
-#define rte_smp_mb() rte_mb()
-
-#define rte_smp_wmb() rte_wmb()
-
-#define rte_smp_rmb() rte_rmb()
-
-#define rte_io_mb() rte_mb()
-
-#define rte_io_wmb() rte_wmb()
-
-#define rte_io_rmb() rte_rmb()
-
-#define rte_cio_wmb() rte_wmb()
-
-#define rte_cio_rmb() rte_rmb()
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_ATOMIC_ARM32_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
deleted file mode 100644 (file)
index 7b7099c..0000000
+++ /dev/null
@@ -1,190 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 Cavium, Inc
- * Copyright(c) 2019 Arm Limited
- */
-
-#ifndef _RTE_ATOMIC_ARM64_H_
-#define _RTE_ATOMIC_ARM64_H_
-
-#ifndef RTE_FORCE_INTRINSICS
-#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_atomic.h"
-#include <rte_branch_prediction.h>
-#include <rte_compat.h>
-#include <rte_debug.h>
-
-#define rte_mb() asm volatile("dsb sy" : : : "memory")
-
-#define rte_wmb() asm volatile("dsb st" : : : "memory")
-
-#define rte_rmb() asm volatile("dsb ld" : : : "memory")
-
-#define rte_smp_mb() asm volatile("dmb ish" : : : "memory")
-
-#define rte_smp_wmb() asm volatile("dmb ishst" : : : "memory")
-
-#define rte_smp_rmb() asm volatile("dmb ishld" : : : "memory")
-
-#define rte_io_mb() rte_mb()
-
-#define rte_io_wmb() rte_wmb()
-
-#define rte_io_rmb() rte_rmb()
-
-#define rte_cio_wmb() asm volatile("dmb oshst" : : : "memory")
-
-#define rte_cio_rmb() asm volatile("dmb oshld" : : : "memory")
-
-/*------------------------ 128 bit atomic operations -------------------------*/
-
-#if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS)
-#define __ATOMIC128_CAS_OP(cas_op_name, op_string)                          \
-static __rte_noinline rte_int128_t                                          \
-cas_op_name(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated)      \
-{                                                                           \
-       /* The caspX instructions' register pair must start at an
-        * even-numbered register for operand 1, so pin the local
-        * variables to specific registers here.
-        */                                                                 \
-       register uint64_t x0 __asm("x0") = (uint64_t)old.val[0];            \
-       register uint64_t x1 __asm("x1") = (uint64_t)old.val[1];            \
-       register uint64_t x2 __asm("x2") = (uint64_t)updated.val[0];        \
-       register uint64_t x3 __asm("x3") = (uint64_t)updated.val[1];        \
-       asm volatile(                                                       \
-               op_string " %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"   \
-               : [old0] "+r" (x0),                                         \
-               [old1] "+r" (x1)                                            \
-               : [upd0] "r" (x2),                                          \
-               [upd1] "r" (x3),                                            \
-               [dst] "r" (dst)                                             \
-               : "memory");                                                \
-       old.val[0] = x0;                                                    \
-       old.val[1] = x1;                                                    \
-       return old;                                                         \
-}
-
-__ATOMIC128_CAS_OP(__cas_128_relaxed, "casp")
-__ATOMIC128_CAS_OP(__cas_128_acquire, "caspa")
-__ATOMIC128_CAS_OP(__cas_128_release, "caspl")
-__ATOMIC128_CAS_OP(__cas_128_acq_rel, "caspal")
-
-#undef __ATOMIC128_CAS_OP
-
-#endif
-
-__rte_experimental
-static inline int
-rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp,
-               const rte_int128_t *src, unsigned int weak, int success,
-               int failure)
-{
-       /* Always do strong CAS */
-       RTE_SET_USED(weak);
-       /* Ignore the memory ordering for failure; the memory order
-        * for success must be stronger or equal.
-        */
-       RTE_SET_USED(failure);
-       /* Find invalid memory order */
-       RTE_ASSERT(success == __ATOMIC_RELAXED ||
-               success == __ATOMIC_ACQUIRE ||
-               success == __ATOMIC_RELEASE ||
-               success == __ATOMIC_ACQ_REL ||
-               success == __ATOMIC_SEQ_CST);
-
-       rte_int128_t expected = *exp;
-       rte_int128_t desired = *src;
-       rte_int128_t old;
-
-#if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS)
-       if (success == __ATOMIC_RELAXED)
-               old = __cas_128_relaxed(dst, expected, desired);
-       else if (success == __ATOMIC_ACQUIRE)
-               old = __cas_128_acquire(dst, expected, desired);
-       else if (success == __ATOMIC_RELEASE)
-               old = __cas_128_release(dst, expected, desired);
-       else
-               old = __cas_128_acq_rel(dst, expected, desired);
-#else
-#define __HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE)
-#define __HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || (mo) == __ATOMIC_ACQ_REL || \
-               (mo) == __ATOMIC_SEQ_CST)
-
-       int ldx_mo = __HAS_ACQ(success) ? __ATOMIC_ACQUIRE : __ATOMIC_RELAXED;
-       int stx_mo = __HAS_RLS(success) ? __ATOMIC_RELEASE : __ATOMIC_RELAXED;
-
-#undef __HAS_ACQ
-#undef __HAS_RLS
-
-       uint32_t ret = 1;
-
-       /* ldx128 cannot guarantee atomicity by itself; src or old
-        * must be written back to verify that the read was atomic.
-        */
-       do {
-
-#define __LOAD_128(op_string, src, dst) { \
-       asm volatile(                     \
-               op_string " %0, %1, %2"   \
-               : "=&r" (dst.val[0]),     \
-                 "=&r" (dst.val[1])      \
-               : "Q" (src->val[0])       \
-               : "memory"); }
-
-               if (ldx_mo == __ATOMIC_RELAXED)
-                       __LOAD_128("ldxp", dst, old)
-               else
-                       __LOAD_128("ldaxp", dst, old)
-
-#undef __LOAD_128
-
-#define __STORE_128(op_string, dst, src, ret) { \
-       asm volatile(                           \
-               op_string " %w0, %1, %2, %3"    \
-               : "=&r" (ret)                   \
-               : "r" (src.val[0]),             \
-                 "r" (src.val[1]),             \
-                 "Q" (dst->val[0])             \
-               : "memory"); }
-
-               if (likely(old.int128 == expected.int128)) {
-                       if (stx_mo == __ATOMIC_RELAXED)
-                               __STORE_128("stxp", dst, desired, ret)
-                       else
-                               __STORE_128("stlxp", dst, desired, ret)
-               } else {
-                       /* In the failure case (since 'weak' is ignored and only
-                        * weak == 0 is implemented), expected should contain
-                        * the atomically read value of dst. This means, 'old'
-                        * needs to be stored back to ensure it was read
-                        * atomically.
-                        */
-                       if (stx_mo == __ATOMIC_RELAXED)
-                               __STORE_128("stxp", dst, old, ret)
-                       else
-                               __STORE_128("stlxp", dst, old, ret)
-               }
-
-#undef __STORE_128
-
-       } while (unlikely(ret));
-#endif
-
-       /* Unconditionally updating expected removes an 'if' statement.
-        * expected should already be in a register if not in the cache.
-        */
-       *exp = old;
-
-       return (old.int128 == expected.int128);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_ATOMIC_ARM64_H_ */
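
This file is moved verbatim: rte_atomic128_cmp_exchange() either uses the LSE caspX instructions or an ldxp/stxp retry loop that also stores back on failure so the 128-bit read is provably atomic. A usage sketch, assuming a 16-byte slot updated lock-free (names are illustrative; the API was experimental at this point):

        #include <rte_atomic.h>

        static int
        publish_pair(rte_int128_t *slot, uint64_t a, uint64_t b)
        {
                rte_int128_t exp = *slot; /* racy snapshot, validated by CAS */
                rte_int128_t des;

                des.val[0] = a;
                des.val[1] = b;
                /* Nonzero on success; on failure exp holds the value read
                 * atomically from *slot.
                 */
                return rte_atomic128_cmp_exchange(slot, &exp, &des, 0,
                                __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
        }
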
diff --git a/lib/librte_eal/common/include/arch/arm/rte_byteorder.h b/lib/librte_eal/common/include/arch/arm/rte_byteorder.h
deleted file mode 100644 (file)
index 9ec4a97..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_BYTEORDER_ARM_H_
-#define _RTE_BYTEORDER_ARM_H_
-
-#ifndef RTE_FORCE_INTRINSICS
-#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include <rte_common.h>
-#include "generic/rte_byteorder.h"
-
-/* fix missing __builtin_bswap16 for gcc older than 4.8 */
-#if !(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
-
-static inline uint16_t rte_arch_bswap16(uint16_t _x)
-{
-       uint16_t x = _x;
-
-       asm volatile ("rev16 %w0,%w1"
-                     : "=r" (x)
-                     : "r" (x)
-                     );
-       return x;
-}
-
-#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \
-                                  rte_constant_bswap16(x) : \
-                                  rte_arch_bswap16(x)))
-#endif
-
-/* ARM architecture is bi-endian (both big and little). */
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
-
-#define rte_cpu_to_le_16(x) (x)
-#define rte_cpu_to_le_32(x) (x)
-#define rte_cpu_to_le_64(x) (x)
-
-#define rte_cpu_to_be_16(x) rte_bswap16(x)
-#define rte_cpu_to_be_32(x) rte_bswap32(x)
-#define rte_cpu_to_be_64(x) rte_bswap64(x)
-
-#define rte_le_to_cpu_16(x) (x)
-#define rte_le_to_cpu_32(x) (x)
-#define rte_le_to_cpu_64(x) (x)
-
-#define rte_be_to_cpu_16(x) rte_bswap16(x)
-#define rte_be_to_cpu_32(x) rte_bswap32(x)
-#define rte_be_to_cpu_64(x) rte_bswap64(x)
-
-#else /* RTE_BIG_ENDIAN */
-
-#define rte_cpu_to_le_16(x) rte_bswap16(x)
-#define rte_cpu_to_le_32(x) rte_bswap32(x)
-#define rte_cpu_to_le_64(x) rte_bswap64(x)
-
-#define rte_cpu_to_be_16(x) (x)
-#define rte_cpu_to_be_32(x) (x)
-#define rte_cpu_to_be_64(x) (x)
-
-#define rte_le_to_cpu_16(x) rte_bswap16(x)
-#define rte_le_to_cpu_32(x) rte_bswap32(x)
-#define rte_le_to_cpu_64(x) rte_bswap64(x)
-
-#define rte_be_to_cpu_16(x) (x)
-#define rte_be_to_cpu_32(x) (x)
-#define rte_be_to_cpu_64(x) (x)
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_BYTEORDER_ARM_H_ */
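
On a little-endian build the cpu_to_be macros reduce to byte swaps and the cpu_to_le ones to no-ops, with the mapping mirrored on big-endian. For instance, producing a network-order field (a sketch; the length field is illustrative):

        #include <rte_byteorder.h>

        static uint32_t
        host_to_wire_len(uint32_t len)
        {
                return rte_cpu_to_be_32(len); /* bswap32 on little-endian */
        }
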
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cpuflags.h b/lib/librte_eal/common/include/arch/arm/rte_cpuflags.h
deleted file mode 100644 (file)
index 022e7da..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_CPUFLAGS_ARM_H_
-#define _RTE_CPUFLAGS_ARM_H_
-
-#ifdef RTE_ARCH_64
-#include <rte_cpuflags_64.h>
-#else
-#include <rte_cpuflags_32.h>
-#endif
-
-#endif /* _RTE_CPUFLAGS_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h b/lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h
deleted file mode 100644 (file)
index b5347be..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_CPUFLAGS_ARM32_H_
-#define _RTE_CPUFLAGS_ARM32_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Enumeration of all CPU features supported
- */
-enum rte_cpu_flag_t {
-       RTE_CPUFLAG_SWP = 0,
-       RTE_CPUFLAG_HALF,
-       RTE_CPUFLAG_THUMB,
-       RTE_CPUFLAG_A26BIT,
-       RTE_CPUFLAG_FAST_MULT,
-       RTE_CPUFLAG_FPA,
-       RTE_CPUFLAG_VFP,
-       RTE_CPUFLAG_EDSP,
-       RTE_CPUFLAG_JAVA,
-       RTE_CPUFLAG_IWMMXT,
-       RTE_CPUFLAG_CRUNCH,
-       RTE_CPUFLAG_THUMBEE,
-       RTE_CPUFLAG_NEON,
-       RTE_CPUFLAG_VFPv3,
-       RTE_CPUFLAG_VFPv3D16,
-       RTE_CPUFLAG_TLS,
-       RTE_CPUFLAG_VFPv4,
-       RTE_CPUFLAG_IDIVA,
-       RTE_CPUFLAG_IDIVT,
-       RTE_CPUFLAG_VFPD32,
-       RTE_CPUFLAG_LPAE,
-       RTE_CPUFLAG_EVTSTRM,
-       RTE_CPUFLAG_AES,
-       RTE_CPUFLAG_PMULL,
-       RTE_CPUFLAG_SHA1,
-       RTE_CPUFLAG_SHA2,
-       RTE_CPUFLAG_CRC32,
-       RTE_CPUFLAG_V7L,
-       /* The last item */
-       RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */
-};
-
-#include "generic/rte_cpuflags.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CPUFLAGS_ARM32_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h b/lib/librte_eal/common/include/arch/arm/rte_cpuflags_64.h
deleted file mode 100644 (file)
index 95cc014..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 Cavium, Inc
- */
-
-#ifndef _RTE_CPUFLAGS_ARM64_H_
-#define _RTE_CPUFLAGS_ARM64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Enumeration of all CPU features supported
- */
-enum rte_cpu_flag_t {
-       RTE_CPUFLAG_FP = 0,
-       RTE_CPUFLAG_NEON,
-       RTE_CPUFLAG_EVTSTRM,
-       RTE_CPUFLAG_AES,
-       RTE_CPUFLAG_PMULL,
-       RTE_CPUFLAG_SHA1,
-       RTE_CPUFLAG_SHA2,
-       RTE_CPUFLAG_CRC32,
-       RTE_CPUFLAG_ATOMICS,
-       RTE_CPUFLAG_AARCH64,
-       /* The last item */
-       RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */
-};
-
-#include "generic/rte_cpuflags.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CPUFLAGS_ARM64_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cycles.h b/lib/librte_eal/common/include/arch/arm/rte_cycles.h
deleted file mode 100644 (file)
index e8ffa89..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_CYCLES_ARM_H_
-#define _RTE_CYCLES_ARM_H_
-
-#ifdef RTE_ARCH_64
-#include <rte_cycles_64.h>
-#else
-#include <rte_cycles_32.h>
-#endif
-
-#endif /* _RTE_CYCLES_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cycles_32.h b/lib/librte_eal/common/include/arch/arm/rte_cycles_32.h
deleted file mode 100644 (file)
index 859b097..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_CYCLES_ARM32_H_
-#define _RTE_CYCLES_ARM32_H_
-
-/* ARMv7 does not have a suitable source of clock signals. The only cycle
-   counter available in the core is 32 bits wide. It is unsuitable because it
-   wraps around every few seconds and is usually not accessible from userspace
-   programs. Therefore we use clock_gettime(CLOCK_MONOTONIC_RAW) to simulate a
-   counter running at 1 GHz.
-*/
-
-#include <time.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_cycles.h"
-
-/**
- * Read the time base register.
- *
- * @return
- *   The time base for this lcore.
- */
-#ifndef RTE_ARM_EAL_RDTSC_USE_PMU
-
-/**
- * This call is easily portable to any architecture; however, it may
- * require a system call and can be imprecise for some tasks.
- */
-static inline uint64_t
-__rte_rdtsc_syscall(void)
-{
-       struct timespec val;
-       uint64_t v;
-
-       while (clock_gettime(CLOCK_MONOTONIC_RAW, &val) != 0)
-               /* no body */;
-
-       v  = (uint64_t) val.tv_sec * 1000000000LL;
-       v += (uint64_t) val.tv_nsec;
-       return v;
-}
-#define rte_rdtsc __rte_rdtsc_syscall
-
-#else
-
-/**
- * This function requires the PMCCNTR to be configured and userspace
- * access to it enabled:
- *
- *      asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r"(1));
- *      asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(29));
- *      asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r"(0x8000000f));
- *
- * which is possible only from privileged mode (kernel space).
- */
-static inline uint64_t
-__rte_rdtsc_pmccntr(void)
-{
-       unsigned tsc;
-       uint64_t final_tsc;
-
-       /* Read PMCCNTR */
-       asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(tsc));
-       /* 1 tick = 64 clocks */
-       final_tsc = ((uint64_t)tsc) << 6;
-
-       return (uint64_t)final_tsc;
-}
-#define rte_rdtsc __rte_rdtsc_pmccntr
-
-#endif /* RTE_ARM_EAL_RDTSC_USE_PMU */
-
-static inline uint64_t
-rte_rdtsc_precise(void)
-{
-       rte_mb();
-       return rte_rdtsc();
-}
-
-static inline uint64_t
-rte_get_tsc_cycles(void) { return rte_rdtsc(); }
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CYCLES_ARM32_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h b/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
deleted file mode 100644 (file)
index da557b6..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 Cavium, Inc
- */
-
-#ifndef _RTE_CYCLES_ARM64_H_
-#define _RTE_CYCLES_ARM64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_cycles.h"
-
-/**
- * Read the time base register.
- *
- * @return
- *   The time base for this lcore.
- */
-#ifndef RTE_ARM_EAL_RDTSC_USE_PMU
-/**
- * This call is portable to any ARMv8 architecture; however, cntvct_el0
- * typically runs at <= 100 MHz and may be imprecise for some tasks.
- */
-static inline uint64_t
-rte_rdtsc(void)
-{
-       uint64_t tsc;
-
-       asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
-       return tsc;
-}
-#else
-/**
- * This is an alternative method to back rte_rdtsc() with the high-resolution
- * PMU cycle counter. The cycle counter runs at CPU frequency, and this scheme
- * uses the ARMv8 PMU subsystem to read it from userspace. However, userspace
- * access to the PMU cycle counter is not enabled by default in the arm64
- * Linux kernel.
- * It is possible to enable userspace access to the cycle counter by
- * configuring the PMU from privileged mode (kernel space).
- *
- * asm volatile("msr pmintenset_el1, %0" : : "r" ((u64)(0 << 31)));
- * asm volatile("msr pmcntenset_el0, %0" :: "r" BIT(31));
- * asm volatile("msr pmuserenr_el0, %0" : : "r"(BIT(0) | BIT(2)));
- * asm volatile("mrs %0, pmcr_el0" : "=r" (val));
- * val |= (BIT(0) | BIT(2));
- * isb();
- * asm volatile("msr pmcr_el0, %0" : : "r" (val));
- *
- */
-static inline uint64_t
-rte_rdtsc(void)
-{
-       uint64_t tsc;
-
-       asm volatile("mrs %0, pmccntr_el0" : "=r"(tsc));
-       return tsc;
-}
-#endif
-
-static inline uint64_t
-rte_rdtsc_precise(void)
-{
-       asm volatile("isb" : : : "memory");
-       return rte_rdtsc();
-}
-
-static inline uint64_t
-rte_get_tsc_cycles(void) { return rte_rdtsc(); }
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CYCLES_ARM64_H_ */
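
Whichever counter backs rte_rdtsc(), intervals are measured the same way; rte_rdtsc_precise() adds a barrier (rte_mb() on ARMv7, isb on ARMv8) so the read cannot drift into the measured region. The usual pattern, sketched (the op callback is illustrative):

        #include <stdint.h>
        #include <rte_cycles.h>

        static uint64_t
        time_op(void (*op)(void))
        {
                uint64_t start, end;

                start = rte_rdtsc_precise(); /* serialized counter read */
                op();
                end = rte_rdtsc_precise();
                return end - start;          /* elapsed counter ticks */
        }
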
diff --git a/lib/librte_eal/common/include/arch/arm/rte_io.h b/lib/librte_eal/common/include/arch/arm/rte_io.h
deleted file mode 100644 (file)
index f4e66e6..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Cavium, Inc
- */
-
-#ifndef _RTE_IO_ARM_H_
-#define _RTE_IO_ARM_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef RTE_ARCH_64
-#include "rte_io_64.h"
-#else
-#include "generic/rte_io.h"
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_IO_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_io_64.h b/lib/librte_eal/common/include/arch/arm/rte_io_64.h
deleted file mode 100644 (file)
index e534624..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Cavium, Inc
- */
-
-#ifndef _RTE_IO_ARM64_H_
-#define _RTE_IO_ARM64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-
-#define RTE_OVERRIDE_IO_H
-
-#include "generic/rte_io.h"
-#include "rte_atomic_64.h"
-
-static __rte_always_inline uint8_t
-rte_read8_relaxed(const volatile void *addr)
-{
-       uint8_t val;
-
-       asm volatile(
-                   "ldrb %w[val], [%x[addr]]"
-                   : [val] "=r" (val)
-                   : [addr] "r" (addr));
-       return val;
-}
-
-static __rte_always_inline uint16_t
-rte_read16_relaxed(const volatile void *addr)
-{
-       uint16_t val;
-
-       asm volatile(
-                   "ldrh %w[val], [%x[addr]]"
-                   : [val] "=r" (val)
-                   : [addr] "r" (addr));
-       return val;
-}
-
-static __rte_always_inline uint32_t
-rte_read32_relaxed(const volatile void *addr)
-{
-       uint32_t val;
-
-       asm volatile(
-                   "ldr %w[val], [%x[addr]]"
-                   : [val] "=r" (val)
-                   : [addr] "r" (addr));
-       return val;
-}
-
-static __rte_always_inline uint64_t
-rte_read64_relaxed(const volatile void *addr)
-{
-       uint64_t val;
-
-       asm volatile(
-                   "ldr %x[val], [%x[addr]]"
-                   : [val] "=r" (val)
-                   : [addr] "r" (addr));
-       return val;
-}
-
-static __rte_always_inline void
-rte_write8_relaxed(uint8_t val, volatile void *addr)
-{
-       asm volatile(
-                   "strb %w[val], [%x[addr]]"
-                   :
-                   : [val] "r" (val), [addr] "r" (addr));
-}
-
-static __rte_always_inline void
-rte_write16_relaxed(uint16_t val, volatile void *addr)
-{
-       asm volatile(
-                   "strh %w[val], [%x[addr]]"
-                   :
-                   : [val] "r" (val), [addr] "r" (addr));
-}
-
-static __rte_always_inline void
-rte_write32_relaxed(uint32_t val, volatile void *addr)
-{
-       asm volatile(
-                   "str %w[val], [%x[addr]]"
-                   :
-                   : [val] "r" (val), [addr] "r" (addr));
-}
-
-static __rte_always_inline void
-rte_write64_relaxed(uint64_t val, volatile void *addr)
-{
-       asm volatile(
-                   "str %x[val], [%x[addr]]"
-                   :
-                   : [val] "r" (val), [addr] "r" (addr));
-}
-
-static __rte_always_inline uint8_t
-rte_read8(const volatile void *addr)
-{
-       uint8_t val;
-       val = rte_read8_relaxed(addr);
-       rte_io_rmb();
-       return val;
-}
-
-static __rte_always_inline uint16_t
-rte_read16(const volatile void *addr)
-{
-       uint16_t val;
-       val = rte_read16_relaxed(addr);
-       rte_io_rmb();
-       return val;
-}
-
-static __rte_always_inline uint32_t
-rte_read32(const volatile void *addr)
-{
-       uint32_t val;
-       val = rte_read32_relaxed(addr);
-       rte_io_rmb();
-       return val;
-}
-
-static __rte_always_inline uint64_t
-rte_read64(const volatile void *addr)
-{
-       uint64_t val;
-       val = rte_read64_relaxed(addr);
-       rte_io_rmb();
-       return val;
-}
-
-static __rte_always_inline void
-rte_write8(uint8_t value, volatile void *addr)
-{
-       rte_io_wmb();
-       rte_write8_relaxed(value, addr);
-}
-
-static __rte_always_inline void
-rte_write16(uint16_t value, volatile void *addr)
-{
-       rte_io_wmb();
-       rte_write16_relaxed(value, addr);
-}
-
-static __rte_always_inline void
-rte_write32(uint32_t value, volatile void *addr)
-{
-       rte_io_wmb();
-       rte_write32_relaxed(value, addr);
-}
-
-static __rte_always_inline void
-rte_write64(uint64_t value, volatile void *addr)
-{
-       rte_io_wmb();
-       rte_write64_relaxed(value, addr);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_IO_ARM64_H_ */
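
The relaxed accessors are bare load/store instructions; the ordered ones wrap them in rte_io_rmb()/rte_io_wmb() so device reads complete before dependent loads, and writes are not reordered ahead of earlier stores. A doorbell-style sketch (the register offset is a hypothetical device detail):

        #include <stdint.h>
        #include <rte_io.h>

        #define DOORBELL_OFF 0x40 /* illustrative device register offset */

        static void
        ring_doorbell(volatile uint8_t *bar, uint32_t tail)
        {
                /* rte_write32() issues rte_io_wmb() first, so descriptor
                 * stores to normal memory are visible before the doorbell.
                 */
                rte_write32(tail, bar + DOORBELL_OFF);
        }
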
diff --git a/lib/librte_eal/common/include/arch/arm/rte_mcslock.h b/lib/librte_eal/common/include/arch/arm/rte_mcslock.h
deleted file mode 100644 (file)
index dd1fe13..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Arm Limited
- */
-
-#ifndef _RTE_MCSLOCK_ARM_H_
-#define _RTE_MCSLOCK_ARM_H_
-
-#ifndef RTE_FORCE_INTRINSICS
-#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_mcslock.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MCSLOCK_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy.h
deleted file mode 100644 (file)
index 47dea9a..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_MEMCPY_ARM_H_
-#define _RTE_MEMCPY_ARM_H_
-
-#ifdef RTE_ARCH_64
-#include <rte_memcpy_64.h>
-#else
-#include <rte_memcpy_32.h>
-#endif
-
-#endif /* _RTE_MEMCPY_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
deleted file mode 100644 (file)
index eb02c3b..0000000
+++ /dev/null
@@ -1,305 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_MEMCPY_ARM32_H_
-#define _RTE_MEMCPY_ARM32_H_
-
-#include <stdint.h>
-#include <string.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_memcpy.h"
-
-#ifdef RTE_ARCH_ARM_NEON_MEMCPY
-
-#ifndef RTE_MACHINE_CPUFLAG_NEON
-#error "Cannot optimize memcpy by NEON as the CPU seems to not support this"
-#endif
-
-/* ARM NEON Intrinsics are used to copy data */
-#include <arm_neon.h>
-
-static inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-       vst1q_u8(dst, vld1q_u8(src));
-}
-
-static inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-       asm volatile (
-               "vld1.8 {d0-d3}, [%0]\n\t"
-               "vst1.8 {d0-d3}, [%1]\n\t"
-               : "+r" (src), "+r" (dst)
-               : : "memory", "d0", "d1", "d2", "d3");
-}
-
-static inline void
-rte_mov48(uint8_t *dst, const uint8_t *src)
-{
-       asm volatile (
-               "vld1.8 {d0-d3}, [%0]!\n\t"
-               "vld1.8 {d4-d5}, [%0]\n\t"
-               "vst1.8 {d0-d3}, [%1]!\n\t"
-               "vst1.8 {d4-d5}, [%1]\n\t"
-               : "+r" (src), "+r" (dst)
-               :
-               : "memory", "d0", "d1", "d2", "d3", "d4", "d5");
-}
-
-static inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-       asm volatile (
-               "vld1.8 {d0-d3}, [%0]!\n\t"
-               "vld1.8 {d4-d7}, [%0]\n\t"
-               "vst1.8 {d0-d3}, [%1]!\n\t"
-               "vst1.8 {d4-d7}, [%1]\n\t"
-               : "+r" (src), "+r" (dst)
-               :
-               : "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7");
-}
-
-static inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-       asm volatile ("pld [%0, #64]" : : "r" (src));
-       asm volatile (
-               "vld1.8 {d0-d3},   [%0]!\n\t"
-               "vld1.8 {d4-d7},   [%0]!\n\t"
-               "vld1.8 {d8-d11},  [%0]!\n\t"
-               "vld1.8 {d12-d15}, [%0]\n\t"
-               "vst1.8 {d0-d3},   [%1]!\n\t"
-               "vst1.8 {d4-d7},   [%1]!\n\t"
-               "vst1.8 {d8-d11},  [%1]!\n\t"
-               "vst1.8 {d12-d15}, [%1]\n\t"
-               : "+r" (src), "+r" (dst)
-               :
-               : "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
-               "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");
-}
-
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-       asm volatile ("pld [%0,  #64]" : : "r" (src));
-       asm volatile ("pld [%0, #128]" : : "r" (src));
-       asm volatile ("pld [%0, #192]" : : "r" (src));
-       asm volatile ("pld [%0, #256]" : : "r" (src));
-       asm volatile ("pld [%0, #320]" : : "r" (src));
-       asm volatile ("pld [%0, #384]" : : "r" (src));
-       asm volatile ("pld [%0, #448]" : : "r" (src));
-       asm volatile (
-               "vld1.8 {d0-d3},   [%0]!\n\t"
-               "vld1.8 {d4-d7},   [%0]!\n\t"
-               "vld1.8 {d8-d11},  [%0]!\n\t"
-               "vld1.8 {d12-d15}, [%0]!\n\t"
-               "vld1.8 {d16-d19}, [%0]!\n\t"
-               "vld1.8 {d20-d23}, [%0]!\n\t"
-               "vld1.8 {d24-d27}, [%0]!\n\t"
-               "vld1.8 {d28-d31}, [%0]\n\t"
-               "vst1.8 {d0-d3},   [%1]!\n\t"
-               "vst1.8 {d4-d7},   [%1]!\n\t"
-               "vst1.8 {d8-d11},  [%1]!\n\t"
-               "vst1.8 {d12-d15}, [%1]!\n\t"
-               "vst1.8 {d16-d19}, [%1]!\n\t"
-               "vst1.8 {d20-d23}, [%1]!\n\t"
-               "vst1.8 {d24-d27}, [%1]!\n\t"
-               "vst1.8 {d28-d31}, [%1]!\n\t"
-               : "+r" (src), "+r" (dst)
-               :
-               : "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
-               "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
-               "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
-               "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31");
-}
-
-#define rte_memcpy(dst, src, n)              \
-       __extension__ ({                     \
-       (__builtin_constant_p(n)) ?          \
-       memcpy((dst), (src), (n)) :          \
-       rte_memcpy_func((dst), (src), (n)); })
-
-static inline void *
-rte_memcpy_func(void *dst, const void *src, size_t n)
-{
-       void *ret = dst;
-
-       /* We cannot copy < 16 bytes using NEON registers, so do it manually. */
-       if (n < 16) {
-               if (n & 0x01) {
-                       *(uint8_t *)dst = *(const uint8_t *)src;
-                       dst = (uint8_t *)dst + 1;
-                       src = (const uint8_t *)src + 1;
-               }
-               if (n & 0x02) {
-                       *(uint16_t *)dst = *(const uint16_t *)src;
-                       dst = (uint16_t *)dst + 1;
-                       src = (const uint16_t *)src + 1;
-               }
-               if (n & 0x04) {
-                       *(uint32_t *)dst = *(const uint32_t *)src;
-                       dst = (uint32_t *)dst + 1;
-                       src = (const uint32_t *)src + 1;
-               }
-               if (n & 0x08) {
-                       /* ARMv7 cannot handle unaligned access to long long
-                        * (uint64_t). Therefore two uint32_t operations are
-                        * used.
-                        */
-                       *(uint32_t *)dst = *(const uint32_t *)src;
-                       dst = (uint32_t *)dst + 1;
-                       src = (const uint32_t *)src + 1;
-                       *(uint32_t *)dst = *(const uint32_t *)src;
-               }
-               return ret;
-       }
-
-       /* Special fast cases for <= 128 bytes */
-       if (n <= 32) {
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov16((uint8_t *)dst - 16 + n,
-                       (const uint8_t *)src - 16 + n);
-               return ret;
-       }
-
-       if (n <= 64) {
-               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov32((uint8_t *)dst - 32 + n,
-                       (const uint8_t *)src - 32 + n);
-               return ret;
-       }
-
-       if (n <= 128) {
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov64((uint8_t *)dst - 64 + n,
-                       (const uint8_t *)src - 64 + n);
-               return ret;
-       }
-
-       /*
-        * For large copies > 128 bytes. This combination of 256, 64 and 16 byte
-        * copies was found to be faster than doing 128 and 32 byte copies as
-        * well.
-        */
-       for ( ; n >= 256; n -= 256) {
-               rte_mov256((uint8_t *)dst, (const uint8_t *)src);
-               dst = (uint8_t *)dst + 256;
-               src = (const uint8_t *)src + 256;
-       }
-
-       /*
-        * We split the remaining bytes (which will be less than 256) into
-        * 64-byte (2^6) chunks.
-        * Using incrementing integers in the case labels of a switch statement
-        * encourages the compiler to use a jump table. To get incrementing
-        * integers, we shift the 2 relevant bits to the LSB position to first
-        * get decrementing integers, and then subtract.
-        */
-       switch (3 - (n >> 6)) {
-       case 0x00:
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               n -= 64;
-               dst = (uint8_t *)dst + 64;
-               src = (const uint8_t *)src + 64;      /* fallthrough */
-       case 0x01:
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               n -= 64;
-               dst = (uint8_t *)dst + 64;
-               src = (const uint8_t *)src + 64;      /* fallthrough */
-       case 0x02:
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               n -= 64;
-               dst = (uint8_t *)dst + 64;
-               src = (const uint8_t *)src + 64;      /* fallthrough */
-       default:
-               break;
-       }
-
-       /*
-        * We split the remaining bytes (which will be less than 64) into
-        * 16-byte (2^4) chunks, using the same switch structure as above.
-        */
-       switch (3 - (n >> 4)) {
-       case 0x00:
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               n -= 16;
-               dst = (uint8_t *)dst + 16;
-               src = (const uint8_t *)src + 16;      /* fallthrough */
-       case 0x01:
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               n -= 16;
-               dst = (uint8_t *)dst + 16;
-               src = (const uint8_t *)src + 16;      /* fallthrough */
-       case 0x02:
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               n -= 16;
-               dst = (uint8_t *)dst + 16;
-               src = (const uint8_t *)src + 16;      /* fallthrough */
-       default:
-               break;
-       }
-
-       /* Copy any remaining bytes, without going beyond end of buffers */
-       if (n != 0)
-               rte_mov16((uint8_t *)dst - 16 + n,
-                       (const uint8_t *)src - 16 + n);
-       return ret;
-}
-
-#else
-
-static inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 16);
-}
-
-static inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 32);
-}
-
-static inline void
-rte_mov48(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 48);
-}
-
-static inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 64);
-}
-
-static inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 128);
-}
-
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 256);
-}
-
-static inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
-{
-       return memcpy(dst, src, n);
-}
-
-#endif /* RTE_ARCH_ARM_NEON_MEMCPY */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MEMCPY_ARM32_H_ */
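The small-copy paths above rely on an overlapping head/tail trick: two
fixed-size moves whose ranges overlap in the middle cover every length in
between without a per-byte tail loop. A minimal stand-alone sketch of the
16-32 byte case, with copy16() as a hypothetical stand-in for rte_mov16():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper standing in for rte_mov16(). */
static void copy16(uint8_t *dst, const uint8_t *src)
{
        memcpy(dst, src, 16);
}

/* Copy 16..32 bytes: the second move starts at n - 16, so the two
 * 16-byte moves overlap and together cover exactly n bytes. */
static void copy_le32(uint8_t *dst, const uint8_t *src, size_t n)
{
        copy16(dst, src);                   /* bytes [0, 16)     */
        copy16(dst + n - 16, src + n - 16); /* bytes [n - 16, n) */
}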
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
deleted file mode 100644 (file)
index 85ad587..0000000
+++ /dev/null
@@ -1,372 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 Cavium, Inc
- */
-
-#ifndef _RTE_MEMCPY_ARM64_H_
-#define _RTE_MEMCPY_ARM64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include <string.h>
-
-#include "generic/rte_memcpy.h"
-
-#ifdef RTE_ARCH_ARM64_MEMCPY
-#include <rte_common.h>
-#include <rte_branch_prediction.h>
-
-/*
- * The memory copy performance differs on different AArch64 micro-architectures.
- * A recent glibc (e.g. 2.23 or later) also provides better memcpy() performance
- * than older glibc versions. Using a more recent glibc is always recommended,
- * as the entire system benefits from it.
- *
- * This implementation improves memory copy on some AArch64 micro-architectures
- * when an old glibc (e.g. 2.19, 2.17...) is being used. It is disabled by
- * default and needs "RTE_ARCH_ARM64_MEMCPY" defined to activate. It does not
- * always provide better performance than memcpy(), so users need to run the
- * unit test "memcpy_perf_autotest" and tune the parameters in the
- * customization section below for best performance.
- *
- * The compiler version also impacts rte_memcpy() performance. It has been
- * observed on some platforms that, for the same code, binaries compiled with
- * GCC 7.2.0 outperform those compiled with GCC 4.8.5.
- */
-
-/**************************************
- * Beginning of customization section
- **************************************/
-#ifndef RTE_ARM64_MEMCPY_ALIGN_MASK
-#define RTE_ARM64_MEMCPY_ALIGN_MASK ((RTE_CACHE_LINE_SIZE >> 3) - 1)
-#endif
-
-#ifndef RTE_ARM64_MEMCPY_STRICT_ALIGN
-/* Only src unalignment is treated as an unaligned copy */
-#define RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src) \
-       ((uintptr_t)(src) & RTE_ARM64_MEMCPY_ALIGN_MASK)
-#else
-/* Both dst and src unalignment are treated as an unaligned copy */
-#define RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src) \
-       (((uintptr_t)(dst) | (uintptr_t)(src)) & RTE_ARM64_MEMCPY_ALIGN_MASK)
-#endif
-
-
-/*
- * If the copy size is larger than the threshold, memcpy() will be used.
- * Run "memcpy_perf_autotest" to determine the proper threshold.
- */
-#ifdef RTE_ARM64_MEMCPY_ALIGNED_THRESHOLD
-#define USE_ALIGNED_RTE_MEMCPY(dst, src, n) \
-(!RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src) && \
-n <= (size_t)RTE_ARM64_MEMCPY_ALIGNED_THRESHOLD)
-#else
-#define USE_ALIGNED_RTE_MEMCPY(dst, src, n) \
-(!RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src))
-#endif
-#ifdef RTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD
-#define USE_UNALIGNED_RTE_MEMCPY(dst, src, n) \
-(RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src) && \
-n <= (size_t)RTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD)
-#else
-#define USE_UNALIGNED_RTE_MEMCPY(dst, src, n) \
-(RTE_ARM64_MEMCPY_IS_UNALIGNED_COPY(dst, src))
-#endif
-/*
- * The logic of USE_RTE_MEMCPY() can also be modified to best fit the platform.
- */
-#if defined(RTE_ARM64_MEMCPY_ALIGNED_THRESHOLD) \
-|| defined(RTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD)
-#define USE_RTE_MEMCPY(dst, src, n) \
-(USE_ALIGNED_RTE_MEMCPY(dst, src, n) || USE_UNALIGNED_RTE_MEMCPY(dst, src, n))
-#else
-#define USE_RTE_MEMCPY(dst, src, n) (1)
-#endif
-/**************************************
- * End of customization section
- **************************************/
-
-
-#if RTE_CC_IS_GNU && !defined RTE_ARM64_MEMCPY_SKIP_GCC_VER_CHECK
-#if (GCC_VERSION < 50400)
-#warning "The GCC version is quite old, which may result in sub-optimal \
-performance of the compiled code. It is suggested that at least GCC 5.4.0 \
-be used."
-#endif
-#endif
-
-static __rte_always_inline
-void rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-       __uint128_t *dst128 = (__uint128_t *)dst;
-       const __uint128_t *src128 = (const __uint128_t *)src;
-       *dst128 = *src128;
-}
-
-static __rte_always_inline
-void rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-       __uint128_t *dst128 = (__uint128_t *)dst;
-       const __uint128_t *src128 = (const __uint128_t *)src;
-       const __uint128_t x0 = src128[0], x1 = src128[1];
-       dst128[0] = x0;
-       dst128[1] = x1;
-}
-
-static __rte_always_inline
-void rte_mov48(uint8_t *dst, const uint8_t *src)
-{
-       __uint128_t *dst128 = (__uint128_t *)dst;
-       const __uint128_t *src128 = (const __uint128_t *)src;
-       const __uint128_t x0 = src128[0], x1 = src128[1], x2 = src128[2];
-       dst128[0] = x0;
-       dst128[1] = x1;
-       dst128[2] = x2;
-}
-
-static __rte_always_inline
-void rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-       __uint128_t *dst128 = (__uint128_t *)dst;
-       const __uint128_t *src128 = (const __uint128_t *)src;
-       const __uint128_t
-               x0 = src128[0], x1 = src128[1], x2 = src128[2], x3 = src128[3];
-       dst128[0] = x0;
-       dst128[1] = x1;
-       dst128[2] = x2;
-       dst128[3] = x3;
-}
-
-static __rte_always_inline
-void rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-       __uint128_t *dst128 = (__uint128_t *)dst;
-       const __uint128_t *src128 = (const __uint128_t *)src;
-       /* Keep below declaration & copy sequence for optimized instructions */
-       const __uint128_t
-               x0 = src128[0], x1 = src128[1], x2 = src128[2], x3 = src128[3];
-       dst128[0] = x0;
-       __uint128_t x4 = src128[4];
-       dst128[1] = x1;
-       __uint128_t x5 = src128[5];
-       dst128[2] = x2;
-       __uint128_t x6 = src128[6];
-       dst128[3] = x3;
-       __uint128_t x7 = src128[7];
-       dst128[4] = x4;
-       dst128[5] = x5;
-       dst128[6] = x6;
-       dst128[7] = x7;
-}
-
-static __rte_always_inline
-void rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov128(dst, src);
-       rte_mov128(dst + 128, src + 128);
-}
-
-static __rte_always_inline void
-rte_memcpy_lt16(uint8_t *dst, const uint8_t *src, size_t n)
-{
-       if (n & 0x08) {
-               /* copy 8 ~ 15 bytes */
-               *(uint64_t *)dst = *(const uint64_t *)src;
-               *(uint64_t *)(dst - 8 + n) = *(const uint64_t *)(src - 8 + n);
-       } else if (n & 0x04) {
-               /* copy 4 ~ 7 bytes */
-               *(uint32_t *)dst = *(const uint32_t *)src;
-               *(uint32_t *)(dst - 4 + n) = *(const uint32_t *)(src - 4 + n);
-       } else if (n & 0x02) {
-               /* copy 2 ~ 3 bytes */
-               *(uint16_t *)dst = *(const uint16_t *)src;
-               *(uint16_t *)(dst - 2 + n) = *(const uint16_t *)(src - 2 + n);
-       } else if (n & 0x01) {
-               /* copy 1 byte */
-               *dst = *src;
-       }
-}
-
-static __rte_always_inline
-void rte_memcpy_ge16_lt128(uint8_t *dst, const uint8_t *src, size_t n)
-{
-       if (n < 64) {
-               if (n == 16) {
-                       rte_mov16(dst, src);
-               } else if (n <= 32) {
-                       rte_mov16(dst, src);
-                       rte_mov16(dst - 16 + n, src - 16 + n);
-               } else if (n <= 48) {
-                       rte_mov32(dst, src);
-                       rte_mov16(dst - 16 + n, src - 16 + n);
-               } else {
-                       rte_mov48(dst, src);
-                       rte_mov16(dst - 16 + n, src - 16 + n);
-               }
-       } else {
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               if (n > 48 + 64)
-                       rte_mov64(dst - 64 + n, src - 64 + n);
-               else if (n > 32 + 64)
-                       rte_mov48(dst - 48 + n, src - 48 + n);
-               else if (n > 16 + 64)
-                       rte_mov32(dst - 32 + n, src - 32 + n);
-               else if (n > 64)
-                       rte_mov16(dst - 16 + n, src - 16 + n);
-       }
-}
-
-static __rte_always_inline
-void rte_memcpy_ge128(uint8_t *dst, const uint8_t *src, size_t n)
-{
-       do {
-               rte_mov128(dst, src);
-               src += 128;
-               dst += 128;
-               n -= 128;
-       } while (likely(n >= 128));
-
-       if (likely(n)) {
-               if (n <= 16)
-                       rte_mov16(dst - 16 + n, src - 16 + n);
-               else if (n <= 32)
-                       rte_mov32(dst - 32 + n, src - 32 + n);
-               else if (n <= 48)
-                       rte_mov48(dst - 48 + n, src - 48 + n);
-               else if (n <= 64)
-                       rte_mov64(dst - 64 + n, src - 64 + n);
-               else
-                       rte_memcpy_ge16_lt128(dst, src, n);
-       }
-}
-
-static __rte_always_inline
-void rte_memcpy_ge16_lt64(uint8_t *dst, const uint8_t *src, size_t n)
-{
-       if (n == 16) {
-               rte_mov16(dst, src);
-       } else if (n <= 32) {
-               rte_mov16(dst, src);
-               rte_mov16(dst - 16 + n, src - 16 + n);
-       } else if (n <= 48) {
-               rte_mov32(dst, src);
-               rte_mov16(dst - 16 + n, src - 16 + n);
-       } else {
-               rte_mov48(dst, src);
-               rte_mov16(dst - 16 + n, src - 16 + n);
-       }
-}
-
-static __rte_always_inline
-void rte_memcpy_ge64(uint8_t *dst, const uint8_t *src, size_t n)
-{
-       do {
-               rte_mov64(dst, src);
-               src += 64;
-               dst += 64;
-               n -= 64;
-       } while (likely(n >= 64));
-
-       if (likely(n)) {
-               if (n <= 16)
-                       rte_mov16(dst - 16 + n, src - 16 + n);
-               else if (n <= 32)
-                       rte_mov32(dst - 32 + n, src - 32 + n);
-               else if (n <= 48)
-                       rte_mov48(dst - 48 + n, src - 48 + n);
-               else
-                       rte_mov64(dst - 64 + n, src - 64 + n);
-       }
-}
-
-#if RTE_CACHE_LINE_SIZE >= 128
-static __rte_always_inline
-void *rte_memcpy(void *dst, const void *src, size_t n)
-{
-       if (n < 16) {
-               rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
-               return dst;
-       }
-       if (n < 128) {
-               rte_memcpy_ge16_lt128((uint8_t *)dst, (const uint8_t *)src, n);
-               return dst;
-       }
-       __builtin_prefetch(src, 0, 0);
-       __builtin_prefetch(dst, 1, 0);
-       if (likely(USE_RTE_MEMCPY(dst, src, n))) {
-               rte_memcpy_ge128((uint8_t *)dst, (const uint8_t *)src, n);
-               return dst;
-       } else
-               return memcpy(dst, src, n);
-}
-
-#else
-static __rte_always_inline
-void *rte_memcpy(void *dst, const void *src, size_t n)
-{
-       if (n < 16) {
-               rte_memcpy_lt16((uint8_t *)dst, (const uint8_t *)src, n);
-               return dst;
-       }
-       if (n < 64) {
-               rte_memcpy_ge16_lt64((uint8_t *)dst, (const uint8_t *)src, n);
-               return dst;
-       }
-       __builtin_prefetch(src, 0, 0);
-       __builtin_prefetch(dst, 1, 0);
-       if (likely(USE_RTE_MEMCPY(dst, src, n))) {
-               rte_memcpy_ge64((uint8_t *)dst, (const uint8_t *)src, n);
-               return dst;
-       } else
-               return memcpy(dst, src, n);
-}
-#endif /* RTE_CACHE_LINE_SIZE >= 128 */
-
-#else
-static inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 16);
-}
-
-static inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 32);
-}
-
-static inline void
-rte_mov48(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 48);
-}
-
-static inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 64);
-}
-
-static inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 128);
-}
-
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-       memcpy(dst, src, 256);
-}
-
-#define rte_memcpy(d, s, n)    memcpy((d), (s), (n))
-
-#endif /* RTE_ARCH_ARM64_MEMCPY */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MEMCPY_ARM64_H_ */
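The customization macros above are driven entirely by build-time defines. A
sketch of how a build might enable the custom path; the threshold values here
are assumptions for illustration only and must be tuned with the
"memcpy_perf_autotest" unit test:

/* Illustrative build flags (values are assumptions, not recommendations):
 *   -DRTE_ARCH_ARM64_MEMCPY
 *   -DRTE_ARM64_MEMCPY_ALIGNED_THRESHOLD=2048
 *   -DRTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD=512
 */
#include <stddef.h>
#include <rte_memcpy.h>

void copy_burst(void *dst, const void *src, size_t n)
{
        /* Below the threshold and suitably aligned: the inline
         * __uint128_t path; otherwise: libc memcpy(). */
        rte_memcpy(dst, src, n);
}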
diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause.h b/lib/librte_eal/common/include/arch/arm/rte_pause.h
deleted file mode 100644 (file)
index 6c7002a..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#ifndef _RTE_PAUSE_ARM_H_
-#define _RTE_PAUSE_ARM_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef RTE_ARCH_64
-#include <rte_pause_64.h>
-#else
-#include <rte_pause_32.h>
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PAUSE_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_32.h b/lib/librte_eal/common/include/arch/arm/rte_pause_32.h
deleted file mode 100644 (file)
index d4768c7..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#ifndef _RTE_PAUSE_ARM32_H_
-#define _RTE_PAUSE_ARM32_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include "generic/rte_pause.h"
-
-static inline void rte_pause(void)
-{
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PAUSE_ARM32_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
deleted file mode 100644 (file)
index e87d10b..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- * Copyright(c) 2019 Arm Limited
- */
-
-#ifndef _RTE_PAUSE_ARM64_H_
-#define _RTE_PAUSE_ARM64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-
-#ifdef RTE_ARM_USE_WFE
-#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
-#endif
-
-#include "generic/rte_pause.h"
-
-static inline void rte_pause(void)
-{
-       asm volatile("yield" ::: "memory");
-}
-
-#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
-
-/* Send an event to quit WFE. */
-#define __SEVL() { asm volatile("sevl" : : : "memory"); }
-
-/* Put processor into low power WFE(Wait For Event) state. */
-#define __WFE() { asm volatile("wfe" : : : "memory"); }
-
-static __rte_always_inline void
-rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
-               int memorder)
-{
-       uint16_t value;
-
-       assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
-       /*
-        * Atomic exclusive load from addr: it returns the 16-bit content of
-        * *addr while making it 'monitored'. When it is written by someone
-        * else, the 'monitored' state is cleared and an event is generated
-        * implicitly to exit WFE.
-        */
-#define __LOAD_EXC_16(src, dst, memorder) {               \
-       if (memorder == __ATOMIC_RELAXED) {               \
-               asm volatile("ldxrh %w[tmp], [%x[addr]]"  \
-                       : [tmp] "=&r" (dst)               \
-                       : [addr] "r"(src)                 \
-                       : "memory");                      \
-       } else {                                          \
-               asm volatile("ldaxrh %w[tmp], [%x[addr]]" \
-                       : [tmp] "=&r" (dst)               \
-                       : [addr] "r"(src)                 \
-                       : "memory");                      \
-       } }
-
-       __LOAD_EXC_16(addr, value, memorder)
-       if (value != expected) {
-               __SEVL()
-               do {
-                       __WFE()
-                       __LOAD_EXC_16(addr, value, memorder)
-               } while (value != expected);
-       }
-#undef __LOAD_EXC_16
-}
-
-static __rte_always_inline void
-rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
-               int memorder)
-{
-       uint32_t value;
-
-       assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
-       /*
-        * Atomic exclusive load from addr: it returns the 32-bit content of
-        * *addr while making it 'monitored'. When it is written by someone
-        * else, the 'monitored' state is cleared and an event is generated
-        * implicitly to exit WFE.
-        */
-#define __LOAD_EXC_32(src, dst, memorder) {              \
-       if (memorder == __ATOMIC_RELAXED) {              \
-               asm volatile("ldxr %w[tmp], [%x[addr]]"  \
-                       : [tmp] "=&r" (dst)              \
-                       : [addr] "r"(src)                \
-                       : "memory");                     \
-       } else {                                         \
-               asm volatile("ldaxr %w[tmp], [%x[addr]]" \
-                       : [tmp] "=&r" (dst)              \
-                       : [addr] "r"(src)                \
-                       : "memory");                     \
-       } }
-
-       __LOAD_EXC_32(addr, value, memorder)
-       if (value != expected) {
-               __SEVL()
-               do {
-                       __WFE()
-                       __LOAD_EXC_32(addr, value, memorder)
-               } while (value != expected);
-       }
-#undef __LOAD_EXC_32
-}
-
-static __rte_always_inline void
-rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
-               int memorder)
-{
-       uint64_t value;
-
-       assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
-       /*
-        * Atomic exclusive load from addr: it returns the 64-bit content of
-        * *addr while making it 'monitored'. When it is written by someone
-        * else, the 'monitored' state is cleared and an event is generated
-        * implicitly to exit WFE.
-        */
-#define __LOAD_EXC_64(src, dst, memorder) {              \
-       if (memorder == __ATOMIC_RELAXED) {              \
-               asm volatile("ldxr %x[tmp], [%x[addr]]"  \
-                       : [tmp] "=&r" (dst)              \
-                       : [addr] "r"(src)                \
-                       : "memory");                     \
-       } else {                                         \
-               asm volatile("ldaxr %x[tmp], [%x[addr]]" \
-                       : [tmp] "=&r" (dst)              \
-                       : [addr] "r"(src)                \
-                       : "memory");                     \
-       } }
-
-       __LOAD_EXC_64(addr, value, memorder)
-       if (value != expected) {
-               __SEVL()
-               do {
-                       __WFE()
-                       __LOAD_EXC_64(addr, value, memorder)
-               } while (value != expected);
-       }
-#undef __LOAD_EXC_64
-}
-
-#undef __SEVL
-#undef __WFE
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PAUSE_ARM64_H_ */
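A sketch of how the wait-until-equal helpers are typically used; "ready" is a
hypothetical flag written by another lcore. With RTE_ARM_USE_WFE the waiting
core sleeps in WFE between checks instead of busy-spinning:

#include <stdint.h>
#include <rte_pause.h>

static volatile uint32_t ready; /* hypothetical flag set by another lcore */

void wait_for_ready(void)
{
        /* Monitors 'ready' with LDAXR/WFE when RTE_ARM_USE_WFE is set;
         * otherwise the generic implementation spins with rte_pause(). */
        rte_wait_until_equal_32(&ready, 1, __ATOMIC_ACQUIRE);
}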
diff --git a/lib/librte_eal/common/include/arch/arm/rte_prefetch.h b/lib/librte_eal/common/include/arch/arm/rte_prefetch.h
deleted file mode 100644 (file)
index 27870c2..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_PREFETCH_ARM_H_
-#define _RTE_PREFETCH_ARM_H_
-
-#ifdef RTE_ARCH_64
-#include <rte_prefetch_64.h>
-#else
-#include <rte_prefetch_32.h>
-#endif
-
-#endif /* _RTE_PREFETCH_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_prefetch_32.h b/lib/librte_eal/common/include/arch/arm/rte_prefetch_32.h
deleted file mode 100644 (file)
index e53420a..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_PREFETCH_ARM32_H_
-#define _RTE_PREFETCH_ARM32_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include "generic/rte_prefetch.h"
-
-static inline void rte_prefetch0(const volatile void *p)
-{
-       asm volatile ("pld [%0]" : : "r" (p));
-}
-
-static inline void rte_prefetch1(const volatile void *p)
-{
-       asm volatile ("pld [%0]" : : "r" (p));
-}
-
-static inline void rte_prefetch2(const volatile void *p)
-{
-       asm volatile ("pld [%0]" : : "r" (p));
-}
-
-static inline void rte_prefetch_non_temporal(const volatile void *p)
-{
-       /* non-temporal version not available, fallback to rte_prefetch0 */
-       rte_prefetch0(p);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PREFETCH_ARM32_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_prefetch_64.h b/lib/librte_eal/common/include/arch/arm/rte_prefetch_64.h
deleted file mode 100644 (file)
index fc2b391..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 Cavium, Inc
- */
-
-#ifndef _RTE_PREFETCH_ARM_64_H_
-#define _RTE_PREFETCH_ARM_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include "generic/rte_prefetch.h"
-
-static inline void rte_prefetch0(const volatile void *p)
-{
-       asm volatile ("PRFM PLDL1KEEP, [%0]" : : "r" (p));
-}
-
-static inline void rte_prefetch1(const volatile void *p)
-{
-       asm volatile ("PRFM PLDL2KEEP, [%0]" : : "r" (p));
-}
-
-static inline void rte_prefetch2(const volatile void *p)
-{
-       asm volatile ("PRFM PLDL3KEEP, [%0]" : : "r" (p));
-}
-
-static inline void rte_prefetch_non_temporal(const volatile void *p)
-{
-       asm volatile ("PRFM PLDL1STRM, [%0]" : : "r" (p));
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PREFETCH_ARM_64_H_ */
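The PLD*KEEP forms map to the three temporal-locality levels (L1-L3). A common
usage pattern, sketched here with hypothetical buffer-processing names, is to
prefetch a few iterations ahead of the element currently being touched:

#include <stdint.h>
#include <rte_prefetch.h>

/* Hypothetical loop: prefetch 3 elements ahead so each buffer is in
 * L1 (PRFM PLDL1KEEP) by the time the loop body touches it. */
void process_bufs(uint8_t **bufs, unsigned int n)
{
        for (unsigned int i = 0; i < n; i++) {
                if (i + 3 < n)
                        rte_prefetch0(bufs[i + 3]);
                /* ... touch bufs[i] here ... */
        }
}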
diff --git a/lib/librte_eal/common/include/arch/arm/rte_rwlock.h b/lib/librte_eal/common/include/arch/arm/rte_rwlock.h
deleted file mode 100644 (file)
index 18bb37b..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- */
-/* copied from ppc_64 */
-
-#ifndef _RTE_RWLOCK_ARM_H_
-#define _RTE_RWLOCK_ARM_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_rwlock.h"
-
-static inline void
-rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
-{
-       rte_rwlock_read_lock(rwl);
-}
-
-static inline void
-rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
-{
-       rte_rwlock_read_unlock(rwl);
-}
-
-static inline void
-rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
-{
-       rte_rwlock_write_lock(rwl);
-}
-
-static inline void
-rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
-{
-       rte_rwlock_write_unlock(rwl);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_RWLOCK_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_spinlock.h b/lib/librte_eal/common/include/arch/arm/rte_spinlock.h
deleted file mode 100644 (file)
index 1a6916b..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- */
-
-#ifndef _RTE_SPINLOCK_ARM_H_
-#define _RTE_SPINLOCK_ARM_H_
-
-#ifndef RTE_FORCE_INTRINSICS
-#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include "generic/rte_spinlock.h"
-
-static inline int rte_tm_supported(void)
-{
-       return 0;
-}
-
-static inline void
-rte_spinlock_lock_tm(rte_spinlock_t *sl)
-{
-       rte_spinlock_lock(sl); /* fall-back */
-}
-
-static inline int
-rte_spinlock_trylock_tm(rte_spinlock_t *sl)
-{
-       return rte_spinlock_trylock(sl);
-}
-
-static inline void
-rte_spinlock_unlock_tm(rte_spinlock_t *sl)
-{
-       rte_spinlock_unlock(sl);
-}
-
-static inline void
-rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
-{
-       rte_spinlock_recursive_lock(slr); /* fall-back */
-}
-
-static inline void
-rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
-{
-       rte_spinlock_recursive_unlock(slr);
-}
-
-static inline int
-rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
-{
-       return rte_spinlock_recursive_trylock(slr);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_SPINLOCK_ARM_H_ */
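Since rte_tm_supported() returns 0 on ARM, the _tm variants are plain lock
calls; code written against them still runs unchanged on architectures with
hardware transactional memory. A minimal usage sketch:

#include <rte_spinlock.h>

static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;

void update_shared_state(void)
{
        /* On ARM this is exactly rte_spinlock_lock(); on x86 with TSX it
         * may first attempt a hardware transaction. */
        rte_spinlock_lock_tm(&lock);
        /* ... critical section ... */
        rte_spinlock_unlock_tm(&lock);
}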
diff --git a/lib/librte_eal/common/include/arch/arm/rte_ticketlock.h b/lib/librte_eal/common/include/arch/arm/rte_ticketlock.h
deleted file mode 100644 (file)
index e09fbd6..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Arm Limited
- */
-
-#ifndef _RTE_TICKETLOCK_ARM_H_
-#define _RTE_TICKETLOCK_ARM_H_
-
-#ifndef RTE_FORCE_INTRINSICS
-#  error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_ticketlock.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_TICKETLOCK_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_vect.h b/lib/librte_eal/common/include/arch/arm/rte_vect.h
deleted file mode 100644 (file)
index 454ac7e..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 Cavium, Inc
- */
-
-#ifndef _RTE_VECT_ARM_H_
-#define _RTE_VECT_ARM_H_
-
-#include <stdint.h>
-#include "generic/rte_vect.h"
-#include "rte_debug.h"
-#include "arm_neon.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef int32x4_t xmm_t;
-
-#define        XMM_SIZE        (sizeof(xmm_t))
-#define        XMM_MASK        (XMM_SIZE - 1)
-
-typedef union rte_xmm {
-       xmm_t    x;
-       uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
-       uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
-       uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
-       uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
-       double   pd[XMM_SIZE / sizeof(double)];
-} __attribute__((aligned(16))) rte_xmm_t;
-
-#ifdef RTE_ARCH_ARM
-/* NEON intrinsic vqtbl1q_u8() is not supported on ARMv7-A (AArch32) */
-static __inline uint8x16_t
-vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
-{
-       uint8_t i, pos;
-       rte_xmm_t rte_a, rte_b, rte_ret;
-
-       vst1q_u8(rte_a.u8, a);
-       vst1q_u8(rte_b.u8, b);
-
-       for (i = 0; i < 16; i++) {
-               pos = rte_b.u8[i];
-               if (pos < 16)
-                       rte_ret.u8[i] = rte_a.u8[pos];
-               else
-                       rte_ret.u8[i] = 0;
-       }
-
-       return vld1q_u8(rte_ret.u8);
-}
-
-static inline uint16_t
-vaddvq_u16(uint16x8_t a)
-{
-       uint32x4_t m = vpaddlq_u16(a);
-       uint64x2_t n = vpaddlq_u32(m);
-       uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);
-
-       return vget_lane_u32((uint32x2_t)o, 0);
-}
-
-#endif
-
-#if RTE_CC_IS_GNU && (GCC_VERSION < 70000)
-static inline uint32x4_t
-vcopyq_laneq_u32(uint32x4_t a, const int lane_a,
-                uint32x4_t b, const int lane_b)
-{
-       return vsetq_lane_u32(vgetq_lane_u32(b, lane_b), a, lane_a);
-}
-#endif
-
-#if defined(RTE_ARCH_ARM64)
-#if RTE_CC_IS_GNU && (GCC_VERSION < 70000)
-
-#if (GCC_VERSION < 40900)
-typedef uint64_t poly64_t;
-typedef uint64x2_t poly64x2_t;
-typedef uint8_t poly128_t __attribute__((vector_size(16), aligned(16)));
-
-static inline uint32x4_t
-vceqzq_u32(uint32x4_t a)
-{
-       return (a == 0);
-}
-#endif
-
-/* NEON intrinsic vreinterpretq_u64_p128() is supported since GCC version 7 */
-static inline uint64x2_t
-vreinterpretq_u64_p128(poly128_t x)
-{
-       return (uint64x2_t)x;
-}
-
-/* NEON intrinsic vreinterpretq_p64_u64() is supported since GCC version 7 */
-static inline poly64x2_t
-vreinterpretq_p64_u64(uint64x2_t x)
-{
-       return (poly64x2_t)x;
-}
-
-/* NEON intrinsic vgetq_lane_p64() is supported since GCC version 7 */
-static inline poly64_t
-vgetq_lane_p64(poly64x2_t x, const int lane)
-{
-       RTE_ASSERT(lane >= 0 && lane <= 1);
-
-       poly64_t *p = (poly64_t *)&x;
-
-       return p[lane];
-}
-#endif
-#endif
-
-/*
- * If (0 <= index <= 15), call the ASIMD EXT instruction on the
- * 128-bit registers v0 and v1 with the appropriate index.
- *
- * Otherwise, return a zero vector.
- */
-static inline uint8x16_t
-vextract(uint8x16_t v0, uint8x16_t v1, const int index)
-{
-       switch (index) {
-       case 0: return vextq_u8(v0, v1, 0);
-       case 1: return vextq_u8(v0, v1, 1);
-       case 2: return vextq_u8(v0, v1, 2);
-       case 3: return vextq_u8(v0, v1, 3);
-       case 4: return vextq_u8(v0, v1, 4);
-       case 5: return vextq_u8(v0, v1, 5);
-       case 6: return vextq_u8(v0, v1, 6);
-       case 7: return vextq_u8(v0, v1, 7);
-       case 8: return vextq_u8(v0, v1, 8);
-       case 9: return vextq_u8(v0, v1, 9);
-       case 10: return vextq_u8(v0, v1, 10);
-       case 11: return vextq_u8(v0, v1, 11);
-       case 12: return vextq_u8(v0, v1, 12);
-       case 13: return vextq_u8(v0, v1, 13);
-       case 14: return vextq_u8(v0, v1, 14);
-       case 15: return vextq_u8(v0, v1, 15);
-       }
-       return vdupq_n_u8(0);
-}
-
-/**
- * Shift a 128-bit register right by the specified number of bytes.
- *
- * The shift parameter must be in the range 0-16.
- */
-static inline uint64x2_t
-vshift_bytes_right(uint64x2_t reg, const unsigned int shift)
-{
-       return vreinterpretq_u64_u8(vextract(
-                               vreinterpretq_u8_u64(reg),
-                               vdupq_n_u8(0),
-                               shift));
-}
-
-/**
- * Shift a 128-bit register left by the specified number of bytes.
- *
- * The shift parameter must be in the range 0-16.
- */
-static inline uint64x2_t
-vshift_bytes_left(uint64x2_t reg, const unsigned int shift)
-{
-       return vreinterpretq_u64_u8(vextract(
-                               vdupq_n_u8(0),
-                               vreinterpretq_u8_u64(reg),
-                               16 - shift));
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
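vextract() exists because the EXT instruction requires an immediate index: the
switch lets a runtime value select among the 16 immediate forms, at the cost
of a jump table. A small usage sketch of the byte-shift wrappers built on it:

#include <stdint.h>
#include <rte_vect.h>

/* Discard the 4 lowest bytes of a 128-bit register; the vacated
 * high bytes are filled with zeroes. */
uint64x2_t drop_low_4_bytes(uint64x2_t reg)
{
        return vshift_bytes_right(reg, 4);
}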
diff --git a/lib/librte_eal/common/include/arch/ppc b/lib/librte_eal/common/include/arch/ppc
deleted file mode 120000 (symlink)
index 2db6a0c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-ppc_64
\ No newline at end of file
diff --git a/lib/librte_eal/common/include/arch/ppc_64/meson.build b/lib/librte_eal/common/include/arch/ppc_64/meson.build
deleted file mode 100644 (file)
index 00f9611..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
-
-install_headers(
-       'rte_atomic.h',
-       'rte_byteorder.h',
-       'rte_cpuflags.h',
-       'rte_cycles.h',
-       'rte_io.h',
-       'rte_memcpy.h',
-       'rte_pause.h',
-       'rte_prefetch.h',
-       'rte_rwlock.h',
-       'rte_spinlock.h',
-       'rte_vect.h',
-       subdir: get_option('include_subdir_arch'))
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
deleted file mode 100644 (file)
index 7e3e131..0000000
+++ /dev/null
@@ -1,413 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
- * Copyright (c) 2008 Marcel Moolenaar
- * Copyright (c) 2001 Benno Rice
- * Copyright (c) 2001 David E. O'Brien
- * Copyright (c) 1998 Doug Rabson
- * All rights reserved.
- */
-
-#ifndef _RTE_ATOMIC_PPC_64_H_
-#define _RTE_ATOMIC_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include "generic/rte_atomic.h"
-
-#define        rte_mb()  asm volatile("sync" : : : "memory")
-
-#define        rte_wmb() asm volatile("sync" : : : "memory")
-
-#define        rte_rmb() asm volatile("sync" : : : "memory")
-
-#define rte_smp_mb() rte_mb()
-
-#define rte_smp_wmb() rte_wmb()
-
-#define rte_smp_rmb() rte_rmb()
-
-#define rte_io_mb() rte_mb()
-
-#define rte_io_wmb() rte_wmb()
-
-#define rte_io_rmb() rte_rmb()
-
-#define rte_cio_wmb() rte_wmb()
-
-#define rte_cio_rmb() rte_rmb()
-
-/*------------------------- 16 bit atomic operations -------------------------*/
-/* To be compatible with Power7, use GCC built-in functions for 16-bit
- * operations */
-
-#ifndef RTE_FORCE_INTRINSICS
-static inline int
-rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
-{
-       return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
-               __ATOMIC_ACQUIRE) ? 1 : 0;
-}
-
-static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
-{
-       return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
-}
-
-static inline void
-rte_atomic16_inc(rte_atomic16_t *v)
-{
-       __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
-}
-
-static inline void
-rte_atomic16_dec(rte_atomic16_t *v)
-{
-       __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
-}
-
-static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
-{
-       return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
-}
-
-static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
-{
-       return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
-}
-
-static inline uint16_t
-rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
-{
-       return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
-}
-
-/*------------------------- 32 bit atomic operations -------------------------*/
-
-static inline int
-rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
-{
-       unsigned int ret = 0;
-
-       asm volatile(
-                       "\tlwsync\n"
-                       "1:\tlwarx %[ret], 0, %[dst]\n"
-                       "cmplw %[exp], %[ret]\n"
-                       "bne 2f\n"
-                       "stwcx. %[src], 0, %[dst]\n"
-                       "bne- 1b\n"
-                       "li %[ret], 1\n"
-                       "b 3f\n"
-                       "2:\n"
-                       "stwcx. %[ret], 0, %[dst]\n"
-                       "li %[ret], 0\n"
-                       "3:\n"
-                       "isync\n"
-                       : [ret] "=&r" (ret), "=m" (*dst)
-                       : [dst] "r" (dst),
-                         [exp] "r" (exp),
-                         [src] "r" (src),
-                         "m" (*dst)
-                       : "cc", "memory");
-
-       return ret;
-}
-
-static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
-{
-       return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
-}
-
-static inline void
-rte_atomic32_inc(rte_atomic32_t *v)
-{
-       int t;
-
-       asm volatile(
-                       "1: lwarx %[t],0,%[cnt]\n"
-                       "addic %[t],%[t],1\n"
-                       "stwcx. %[t],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       : [t] "=&r" (t), "=m" (v->cnt)
-                       : [cnt] "r" (&v->cnt), "m" (v->cnt)
-                       : "cc", "xer", "memory");
-}
-
-static inline void
-rte_atomic32_dec(rte_atomic32_t *v)
-{
-       int t;
-
-       asm volatile(
-                       "1: lwarx %[t],0,%[cnt]\n"
-                       "addic %[t],%[t],-1\n"
-                       "stwcx. %[t],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       : [t] "=&r" (t), "=m" (v->cnt)
-                       : [cnt] "r" (&v->cnt), "m" (v->cnt)
-                       : "cc", "xer", "memory");
-}
-
-static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
-{
-       int ret;
-
-       asm volatile(
-                       "\n\tlwsync\n"
-                       "1: lwarx %[ret],0,%[cnt]\n"
-                       "addic  %[ret],%[ret],1\n"
-                       "stwcx. %[ret],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       "isync\n"
-                       : [ret] "=&r" (ret)
-                       : [cnt] "r" (&v->cnt)
-                       : "cc", "xer", "memory");
-
-       return ret == 0;
-}
-
-static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
-{
-       int ret;
-
-       asm volatile(
-                       "\n\tlwsync\n"
-                       "1: lwarx %[ret],0,%[cnt]\n"
-                       "addic %[ret],%[ret],-1\n"
-                       "stwcx. %[ret],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       "isync\n"
-                       : [ret] "=&r" (ret)
-                       : [cnt] "r" (&v->cnt)
-                       : "cc", "xer", "memory");
-
-       return ret == 0;
-}
-
-static inline uint32_t
-rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
-{
-       return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
-}
-
-/*------------------------- 64 bit atomic operations -------------------------*/
-
-static inline int
-rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
-{
-       unsigned int ret = 0;
-
-       asm volatile (
-                       "\tlwsync\n"
-                       "1: ldarx %[ret], 0, %[dst]\n"
-                       "cmpld %[exp], %[ret]\n"
-                       "bne 2f\n"
-                       "stdcx. %[src], 0, %[dst]\n"
-                       "bne- 1b\n"
-                       "li %[ret], 1\n"
-                       "b 3f\n"
-                       "2:\n"
-                       "stdcx. %[ret], 0, %[dst]\n"
-                       "li %[ret], 0\n"
-                       "3:\n"
-                       "isync\n"
-                       : [ret] "=&r" (ret), "=m" (*dst)
-                       : [dst] "r" (dst),
-                         [exp] "r" (exp),
-                         [src] "r" (src),
-                         "m" (*dst)
-                       : "cc", "memory");
-       return ret;
-}
-
-static inline void
-rte_atomic64_init(rte_atomic64_t *v)
-{
-       v->cnt = 0;
-}
-
-static inline int64_t
-rte_atomic64_read(rte_atomic64_t *v)
-{
-       long ret;
-
-       asm volatile("ld%U1%X1 %[ret],%[cnt]"
-               : [ret] "=r"(ret)
-               : [cnt] "m"(v->cnt));
-
-       return ret;
-}
-
-static inline void
-rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
-{
-       asm volatile("std%U0%X0 %[new_value],%[cnt]"
-               : [cnt] "=m"(v->cnt)
-               : [new_value] "r"(new_value));
-}
-
-static inline void
-rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
-{
-       long t;
-
-       asm volatile(
-                       "1: ldarx %[t],0,%[cnt]\n"
-                       "add %[t],%[inc],%[t]\n"
-                       "stdcx. %[t],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       : [t] "=&r" (t), "=m" (v->cnt)
-                       : [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
-                       : "cc", "memory");
-}
-
-static inline void
-rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
-{
-       long t;
-
-       asm volatile(
-                       "1: ldarx %[t],0,%[cnt]\n"
-                       "subf %[t],%[dec],%[t]\n"
-                       "stdcx. %[t],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       : [t] "=&r" (t), "+m" (v->cnt)
-                       : [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
-                       : "cc", "memory");
-}
-
-static inline void
-rte_atomic64_inc(rte_atomic64_t *v)
-{
-       long t;
-
-       asm volatile(
-                       "1: ldarx %[t],0,%[cnt]\n"
-                       "addic %[t],%[t],1\n"
-                       "stdcx. %[t],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       : [t] "=&r" (t), "+m" (v->cnt)
-                       : [cnt] "r" (&v->cnt), "m" (v->cnt)
-                       : "cc", "xer", "memory");
-}
-
-static inline void
-rte_atomic64_dec(rte_atomic64_t *v)
-{
-       long t;
-
-       asm volatile(
-                       "1: ldarx %[t],0,%[cnt]\n"
-                       "addic %[t],%[t],-1\n"
-                       "stdcx. %[t],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       : [t] "=&r" (t), "+m" (v->cnt)
-                       : [cnt] "r" (&v->cnt), "m" (v->cnt)
-                       : "cc", "xer", "memory");
-}
-
-static inline int64_t
-rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
-{
-       long ret;
-
-       asm volatile(
-                       "\n\tlwsync\n"
-                       "1: ldarx %[ret],0,%[cnt]\n"
-                       "add %[ret],%[inc],%[ret]\n"
-                       "stdcx. %[ret],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       "isync\n"
-                       : [ret] "=&r" (ret)
-                       : [inc] "r" (inc), [cnt] "r" (&v->cnt)
-                       : "cc", "memory");
-
-       return ret;
-}
-
-static inline int64_t
-rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
-{
-       long ret;
-
-       asm volatile(
-                       "\n\tlwsync\n"
-                       "1: ldarx %[ret],0,%[cnt]\n"
-                       "subf %[ret],%[dec],%[ret]\n"
-                       "stdcx. %[ret],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       "isync\n"
-                       : [ret] "=&r" (ret)
-                       : [dec] "r" (dec), [cnt] "r" (&v->cnt)
-                       : "cc", "memory");
-
-       return ret;
-}
-
-static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
-{
-       long ret;
-
-       asm volatile(
-                       "\n\tlwsync\n"
-                       "1: ldarx %[ret],0,%[cnt]\n"
-                       "addic %[ret],%[ret],1\n"
-                       "stdcx. %[ret],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       "isync\n"
-                       : [ret] "=&r" (ret)
-                       : [cnt] "r" (&v->cnt)
-                       : "cc", "xer", "memory");
-
-       return ret == 0;
-}
-
-static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
-{
-       long ret;
-
-       asm volatile(
-                       "\n\tlwsync\n"
-                       "1: ldarx %[ret],0,%[cnt]\n"
-                       "addic %[ret],%[ret],-1\n"
-                       "stdcx. %[ret],0,%[cnt]\n"
-                       "bne- 1b\n"
-                       "isync\n"
-                       : [ret] "=&r" (ret)
-                       : [cnt] "r" (&v->cnt)
-                       : "cc", "xer", "memory");
-
-       return ret == 0;
-}
-
-static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
-{
-       return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
-}
-/**
- * Atomically set a 64-bit counter to 0.
- *
- * @param v
- *   A pointer to the atomic counter.
- */
-static inline void rte_atomic64_clear(rte_atomic64_t *v)
-{
-       v->cnt = 0;
-}
-
-static inline uint64_t
-rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
-{
-       return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
-}
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_ATOMIC_PPC_64_H_ */
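The cmpset primitives above wrap a lwarx/stwcx. load-reserve/store-conditional
loop, which retries automatically if the reservation is lost. A minimal
lock-free usage sketch with hypothetical names:

#include <stdint.h>
#include <rte_atomic.h>

/* Claim a slot by moving it from 0 (free) to a non-zero owner id;
 * returns 1 on success, 0 if another thread won the race. */
int try_claim(volatile uint32_t *slot, uint32_t owner)
{
        return rte_atomic32_cmpset(slot, 0, owner);
}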
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_byteorder.h b/lib/librte_eal/common/include/arch/ppc_64/rte_byteorder.h
deleted file mode 100644 (file)
index bfdded4..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- * Inspired from FreeBSD src/sys/powerpc/include/endian.h
- * Copyright (c) 1987, 1991, 1993
- * The Regents of the University of California.  All rights reserved.
- */
-
-#ifndef _RTE_BYTEORDER_PPC_64_H_
-#define _RTE_BYTEORDER_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include "generic/rte_byteorder.h"
-
-/*
- * An architecture-optimized byte swap for a 16-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap16().
- */
-static inline uint16_t rte_arch_bswap16(uint16_t _x)
-{
-       return (_x >> 8) | ((_x << 8) & 0xff00);
-}
-
-/*
- * An architecture-optimized byte swap for a 32-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap32().
- */
-static inline uint32_t rte_arch_bswap32(uint32_t _x)
-{
-       return (_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
-               ((_x << 24) & 0xff000000);
-}
-
-/*
- * An architecture-optimized byte swap for a 64-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap64().
- */
-/* 64-bit mode */
-static inline uint64_t rte_arch_bswap64(uint64_t _x)
-{
-       return (_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
-               ((_x >> 8) & 0xff000000) | ((_x << 8) & (0xffULL << 32)) |
-               ((_x << 24) & (0xffULL << 40)) |
-               ((_x << 40) & (0xffULL << 48)) | ((_x << 56));
-}
-
-#ifndef RTE_FORCE_INTRINSICS
-#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?           \
-                                  rte_constant_bswap16(x) :            \
-                                  rte_arch_bswap16(x)))
-
-#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?           \
-                                  rte_constant_bswap32(x) :            \
-                                  rte_arch_bswap32(x)))
-
-#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ?           \
-                                  rte_constant_bswap64(x) :            \
-                                  rte_arch_bswap64(x)))
-#else
-/*
- * __builtin_bswap16 is only available in GCC 4.8 and later.
- */
-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
-#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?           \
-                                  rte_constant_bswap16(x) :            \
-                                  rte_arch_bswap16(x)))
-#endif
-#endif
-
-/* Power 8 supports both little-endian and big-endian modes;
- * Power 7 supports only big endian.
- */
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
-
-#define rte_cpu_to_le_16(x) (x)
-#define rte_cpu_to_le_32(x) (x)
-#define rte_cpu_to_le_64(x) (x)
-
-#define rte_cpu_to_be_16(x) rte_bswap16(x)
-#define rte_cpu_to_be_32(x) rte_bswap32(x)
-#define rte_cpu_to_be_64(x) rte_bswap64(x)
-
-#define rte_le_to_cpu_16(x) (x)
-#define rte_le_to_cpu_32(x) (x)
-#define rte_le_to_cpu_64(x) (x)
-
-#define rte_be_to_cpu_16(x) rte_bswap16(x)
-#define rte_be_to_cpu_32(x) rte_bswap32(x)
-#define rte_be_to_cpu_64(x) rte_bswap64(x)
-
-#else /* RTE_BIG_ENDIAN */
-
-#define rte_cpu_to_le_16(x) rte_bswap16(x)
-#define rte_cpu_to_le_32(x) rte_bswap32(x)
-#define rte_cpu_to_le_64(x) rte_bswap64(x)
-
-#define rte_cpu_to_be_16(x) (x)
-#define rte_cpu_to_be_32(x) (x)
-#define rte_cpu_to_be_64(x) (x)
-
-#define rte_le_to_cpu_16(x) rte_bswap16(x)
-#define rte_le_to_cpu_32(x) rte_bswap32(x)
-#define rte_le_to_cpu_64(x) rte_bswap64(x)
-
-#define rte_be_to_cpu_16(x) (x)
-#define rte_be_to_cpu_32(x) (x)
-#define rte_be_to_cpu_64(x) (x)
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_BYTEORDER_PPC_64_H_ */
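These conversion macros compile to a byte swap only when host and target byte
orders differ, so the same source handles both Power 8 endianness modes. A
small sketch with a hypothetical protocol field:

#include <stdint.h>
#include <rte_byteorder.h>

/* Store a sequence number in network (big-endian) order: a swap on
 * little-endian Power 8 builds, a no-op on big-endian builds. */
void set_seq_be(uint32_t *field, uint32_t seq)
{
        *field = rte_cpu_to_be_32(seq);
}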
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h b/lib/librte_eal/common/include/arch/ppc_64/rte_cpuflags.h
deleted file mode 100644 (file)
index a88355d..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) IBM Corporation 2014.
- */
-
-#ifndef _RTE_CPUFLAGS_PPC_64_H_
-#define _RTE_CPUFLAGS_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Enumeration of all CPU features supported
- */
-enum rte_cpu_flag_t {
-       RTE_CPUFLAG_PPC_LE = 0,
-       RTE_CPUFLAG_TRUE_LE,
-       RTE_CPUFLAG_PSERIES_PERFMON_COMPAT,
-       RTE_CPUFLAG_VSX,
-       RTE_CPUFLAG_ARCH_2_06,
-       RTE_CPUFLAG_POWER6_EXT,
-       RTE_CPUFLAG_DFP,
-       RTE_CPUFLAG_PA6T,
-       RTE_CPUFLAG_ARCH_2_05,
-       RTE_CPUFLAG_ICACHE_SNOOP,
-       RTE_CPUFLAG_SMT,
-       RTE_CPUFLAG_BOOKE,
-       RTE_CPUFLAG_CELLBE,
-       RTE_CPUFLAG_POWER5_PLUS,
-       RTE_CPUFLAG_POWER5,
-       RTE_CPUFLAG_POWER4,
-       RTE_CPUFLAG_NOTB,
-       RTE_CPUFLAG_EFP_DOUBLE,
-       RTE_CPUFLAG_EFP_SINGLE,
-       RTE_CPUFLAG_SPE,
-       RTE_CPUFLAG_UNIFIED_CACHE,
-       RTE_CPUFLAG_4xxMAC,
-       RTE_CPUFLAG_MMU,
-       RTE_CPUFLAG_FPU,
-       RTE_CPUFLAG_ALTIVEC,
-       RTE_CPUFLAG_PPC601,
-       RTE_CPUFLAG_PPC64,
-       RTE_CPUFLAG_PPC32,
-       RTE_CPUFLAG_TAR,
-       RTE_CPUFLAG_LSEL,
-       RTE_CPUFLAG_EBB,
-       RTE_CPUFLAG_DSCR,
-       RTE_CPUFLAG_HTM,
-       RTE_CPUFLAG_ARCH_2_07,
-       /* The last item */
-       RTE_CPUFLAG_NUMFLAGS,/**< This should always be the last! */
-};
-
-#include "generic/rte_cpuflags.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CPUFLAGS_PPC_64_H_ */
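The flags are queried at runtime through the generic API included at the
bottom of the header. A sketch of gating a vector code path on VSX, assuming
the usual rte_cpu_get_flag_enabled() helper from generic/rte_cpuflags.h:

#include <rte_cpuflags.h>

/* Returns non-zero when the running CPU reports VSX support. */
int vsx_available(void)
{
        return rte_cpu_get_flag_enabled(RTE_CPUFLAG_VSX) == 1;
}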
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_cycles.h b/lib/librte_eal/common/include/arch/ppc_64/rte_cycles.h
deleted file mode 100644 (file)
index 8f2e986..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) IBM Corporation 2014.
- */
-
-#ifndef _RTE_CYCLES_PPC_64_H_
-#define _RTE_CYCLES_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_cycles.h"
-
-#include <rte_byteorder.h>
-#include <rte_common.h>
-
-/**
- * Read the time base register.
- *
- * @return
- *   The time base for this lcore.
- */
-static inline uint64_t
-rte_rdtsc(void)
-{
-       union {
-               uint64_t tsc_64;
-               RTE_STD_C11
-               struct {
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
-                       uint32_t hi_32;
-                       uint32_t lo_32;
-#else
-                       uint32_t lo_32;
-                       uint32_t hi_32;
-#endif
-               };
-       } tsc;
-       uint32_t tmp;
-
-       asm volatile(
-                       "0:\n"
-                       "mftbu   %[hi32]\n"
-                       "mftb    %[lo32]\n"
-                       "mftbu   %[tmp]\n"
-                       "cmpw    %[tmp],%[hi32]\n"
-                       "bne     0b\n"
-                       : [hi32] "=r"(tsc.hi_32), [lo32] "=r"(tsc.lo_32),
-                       [tmp] "=r"(tmp)
-                   );
-       return tsc.tsc_64;
-}
-
-static inline uint64_t
-rte_rdtsc_precise(void)
-{
-       rte_mb();
-       return rte_rdtsc();
-}
-
-static inline uint64_t
-rte_get_tsc_cycles(void) { return rte_rdtsc(); }
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CYCLES_PPC_64_H_ */
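rte_rdtsc_precise() adds a full barrier before reading the time base so the
read is not reordered with the code under test. A minimal measurement sketch:

#include <stdint.h>
#include <rte_cycles.h>

/* Time a callback in time-base ticks. */
uint64_t measure_ticks(void (*fn)(void))
{
        uint64_t start = rte_rdtsc_precise();
        fn();
        return rte_rdtsc_precise() - start;
}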
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_io.h b/lib/librte_eal/common/include/arch/ppc_64/rte_io.h
deleted file mode 100644 (file)
index 0145506..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Cavium, Inc
- */
-
-#ifndef _RTE_IO_PPC_64_H_
-#define _RTE_IO_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_io.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_IO_PPC_64_H_ */
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_mcslock.h b/lib/librte_eal/common/include/arch/ppc_64/rte_mcslock.h
deleted file mode 100644 (file)
index c58a6ed..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Arm Limited
- */
-
-#ifndef _RTE_MCSLOCK_PPC_64_H_
-#define _RTE_MCSLOCK_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_mcslock.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MCSLOCK_PPC_64_H_ */
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h b/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
deleted file mode 100644 (file)
index 25311ba..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) IBM Corporation 2014.
- */
-
-#ifndef _RTE_MEMCPY_PPC_64_H_
-#define _RTE_MEMCPY_PPC_64_H_
-
-#include <stdint.h>
-#include <string.h>
-/* To include altivec.h, the GCC version must be >= 4.8 */
-#include <altivec.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_memcpy.h"
-
-static inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
-}
-
-static inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
-       vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
-}
-
-static inline void
-rte_mov48(uint8_t *dst, const uint8_t *src)
-{
-       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
-       vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
-       vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
-}
-
-static inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
-       vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
-       vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
-       vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
-}
-
-static inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
-       vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
-       vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
-       vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
-       vec_vsx_st(vec_vsx_ld(64, src), 64, dst);
-       vec_vsx_st(vec_vsx_ld(80, src), 80, dst);
-       vec_vsx_st(vec_vsx_ld(96, src), 96, dst);
-       vec_vsx_st(vec_vsx_ld(112, src), 112, dst);
-}
-
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov128(dst, src);
-       rte_mov128(dst + 128, src + 128);
-}
-
-#define rte_memcpy(dst, src, n)              \
-       __extension__ ({                     \
-       (__builtin_constant_p(n)) ?          \
-       memcpy((dst), (src), (n)) :          \
-       rte_memcpy_func((dst), (src), (n)); })
-
-static inline void *
-rte_memcpy_func(void *dst, const void *src, size_t n)
-{
-       void *ret = dst;
-
-       /* We can't copy < 16 bytes using vector registers, so do it manually. */
-       if (n < 16) {
-               if (n & 0x01) {
-                       *(uint8_t *)dst = *(const uint8_t *)src;
-                       dst = (uint8_t *)dst + 1;
-                       src = (const uint8_t *)src + 1;
-               }
-               if (n & 0x02) {
-                       *(uint16_t *)dst = *(const uint16_t *)src;
-                       dst = (uint16_t *)dst + 1;
-                       src = (const uint16_t *)src + 1;
-               }
-               if (n & 0x04) {
-                       *(uint32_t *)dst = *(const uint32_t *)src;
-                       dst = (uint32_t *)dst + 1;
-                       src = (const uint32_t *)src + 1;
-               }
-               if (n & 0x08)
-                       *(uint64_t *)dst = *(const uint64_t *)src;
-               return ret;
-       }
-
-       /* Special fast cases for <= 128 bytes */
-       if (n <= 32) {
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov16((uint8_t *)dst - 16 + n,
-                       (const uint8_t *)src - 16 + n);
-               return ret;
-       }
-
-       if (n <= 64) {
-               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov32((uint8_t *)dst - 32 + n,
-                       (const uint8_t *)src - 32 + n);
-               return ret;
-       }
-
-       if (n <= 128) {
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov64((uint8_t *)dst - 64 + n,
-                       (const uint8_t *)src - 64 + n);
-               return ret;
-       }
-
-       /*
-        * For large copies (> 128 bytes), this combination of 256-, 64- and
-        * 16-byte copies was found to be faster than also doing 128- and
-        * 32-byte copies.
-        */
-       for ( ; n >= 256; n -= 256) {
-               rte_mov256((uint8_t *)dst, (const uint8_t *)src);
-               dst = (uint8_t *)dst + 256;
-               src = (const uint8_t *)src + 256;
-       }
-
-       /*
-        * We split the remaining bytes (which will be less than 256) into
-        * 64-byte (2^6) chunks.
-        * Using incrementing integers in the case labels of a switch statement
-        * encourages the compiler to use a jump table. To get incrementing
-        * integers, we shift the 2 relevant bits to the LSB position to first
-        * get decrementing integers, and then subtract.
-        */
-       switch (3 - (n >> 6)) {
-       case 0x00:
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               n -= 64;
-               dst = (uint8_t *)dst + 64;
-               src = (const uint8_t *)src + 64;      /* fallthrough */
-       case 0x01:
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               n -= 64;
-               dst = (uint8_t *)dst + 64;
-               src = (const uint8_t *)src + 64;      /* fallthrough */
-       case 0x02:
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               n -= 64;
-               dst = (uint8_t *)dst + 64;
-               src = (const uint8_t *)src + 64;      /* fallthrough */
-       default:
-               ;
-       }
-
-       /*
-        * We split the remaining bytes (which will be less than 64) into
-        * 16-byte (2^4) chunks, using the same switch structure as above.
-        */
-       switch (3 - (n >> 4)) {
-       case 0x00:
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               n -= 16;
-               dst = (uint8_t *)dst + 16;
-               src = (const uint8_t *)src + 16;      /* fallthrough */
-       case 0x01:
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               n -= 16;
-               dst = (uint8_t *)dst + 16;
-               src = (const uint8_t *)src + 16;      /* fallthrough */
-       case 0x02:
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               n -= 16;
-               dst = (uint8_t *)dst + 16;
-               src = (const uint8_t *)src + 16;      /* fallthrough */
-       default:
-               ;
-       }
-
-       /* Copy any remaining bytes, without going beyond end of buffers */
-       if (n != 0)
-               rte_mov16((uint8_t *)dst - 16 + n,
-                       (const uint8_t *)src - 16 + n);
-       return ret;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MEMCPY_PPC_64_H_ */
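
The rte_memcpy() macro in the PPC header above dispatches on __builtin_constant_p(n): a compile-time-constant size lets the compiler expand memcpy() inline, while a runtime size falls through to the hand-tuned copy routine. A minimal standalone sketch of the same dispatch, with copy_func() as a hypothetical stand-in for rte_memcpy_func():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the hand-tuned rte_memcpy_func(). */
static void *copy_func(void *dst, const void *src, size_t n)
{
	return memcpy(dst, src, n);
}

/* Same dispatch idea: constant sizes inline, runtime sizes go to the
 * tuned routine. */
#define my_copy(dst, src, n)                 \
	__extension__ ({                     \
	(__builtin_constant_p(n)) ?          \
	memcpy((dst), (src), (n)) :          \
	copy_func((dst), (src), (n)); })

int main(void)
{
	char src[16] = "hello", dst[16];
	size_t n = sizeof(src);

	my_copy(dst, src, sizeof(src));	/* constant size: inlined memcpy() */
	my_copy(dst, src, n);		/* runtime size: copy_func() */
	printf("%s\n", dst);
	return 0;
}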
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_pause.h b/lib/librte_eal/common/include/arch/ppc_64/rte_pause.h
deleted file mode 100644 (file)
index 16e47ce..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#ifndef _RTE_PAUSE_PPC64_H_
-#define _RTE_PAUSE_PPC64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "rte_atomic.h"
-
-#include "generic/rte_pause.h"
-
-static inline void rte_pause(void)
-{
-       /* Set hardware multi-threading low priority */
-       asm volatile("or 1,1,1");
-       /* Set hardware multi-threading medium priority */
-       asm volatile("or 2,2,2");
-       rte_compiler_barrier();
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PAUSE_PPC64_H_ */
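
rte_pause() drops the SMT hardware-thread priority while a core spins and then raises it back, so a busy-waiting thread steals fewer pipeline resources from its siblings. A standalone sketch of the spin-wait pattern it is meant for; cpu_relax() is an illustrative stand-in whose PPC branch mirrors the priority nops above:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int ready;

/* Illustrative stand-in for rte_pause(). */
static inline void cpu_relax(void)
{
#if defined(__PPC64__)
	asm volatile("or 1,1,1");	/* low priority */
	asm volatile("or 2,2,2");	/* medium priority */
#endif
	asm volatile("" ::: "memory");	/* compiler barrier */
}

static void *producer(void *arg)
{
	(void)arg;
	atomic_store_explicit(&ready, 1, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		cpu_relax();	/* polite busy-wait */
	pthread_join(t, NULL);
	puts("flag observed");
	return 0;
}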
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_prefetch.h b/lib/librte_eal/common/include/arch/ppc_64/rte_prefetch.h
deleted file mode 100644 (file)
index 9ba07c8..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) IBM Corporation 2014.
- */
-
-#ifndef _RTE_PREFETCH_PPC_64_H_
-#define _RTE_PREFETCH_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include "generic/rte_prefetch.h"
-
-static inline void rte_prefetch0(const volatile void *p)
-{
-       asm volatile ("dcbt 0,%[p],0" : : [p] "r" (p));
-}
-
-static inline void rte_prefetch1(const volatile void *p)
-{
-       asm volatile ("dcbt 0,%[p],0" : : [p] "r" (p));
-}
-
-static inline void rte_prefetch2(const volatile void *p)
-{
-       asm volatile ("dcbt 0,%[p],0" : : [p] "r" (p));
-}
-
-static inline void rte_prefetch_non_temporal(const volatile void *p)
-{
-       /* non-temporal version not available, fallback to rte_prefetch0 */
-       rte_prefetch0(p);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PREFETCH_PPC_64_H_ */
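
All three locality levels above collapse to the same dcbt hint on POWER, and the non-temporal variant falls back to rte_prefetch0(). A standalone sketch of the streaming loop such prefetches target; __builtin_prefetch() emits the equivalent hint, and the 16-element look-ahead distance is an illustrative guess, not a tuned value:

#include <stdint.h>
#include <stdio.h>

/* Sum an array while hinting a few cache lines ahead of the reads. */
static uint64_t sum(const uint32_t *a, size_t n)
{
	uint64_t s = 0;

	for (size_t i = 0; i < n; i++) {
		if (i + 16 < n)
			__builtin_prefetch(&a[i + 16], 0 /* read */, 3);
		s += a[i];
	}
	return s;
}

int main(void)
{
	uint32_t a[1024];

	for (size_t i = 0; i < 1024; i++)
		a[i] = (uint32_t)i;
	printf("%llu\n", (unsigned long long)sum(a, 1024));
	return 0;
}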
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_rwlock.h b/lib/librte_eal/common/include/arch/ppc_64/rte_rwlock.h
deleted file mode 100644 (file)
index 9fadc04..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- */
-#ifndef _RTE_RWLOCK_PPC_64_H_
-#define _RTE_RWLOCK_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_rwlock.h"
-
-static inline void
-rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
-{
-       rte_rwlock_read_lock(rwl);
-}
-
-static inline void
-rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
-{
-       rte_rwlock_read_unlock(rwl);
-}
-
-static inline void
-rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
-{
-       rte_rwlock_write_lock(rwl);
-}
-
-static inline void
-rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
-{
-       rte_rwlock_write_unlock(rwl);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_RWLOCK_PPC_64_H_ */
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_spinlock.h b/lib/librte_eal/common/include/arch/ppc_64/rte_spinlock.h
deleted file mode 100644 (file)
index 149ec24..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) IBM Corporation 2014.
- */
-
-#ifndef _RTE_SPINLOCK_PPC_64_H_
-#define _RTE_SPINLOCK_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include <rte_pause.h>
-#include "generic/rte_spinlock.h"
-
-/* FIXME: use intrinsics to implement the spinlock on Power architecture */
-
-#ifndef RTE_FORCE_INTRINSICS
-
-static inline void
-rte_spinlock_lock(rte_spinlock_t *sl)
-{
-       while (__sync_lock_test_and_set(&sl->locked, 1))
-               while (sl->locked)
-                       rte_pause();
-}
-
-static inline void
-rte_spinlock_unlock(rte_spinlock_t *sl)
-{
-       __sync_lock_release(&sl->locked);
-}
-
-static inline int
-rte_spinlock_trylock(rte_spinlock_t *sl)
-{
-       return __sync_lock_test_and_set(&sl->locked, 1) == 0;
-}
-
-#endif
-
-static inline int rte_tm_supported(void)
-{
-       return 0;
-}
-
-static inline void
-rte_spinlock_lock_tm(rte_spinlock_t *sl)
-{
-       rte_spinlock_lock(sl); /* fall-back */
-}
-
-static inline int
-rte_spinlock_trylock_tm(rte_spinlock_t *sl)
-{
-       return rte_spinlock_trylock(sl);
-}
-
-static inline void
-rte_spinlock_unlock_tm(rte_spinlock_t *sl)
-{
-       rte_spinlock_unlock(sl);
-}
-
-static inline void
-rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
-{
-       rte_spinlock_recursive_lock(slr); /* fall-back */
-}
-
-static inline void
-rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
-{
-       rte_spinlock_recursive_unlock(slr);
-}
-
-static inline int
-rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
-{
-       return rte_spinlock_recursive_trylock(slr);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_SPINLOCK_PPC_64_H_ */
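
The non-intrinsics path above is a classic test-and-test-and-set lock: the atomic __sync_lock_test_and_set() is retried only after a plain read has seen the lock free, which keeps the contended cache line in a shared state while waiting. A self-contained sketch of the same pattern:

#include <pthread.h>
#include <stdio.h>

typedef struct { volatile int locked; } spinlock_t;

/* Test-and-test-and-set: spin on a plain read, retry the atomic only
 * once the lock looks free. */
static void spin_lock(spinlock_t *sl)
{
	while (__sync_lock_test_and_set(&sl->locked, 1))
		while (sl->locked)
			;	/* rte_pause() would go here */
}

static void spin_unlock(spinlock_t *sl)
{
	__sync_lock_release(&sl->locked);
}

static spinlock_t lock;
static int counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		spin_lock(&lock);
		counter++;
		spin_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("%d\n", counter);	/* 400000 */
	return 0;
}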
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_ticketlock.h b/lib/librte_eal/common/include/arch/ppc_64/rte_ticketlock.h
deleted file mode 100644 (file)
index c175e9e..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Arm Limited
- */
-
-#ifndef _RTE_TICKETLOCK_PPC_64_H_
-#define _RTE_TICKETLOCK_PPC_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_ticketlock.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_TICKETLOCK_PPC_64_H_ */
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_vect.h b/lib/librte_eal/common/include/arch/ppc_64/rte_vect.h
deleted file mode 100644 (file)
index 068c805..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) IBM Corporation 2016.
- */
-
-#ifndef _RTE_VECT_PPC_64_H_
-#define _RTE_VECT_PPC_64_H_
-
-#include <altivec.h>
-#include "generic/rte_vect.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef vector signed int xmm_t;
-
-#define        XMM_SIZE        (sizeof(xmm_t))
-#define        XMM_MASK        (XMM_SIZE - 1)
-
-typedef union rte_xmm {
-       xmm_t    x;
-       uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
-       uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
-       uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
-       uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
-       double   pd[XMM_SIZE / sizeof(double)];
-} __attribute__((aligned(16))) rte_xmm_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_VECT_PPC_64_H_ */
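
rte_xmm_t is a punning union: a single 16-byte, 16-byte-aligned vector viewed as lanes of every scalar width. A plain-C sketch of the same trick (the real type additionally wraps an Altivec "vector signed int"):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef union {
	uint8_t  u8[16];
	uint16_t u16[8];
	uint32_t u32[4];
	uint64_t u64[2];
} __attribute__((aligned(16))) xmm_u;

int main(void)
{
	xmm_u v;

	memset(&v, 0, sizeof(v));
	v.u32[0] = 0x11223344;
	/* All lane views alias the same 16 bytes (byte order follows
	 * the host endianness). */
	printf("u8[0]=0x%02x u64[0]=0x%016llx\n",
	       v.u8[0], (unsigned long long)v.u64[0]);
	return 0;
}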
diff --git a/lib/librte_eal/common/include/arch/x86/meson.build b/lib/librte_eal/common/include/arch/x86/meson.build
deleted file mode 100644 (file)
index bc8ffea..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
-
-install_headers(
-       'rte_atomic_32.h',
-       'rte_atomic_64.h',
-       'rte_atomic.h',
-       'rte_byteorder_32.h',
-       'rte_byteorder_64.h',
-       'rte_byteorder.h',
-       'rte_cpuflags.h',
-       'rte_cycles.h',
-       'rte_io.h',
-       'rte_memcpy.h',
-       'rte_prefetch.h',
-       'rte_pause.h',
-       'rte_rtm.h',
-       'rte_rwlock.h',
-       'rte_spinlock.h',
-       'rte_vect.h',
-       subdir: get_option('include_subdir_arch'))
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic.h b/lib/librte_eal/common/include/arch/x86/rte_atomic.h
deleted file mode 100644 (file)
index 148398f..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_ATOMIC_X86_H_
-#define _RTE_ATOMIC_X86_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include <rte_common.h>
-#include <rte_config.h>
-#include <emmintrin.h>
-#include "generic/rte_atomic.h"
-
-#if RTE_MAX_LCORE == 1
-#define MPLOCKED                        /**< No need to insert MP lock prefix. */
-#else
-#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
-#endif
-
-#define        rte_mb() _mm_mfence()
-
-#define        rte_wmb() _mm_sfence()
-
-#define        rte_rmb() _mm_lfence()
-
-#define rte_smp_wmb() rte_compiler_barrier()
-
-#define rte_smp_rmb() rte_compiler_barrier()
-
-/*
- * From Intel Software Development Manual; Vol 3;
- * 8.2.2 Memory Ordering in P6 and More Recent Processor Families:
- * ...
- * . Reads are not reordered with other reads.
- * . Writes are not reordered with older reads.
- * . Writes to memory are not reordered with other writes,
- *   with the following exceptions:
- *   . streaming stores (writes) executed with the non-temporal move
- *     instructions (MOVNTI, MOVNTQ, MOVNTDQ, MOVNTPS, and MOVNTPD); and
- *   . string operations (see Section 8.2.4.1).
- *  ...
- * . Reads may be reordered with older writes to different locations but not
- * with older writes to the same location.
- * . Reads or writes cannot be reordered with I/O instructions,
- * locked instructions, or serializing instructions.
- * . Reads cannot pass earlier LFENCE and MFENCE instructions.
- * . Writes ... cannot pass earlier LFENCE, SFENCE, and MFENCE instructions.
- * . LFENCE instructions cannot pass earlier reads.
- * . SFENCE instructions cannot pass earlier writes ...
- * . MFENCE instructions cannot pass earlier reads, writes ...
- *
- * As pointed out by the JVM developers, this makes it possible to use
- * lock-prefixed instructions to get the same effect as mfence, and on most
- * modern HW that gives better performance than using mfence:
- * https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
- * The basic idea is to use a lock-prefixed add with some dummy memory
- * location as the destination. From their experiments, 128B (2 cache lines)
- * below the current stack pointer looks like a good candidate.
- * So below we use that technique for the rte_smp_mb() implementation.
- */
-
-static __rte_always_inline void
-rte_smp_mb(void)
-{
-#ifdef RTE_ARCH_I686
-       asm volatile("lock addl $0, -128(%%esp); " ::: "memory");
-#else
-       asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
-#endif
-}
-
-#define rte_io_mb() rte_mb()
-
-#define rte_io_wmb() rte_compiler_barrier()
-
-#define rte_io_rmb() rte_compiler_barrier()
-
-#define rte_cio_wmb() rte_compiler_barrier()
-
-#define rte_cio_rmb() rte_compiler_barrier()
-
-/*------------------------- 16 bit atomic operations -------------------------*/
-
-#ifndef RTE_FORCE_INTRINSICS
-static inline int
-rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
-{
-       uint8_t res;
-
-       asm volatile(
-                       MPLOCKED
-                       "cmpxchgw %[src], %[dst];"
-                       "sete %[res];"
-                       : [res] "=a" (res),     /* output */
-                         [dst] "=m" (*dst)
-                       : [src] "r" (src),      /* input */
-                         "a" (exp),
-                         "m" (*dst)
-                       : "memory");            /* no-clobber list */
-       return res;
-}
-
-static inline uint16_t
-rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
-{
-       asm volatile(
-                       MPLOCKED
-                       "xchgw %0, %1;"
-                       : "=r" (val), "=m" (*dst)
-                       : "0" (val),  "m" (*dst)
-                       : "memory");         /* no-clobber list */
-       return val;
-}
-
-static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
-{
-       return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
-}
-
-static inline void
-rte_atomic16_inc(rte_atomic16_t *v)
-{
-       asm volatile(
-                       MPLOCKED
-                       "incw %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-}
-
-static inline void
-rte_atomic16_dec(rte_atomic16_t *v)
-{
-       asm volatile(
-                       MPLOCKED
-                       "decw %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-}
-
-static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
-{
-       uint8_t ret;
-
-       asm volatile(
-                       MPLOCKED
-                       "incw %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt),  /* output */
-                         [ret] "=qm" (ret)
-                       );
-       return ret != 0;
-}
-
-static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
-{
-       uint8_t ret;
-
-       asm volatile(MPLOCKED
-                       "decw %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt),  /* output */
-                         [ret] "=qm" (ret)
-                       );
-       return ret != 0;
-}
-
-/*------------------------- 32 bit atomic operations -------------------------*/
-
-static inline int
-rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
-{
-       uint8_t res;
-
-       asm volatile(
-                       MPLOCKED
-                       "cmpxchgl %[src], %[dst];"
-                       "sete %[res];"
-                       : [res] "=a" (res),     /* output */
-                         [dst] "=m" (*dst)
-                       : [src] "r" (src),      /* input */
-                         "a" (exp),
-                         "m" (*dst)
-                       : "memory");            /* no-clobber list */
-       return res;
-}
-
-static inline uint32_t
-rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
-{
-       asm volatile(
-                       MPLOCKED
-                       "xchgl %0, %1;"
-                       : "=r" (val), "=m" (*dst)
-                       : "0" (val),  "m" (*dst)
-                       : "memory");         /* no-clobber list */
-       return val;
-}
-
-static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
-{
-       return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
-}
-
-static inline void
-rte_atomic32_inc(rte_atomic32_t *v)
-{
-       asm volatile(
-                       MPLOCKED
-                       "incl %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-}
-
-static inline void
-rte_atomic32_dec(rte_atomic32_t *v)
-{
-       asm volatile(
-                       MPLOCKED
-                       "decl %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-}
-
-static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
-{
-       uint8_t ret;
-
-       asm volatile(
-                       MPLOCKED
-                       "incl %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt),  /* output */
-                         [ret] "=qm" (ret)
-                       );
-       return ret != 0;
-}
-
-static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
-{
-       uint8_t ret;
-
-       asm volatile(MPLOCKED
-                       "decl %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt),  /* output */
-                         [ret] "=qm" (ret)
-                       );
-       return ret != 0;
-}
-#endif
-
-#ifdef RTE_ARCH_I686
-#include "rte_atomic_32.h"
-#else
-#include "rte_atomic_64.h"
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_ATOMIC_X86_H_ */
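
rte_smp_mb() above implements the trick from the comment block: a lock-prefixed add to a dummy stack slot is a full barrier on x86 and is usually cheaper than mfence. A standalone sketch with a portable fallback for other targets:

#include <stdio.h>

/* Full barrier via a dummy lock-prefixed add (x86-64 only); elsewhere
 * fall back to the GCC builtin. */
static inline void smp_mb_lock_add(void)
{
#if defined(__x86_64__)
	asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
#else
	__sync_synchronize();
#endif
}

static volatile int data, flag;

int main(void)
{
	data = 42;
	smp_mb_lock_add();	/* order the data store before the flag store */
	flag = 1;
	printf("%d %d\n", flag, data);
	return 0;
}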
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h b/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
deleted file mode 100644 (file)
index f63b7fa..0000000
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation.
- */
-
-/*
- * Inspired by FreeBSD src/sys/i386/include/atomic.h
- * Copyright (c) 1998 Doug Rabson
- * All rights reserved.
- */
-
-#ifndef _RTE_ATOMIC_X86_H_
-#error do not include this file directly, use <rte_atomic.h> instead
-#endif
-
-#ifndef _RTE_ATOMIC_I686_H_
-#define _RTE_ATOMIC_I686_H_
-
-#include <stdint.h>
-#include <rte_common.h>
-#include <rte_atomic.h>
-
-/*------------------------- 64 bit atomic operations -------------------------*/
-
-#ifndef RTE_FORCE_INTRINSICS
-static inline int
-rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
-{
-       uint8_t res;
-       RTE_STD_C11
-       union {
-               struct {
-                       uint32_t l32;
-                       uint32_t h32;
-               };
-               uint64_t u64;
-       } _exp, _src;
-
-       _exp.u64 = exp;
-       _src.u64 = src;
-
-#ifndef __PIC__
-       asm volatile (
-                       MPLOCKED
-                       "cmpxchg8b (%[dst]);"
-                       "setz %[res];"
-                       : [res] "=a" (res)      /* result in eax */
-                       : [dst] "S" (dst),      /* esi */
-                         "b" (_src.l32),       /* ebx */
-                         "c" (_src.h32),       /* ecx */
-                         "a" (_exp.l32),       /* eax */
-                         "d" (_exp.h32)        /* edx */
-                       : "memory" );           /* no-clobber list */
-#else
-       asm volatile (
-                       "xchgl %%ebx, %%edi;\n"
-                       MPLOCKED
-                       "cmpxchg8b (%[dst]);"
-                       "setz %[res];"
-                       "xchgl %%ebx, %%edi;\n"
-                       : [res] "=a" (res)      /* result in eax */
-                       : [dst] "S" (dst),      /* esi */
-                         "D" (_src.l32),       /* ebx */
-                         "c" (_src.h32),       /* ecx */
-                         "a" (_exp.l32),       /* eax */
-                         "d" (_exp.h32)        /* edx */
-                       : "memory" );           /* no-clobber list */
-#endif
-
-       return res;
-}
-
-static inline uint64_t
-rte_atomic64_exchange(volatile uint64_t *dest, uint64_t val)
-{
-       uint64_t old;
-
-       do {
-               old = *dest;
-       } while (rte_atomic64_cmpset(dest, old, val) == 0);
-
-       return old;
-}
-
-static inline void
-rte_atomic64_init(rte_atomic64_t *v)
-{
-       int success = 0;
-       uint64_t tmp;
-
-       while (success == 0) {
-               tmp = v->cnt;
-               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
-                                             tmp, 0);
-       }
-}
-
-static inline int64_t
-rte_atomic64_read(rte_atomic64_t *v)
-{
-       int success = 0;
-       uint64_t tmp;
-
-       while (success == 0) {
-               tmp = v->cnt;
-               /* replace the value by itself */
-               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
-                                             tmp, tmp);
-       }
-       return tmp;
-}
-
-static inline void
-rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
-{
-       int success = 0;
-       uint64_t tmp;
-
-       while (success == 0) {
-               tmp = v->cnt;
-               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
-                                             tmp, new_value);
-       }
-}
-
-static inline void
-rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
-{
-       int success = 0;
-       uint64_t tmp;
-
-       while (success == 0) {
-               tmp = v->cnt;
-               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
-                                             tmp, tmp + inc);
-       }
-}
-
-static inline void
-rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
-{
-       int success = 0;
-       uint64_t tmp;
-
-       while (success == 0) {
-               tmp = v->cnt;
-               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
-                                             tmp, tmp - dec);
-       }
-}
-
-static inline void
-rte_atomic64_inc(rte_atomic64_t *v)
-{
-       rte_atomic64_add(v, 1);
-}
-
-static inline void
-rte_atomic64_dec(rte_atomic64_t *v)
-{
-       rte_atomic64_sub(v, 1);
-}
-
-static inline int64_t
-rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
-{
-       int success = 0;
-       uint64_t tmp;
-
-       while (success == 0) {
-               tmp = v->cnt;
-               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
-                                             tmp, tmp + inc);
-       }
-
-       return tmp + inc;
-}
-
-static inline int64_t
-rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
-{
-       int success = 0;
-       uint64_t tmp;
-
-       while (success == 0) {
-               tmp = v->cnt;
-               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
-                                             tmp, tmp - dec);
-       }
-
-       return tmp - dec;
-}
-
-static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
-{
-       return rte_atomic64_add_return(v, 1) == 0;
-}
-
-static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
-{
-       return rte_atomic64_sub_return(v, 1) == 0;
-}
-
-static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
-{
-       return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
-}
-
-static inline void rte_atomic64_clear(rte_atomic64_t *v)
-{
-       rte_atomic64_set(v, 0);
-}
-#endif
-
-#endif /* _RTE_ATOMIC_I686_H_ */
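
On i686 the only native 64-bit atomic primitive is the cmpxchg8b compare-and-swap, so every other 64-bit operation above is built as a CAS retry loop. The same pattern expressed with the portable __atomic builtins, as a sketch:

#include <stdint.h>

/* CAS-loop add-and-return, mirroring rte_atomic64_add_return() above. */
static int64_t atomic64_add_return(volatile int64_t *v, int64_t inc)
{
	int64_t old, newv;

	do {
		old = __atomic_load_n(v, __ATOMIC_RELAXED);
		newv = old + inc;
	} while (!__atomic_compare_exchange_n(v, &old, newv, 0 /* strong */,
			__ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
	return newv;
}

int main(void)
{
	int64_t v = 40;

	return (int)(atomic64_add_return(&v, 2) - 42);	/* exits 0 */
}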
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h b/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
deleted file mode 100644 (file)
index cfe7067..0000000
+++ /dev/null
@@ -1,218 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation.
- */
-
-/*
- * Inspired by FreeBSD src/sys/amd64/include/atomic.h
- * Copyright (c) 1998 Doug Rabson
- * Copyright (c) 2019 Intel Corporation
- * All rights reserved.
- */
-
-#ifndef _RTE_ATOMIC_X86_H_
-#error do not include this file directly, use <rte_atomic.h> instead
-#endif
-
-#ifndef _RTE_ATOMIC_X86_64_H_
-#define _RTE_ATOMIC_X86_64_H_
-
-#include <stdint.h>
-#include <rte_common.h>
-#include <rte_compat.h>
-#include <rte_atomic.h>
-
-/*------------------------- 64 bit atomic operations -------------------------*/
-
-#ifndef RTE_FORCE_INTRINSICS
-static inline int
-rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
-{
-       uint8_t res;
-
-       asm volatile(
-                       MPLOCKED
-                       "cmpxchgq %[src], %[dst];"
-                       "sete %[res];"
-                       : [res] "=a" (res),     /* output */
-                         [dst] "=m" (*dst)
-                       : [src] "r" (src),      /* input */
-                         "a" (exp),
-                         "m" (*dst)
-                       : "memory");            /* no-clobber list */
-
-       return res;
-}
-
-static inline uint64_t
-rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
-{
-       asm volatile(
-                       MPLOCKED
-                       "xchgq %0, %1;"
-                       : "=r" (val), "=m" (*dst)
-                       : "0" (val),  "m" (*dst)
-                       : "memory");         /* no-clobber list */
-       return val;
-}
-
-static inline void
-rte_atomic64_init(rte_atomic64_t *v)
-{
-       v->cnt = 0;
-}
-
-static inline int64_t
-rte_atomic64_read(rte_atomic64_t *v)
-{
-       return v->cnt;
-}
-
-static inline void
-rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
-{
-       v->cnt = new_value;
-}
-
-static inline void
-rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
-{
-       asm volatile(
-                       MPLOCKED
-                       "addq %[inc], %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : [inc] "ir" (inc),     /* input */
-                         "m" (v->cnt)
-                       );
-}
-
-static inline void
-rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
-{
-       asm volatile(
-                       MPLOCKED
-                       "subq %[dec], %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : [dec] "ir" (dec),     /* input */
-                         "m" (v->cnt)
-                       );
-}
-
-static inline void
-rte_atomic64_inc(rte_atomic64_t *v)
-{
-       asm volatile(
-                       MPLOCKED
-                       "incq %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-}
-
-static inline void
-rte_atomic64_dec(rte_atomic64_t *v)
-{
-       asm volatile(
-                       MPLOCKED
-                       "decq %[cnt]"
-                       : [cnt] "=m" (v->cnt)   /* output */
-                       : "m" (v->cnt)          /* input */
-                       );
-}
-
-static inline int64_t
-rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
-{
-       int64_t prev = inc;
-
-       asm volatile(
-                       MPLOCKED
-                       "xaddq %[prev], %[cnt]"
-                       : [prev] "+r" (prev),   /* output */
-                         [cnt] "=m" (v->cnt)
-                       : "m" (v->cnt)          /* input */
-                       );
-       return prev + inc;
-}
-
-static inline int64_t
-rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
-{
-       return rte_atomic64_add_return(v, -dec);
-}
-
-static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
-{
-       uint8_t ret;
-
-       asm volatile(
-                       MPLOCKED
-                       "incq %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt), /* output */
-                         [ret] "=qm" (ret)
-                       );
-
-       return ret != 0;
-}
-
-static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
-{
-       uint8_t ret;
-
-       asm volatile(
-                       MPLOCKED
-                       "decq %[cnt] ; "
-                       "sete %[ret]"
-                       : [cnt] "+m" (v->cnt),  /* output */
-                         [ret] "=qm" (ret)
-                       );
-       return ret != 0;
-}
-
-static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
-{
-       return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
-}
-
-static inline void rte_atomic64_clear(rte_atomic64_t *v)
-{
-       v->cnt = 0;
-}
-#endif
-
-/*------------------------ 128 bit atomic operations -------------------------*/
-
-__rte_experimental
-static inline int
-rte_atomic128_cmp_exchange(rte_int128_t *dst,
-                          rte_int128_t *exp,
-                          const rte_int128_t *src,
-                          unsigned int weak,
-                          int success,
-                          int failure)
-{
-       RTE_SET_USED(weak);
-       RTE_SET_USED(success);
-       RTE_SET_USED(failure);
-       uint8_t res;
-
-       asm volatile (
-                     MPLOCKED
-                     "cmpxchg16b %[dst];"
-                     " sete %[res]"
-                     : [dst] "=m" (dst->val[0]),
-                       "=a" (exp->val[0]),
-                       "=d" (exp->val[1]),
-                       [res] "=r" (res)
-                     : "b" (src->val[0]),
-                       "c" (src->val[1]),
-                       "a" (exp->val[0]),
-                       "d" (exp->val[1]),
-                       "m" (dst->val[0])
-                     : "memory");
-
-       return res;
-}
-
-#endif /* _RTE_ATOMIC_X86_64_H_ */
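
rte_atomic128_cmp_exchange() wraps cmpxchg16b: dst must be 16-byte aligned, the CPU must support the instruction, and on failure the asm writes the current memory contents back into *exp, which is what makes a retry loop converge. A hedged usage sketch against the DPDK API (the versioned-slot scheme and bump_version() are hypothetical):

/* Build with -DALLOW_EXPERIMENTAL_API; the 128-bit CAS is experimental. */
#include <rte_atomic.h>

/* Hypothetical versioned 128-bit slot: val[0] carries a payload,
 * val[1] a counter bumped on every update (an ABA guard). */
static void
bump_version(rte_int128_t *dst, int64_t payload)
{
	rte_int128_t exp = *dst;	/* racy snapshot; the CAS refreshes it */
	rte_int128_t src;

	do {
		src.val[0] = payload;
		src.val[1] = exp.val[1] + 1;
	} while (!rte_atomic128_cmp_exchange(dst, &exp, &src, 0,
			__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
}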
diff --git a/lib/librte_eal/common/include/arch/x86/rte_byteorder.h b/lib/librte_eal/common/include/arch/x86/rte_byteorder.h
deleted file mode 100644 (file)
index a2dfecc..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_BYTEORDER_X86_H_
-#define _RTE_BYTEORDER_X86_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include <rte_common.h>
-#include <rte_config.h>
-#include "generic/rte_byteorder.h"
-
-#ifndef RTE_BYTE_ORDER
-#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
-#endif
-
-/*
- * An architecture-optimized byte swap for a 16-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap16().
- */
-static inline uint16_t rte_arch_bswap16(uint16_t _x)
-{
-       uint16_t x = _x;
-       asm volatile ("xchgb %b[x1],%h[x2]"
-                     : [x1] "=Q" (x)
-                     : [x2] "0" (x)
-                     );
-       return x;
-}
-
-/*
- * An architecture-optimized byte swap for a 32-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap32().
- */
-static inline uint32_t rte_arch_bswap32(uint32_t _x)
-{
-       uint32_t x = _x;
-       asm volatile ("bswap %[x]"
-                     : [x] "+r" (x)
-                     );
-       return x;
-}
-
-#ifndef RTE_FORCE_INTRINSICS
-#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?           \
-                                  rte_constant_bswap16(x) :            \
-                                  rte_arch_bswap16(x)))
-
-#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?           \
-                                  rte_constant_bswap32(x) :            \
-                                  rte_arch_bswap32(x)))
-
-#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ?           \
-                                  rte_constant_bswap64(x) :            \
-                                  rte_arch_bswap64(x)))
-#else
-/*
- * __builtin_bswap16 is only available in gcc 4.8 and upwards
- */
-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
-#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?           \
-                                  rte_constant_bswap16(x) :            \
-                                  rte_arch_bswap16(x)))
-#endif
-#endif
-
-#define rte_cpu_to_le_16(x) (x)
-#define rte_cpu_to_le_32(x) (x)
-#define rte_cpu_to_le_64(x) (x)
-
-#define rte_cpu_to_be_16(x) rte_bswap16(x)
-#define rte_cpu_to_be_32(x) rte_bswap32(x)
-#define rte_cpu_to_be_64(x) rte_bswap64(x)
-
-#define rte_le_to_cpu_16(x) (x)
-#define rte_le_to_cpu_32(x) (x)
-#define rte_le_to_cpu_64(x) (x)
-
-#define rte_be_to_cpu_16(x) rte_bswap16(x)
-#define rte_be_to_cpu_32(x) rte_bswap32(x)
-#define rte_be_to_cpu_64(x) rte_bswap64(x)
-
-#ifdef RTE_ARCH_I686
-#include "rte_byteorder_32.h"
-#else
-#include "rte_byteorder_64.h"
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_BYTEORDER_X86_H_ */
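
The rte_bswap*() macros above use __builtin_constant_p() so that swaps of literals fold away at compile time while runtime values get the single bswap (or xchg) instruction. A standalone sketch of the same dispatch:

#include <stdint.h>
#include <stdio.h>

/* Constants fold via the shift/mask form; runtime values use the
 * builtin, which compiles to a single bswap. */
#define my_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?	\
	((((x) & 0x000000ffU) << 24) |				\
	 (((x) & 0x0000ff00U) <<  8) |				\
	 (((x) & 0x00ff0000U) >>  8) |				\
	 (((x) & 0xff000000U) >> 24)) :				\
	__builtin_bswap32(x)))

int main(void)
{
	uint32_t v = 0x11223344;

	/* Both print 0x44332211. */
	printf("0x%08x 0x%08x\n", my_bswap32(0x11223344u), my_bswap32(v));
	return 0;
}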
diff --git a/lib/librte_eal/common/include/arch/x86/rte_byteorder_32.h b/lib/librte_eal/common/include/arch/x86/rte_byteorder_32.h
deleted file mode 100644 (file)
index d5a768e..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_BYTEORDER_X86_H_
-#error do not include this file directly, use <rte_byteorder.h> instead
-#endif
-
-#ifndef _RTE_BYTEORDER_I686_H_
-#define _RTE_BYTEORDER_I686_H_
-
-#include <stdint.h>
-#include <rte_byteorder.h>
-
-/*
- * An architecture-optimized byte swap for a 64-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap64().
- */
-/* Compat./Leg. mode */
-static inline uint64_t rte_arch_bswap64(uint64_t x)
-{
-       uint64_t ret = 0;
-       ret |= ((uint64_t)rte_arch_bswap32(x & 0xffffffffUL) << 32);
-       ret |= ((uint64_t)rte_arch_bswap32((x >> 32) & 0xffffffffUL));
-       return ret;
-}
-
-#endif /* _RTE_BYTEORDER_I686_H_ */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_byteorder_64.h b/lib/librte_eal/common/include/arch/x86/rte_byteorder_64.h
deleted file mode 100644 (file)
index 8c6cf28..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_BYTEORDER_X86_H_
-#error do not include this file directly, use <rte_byteorder.h> instead
-#endif
-
-#ifndef _RTE_BYTEORDER_X86_64_H_
-#define _RTE_BYTEORDER_X86_64_H_
-
-#include <stdint.h>
-#include <rte_common.h>
-
-/*
- * An architecture-optimized byte swap for a 64-bit value.
- *
- * Do not use this function directly. The preferred function is rte_bswap64().
- */
-/* 64-bit mode */
-static inline uint64_t rte_arch_bswap64(uint64_t _x)
-{
-       uint64_t x = _x;
-       asm volatile ("bswap %[x]"
-                     : [x] "+r" (x)
-                     );
-       return x;
-}
-
-#endif /* _RTE_BYTEORDER_X86_64_H_ */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_cpuflags.h b/lib/librte_eal/common/include/arch/x86/rte_cpuflags.h
deleted file mode 100644 (file)
index 25ba47b..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_CPUFLAGS_X86_64_H_
-#define _RTE_CPUFLAGS_X86_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-enum rte_cpu_flag_t {
-       /* (EAX 01h) ECX features */
-       RTE_CPUFLAG_SSE3 = 0,               /**< SSE3 */
-       RTE_CPUFLAG_PCLMULQDQ,              /**< PCLMULQDQ */
-       RTE_CPUFLAG_DTES64,                 /**< DTES64 */
-       RTE_CPUFLAG_MONITOR,                /**< MONITOR */
-       RTE_CPUFLAG_DS_CPL,                 /**< DS_CPL */
-       RTE_CPUFLAG_VMX,                    /**< VMX */
-       RTE_CPUFLAG_SMX,                    /**< SMX */
-       RTE_CPUFLAG_EIST,                   /**< EIST */
-       RTE_CPUFLAG_TM2,                    /**< TM2 */
-       RTE_CPUFLAG_SSSE3,                  /**< SSSE3 */
-       RTE_CPUFLAG_CNXT_ID,                /**< CNXT_ID */
-       RTE_CPUFLAG_FMA,                    /**< FMA */
-       RTE_CPUFLAG_CMPXCHG16B,             /**< CMPXCHG16B */
-       RTE_CPUFLAG_XTPR,                   /**< XTPR */
-       RTE_CPUFLAG_PDCM,                   /**< PDCM */
-       RTE_CPUFLAG_PCID,                   /**< PCID */
-       RTE_CPUFLAG_DCA,                    /**< DCA */
-       RTE_CPUFLAG_SSE4_1,                 /**< SSE4_1 */
-       RTE_CPUFLAG_SSE4_2,                 /**< SSE4_2 */
-       RTE_CPUFLAG_X2APIC,                 /**< X2APIC */
-       RTE_CPUFLAG_MOVBE,                  /**< MOVBE */
-       RTE_CPUFLAG_POPCNT,                 /**< POPCNT */
-       RTE_CPUFLAG_TSC_DEADLINE,           /**< TSC_DEADLINE */
-       RTE_CPUFLAG_AES,                    /**< AES */
-       RTE_CPUFLAG_XSAVE,                  /**< XSAVE */
-       RTE_CPUFLAG_OSXSAVE,                /**< OSXSAVE */
-       RTE_CPUFLAG_AVX,                    /**< AVX */
-       RTE_CPUFLAG_F16C,                   /**< F16C */
-       RTE_CPUFLAG_RDRAND,                 /**< RDRAND */
-       RTE_CPUFLAG_HYPERVISOR,             /**< Running in a VM */
-
-       /* (EAX 01h) EDX features */
-       RTE_CPUFLAG_FPU,                    /**< FPU */
-       RTE_CPUFLAG_VME,                    /**< VME */
-       RTE_CPUFLAG_DE,                     /**< DE */
-       RTE_CPUFLAG_PSE,                    /**< PSE */
-       RTE_CPUFLAG_TSC,                    /**< TSC */
-       RTE_CPUFLAG_MSR,                    /**< MSR */
-       RTE_CPUFLAG_PAE,                    /**< PAE */
-       RTE_CPUFLAG_MCE,                    /**< MCE */
-       RTE_CPUFLAG_CX8,                    /**< CX8 */
-       RTE_CPUFLAG_APIC,                   /**< APIC */
-       RTE_CPUFLAG_SEP,                    /**< SEP */
-       RTE_CPUFLAG_MTRR,                   /**< MTRR */
-       RTE_CPUFLAG_PGE,                    /**< PGE */
-       RTE_CPUFLAG_MCA,                    /**< MCA */
-       RTE_CPUFLAG_CMOV,                   /**< CMOV */
-       RTE_CPUFLAG_PAT,                    /**< PAT */
-       RTE_CPUFLAG_PSE36,                  /**< PSE36 */
-       RTE_CPUFLAG_PSN,                    /**< PSN */
-       RTE_CPUFLAG_CLFSH,                  /**< CLFSH */
-       RTE_CPUFLAG_DS,                     /**< DS */
-       RTE_CPUFLAG_ACPI,                   /**< ACPI */
-       RTE_CPUFLAG_MMX,                    /**< MMX */
-       RTE_CPUFLAG_FXSR,                   /**< FXSR */
-       RTE_CPUFLAG_SSE,                    /**< SSE */
-       RTE_CPUFLAG_SSE2,                   /**< SSE2 */
-       RTE_CPUFLAG_SS,                     /**< SS */
-       RTE_CPUFLAG_HTT,                    /**< HTT */
-       RTE_CPUFLAG_TM,                     /**< TM */
-       RTE_CPUFLAG_PBE,                    /**< PBE */
-
-       /* (EAX 06h) EAX features */
-       RTE_CPUFLAG_DIGTEMP,                /**< DIGTEMP */
-       RTE_CPUFLAG_TRBOBST,                /**< TRBOBST */
-       RTE_CPUFLAG_ARAT,                   /**< ARAT */
-       RTE_CPUFLAG_PLN,                    /**< PLN */
-       RTE_CPUFLAG_ECMD,                   /**< ECMD */
-       RTE_CPUFLAG_PTM,                    /**< PTM */
-
-       /* (EAX 06h) ECX features */
-       RTE_CPUFLAG_MPERF_APERF_MSR,        /**< MPERF_APERF_MSR */
-       RTE_CPUFLAG_ACNT2,                  /**< ACNT2 */
-       RTE_CPUFLAG_ENERGY_EFF,             /**< ENERGY_EFF */
-
-       /* (EAX 07h, ECX 0h) EBX features */
-       RTE_CPUFLAG_FSGSBASE,               /**< FSGSBASE */
-       RTE_CPUFLAG_BMI1,                   /**< BMI1 */
-       RTE_CPUFLAG_HLE,                    /**< Hardware Lock elision */
-       RTE_CPUFLAG_AVX2,                   /**< AVX2 */
-       RTE_CPUFLAG_SMEP,                   /**< SMEP */
-       RTE_CPUFLAG_BMI2,                   /**< BMI2 */
-       RTE_CPUFLAG_ERMS,                   /**< ERMS */
-       RTE_CPUFLAG_INVPCID,                /**< INVPCID */
-       RTE_CPUFLAG_RTM,                    /**< Transactional memory */
-       RTE_CPUFLAG_AVX512F,                /**< AVX512F */
-       RTE_CPUFLAG_RDSEED,                 /**< RDSEED instruction */
-
-       /* (EAX 80000001h) ECX features */
-       RTE_CPUFLAG_LAHF_SAHF,              /**< LAHF_SAHF */
-       RTE_CPUFLAG_LZCNT,                  /**< LZCNT */
-
-       /* (EAX 80000001h) EDX features */
-       RTE_CPUFLAG_SYSCALL,                /**< SYSCALL */
-       RTE_CPUFLAG_XD,                     /**< XD */
-       RTE_CPUFLAG_1GB_PG,                 /**< 1GB_PG */
-       RTE_CPUFLAG_RDTSCP,                 /**< RDTSCP */
-       RTE_CPUFLAG_EM64T,                  /**< EM64T */
-
-       /* (EAX 80000007h) EDX features */
-       RTE_CPUFLAG_INVTSC,                 /**< INVTSC */
-
-       /* The last item */
-       RTE_CPUFLAG_NUMFLAGS,               /**< This should always be the last! */
-};
-
-#include "generic/rte_cpuflags.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CPUFLAGS_X86_64_H_ */
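
These enum values feed the generic rte_cpu_get_flag_enabled() probe, the usual way DPDK code picks an ISA-specific path at runtime. A minimal usage sketch (assumes the DPDK headers and library are available):

#include <stdio.h>
#include <rte_cpuflags.h>

int main(void)
{
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		puts("using the AVX2 path");
	else
		puts("using the scalar path");
	return 0;
}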
diff --git a/lib/librte_eal/common/include/arch/x86/rte_cycles.h b/lib/librte_eal/common/include/arch/x86/rte_cycles.h
deleted file mode 100644 (file)
index a461a4d..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation.
- * Copyright(c) 2013 6WIND S.A.
- */
-
-#ifndef _RTE_CYCLES_X86_64_H_
-#define _RTE_CYCLES_X86_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_cycles.h"
-
-#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
-/* Global switch to use VMWARE mapping of TSC instead of RDTSC */
-extern int rte_cycles_vmware_tsc_map;
-#include <rte_branch_prediction.h>
-#endif
-#include <rte_common.h>
-#include <rte_config.h>
-
-static inline uint64_t
-rte_rdtsc(void)
-{
-       union {
-               uint64_t tsc_64;
-               RTE_STD_C11
-               struct {
-                       uint32_t lo_32;
-                       uint32_t hi_32;
-               };
-       } tsc;
-
-#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
-       if (unlikely(rte_cycles_vmware_tsc_map)) {
-               /* ecx = 0x10000 corresponds to the physical TSC for VMware */
-               asm volatile("rdpmc" :
-                            "=a" (tsc.lo_32),
-                            "=d" (tsc.hi_32) :
-                            "c"(0x10000));
-               return tsc.tsc_64;
-       }
-#endif
-
-       asm volatile("rdtsc" :
-                    "=a" (tsc.lo_32),
-                    "=d" (tsc.hi_32));
-       return tsc.tsc_64;
-}
-
-static inline uint64_t
-rte_rdtsc_precise(void)
-{
-       rte_mb();
-       return rte_rdtsc();
-}
-
-static inline uint64_t
-rte_get_tsc_cycles(void) { return rte_rdtsc(); }
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_CYCLES_X86_64_H_ */
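
rte_rdtsc() reassembles the 64-bit timestamp counter from the edx:eax halves, and rte_rdtsc_precise() adds a full barrier so earlier instructions cannot drift past the read. A standalone sketch of the same reader used for rough cycle counting:

#include <stdint.h>
#include <stdio.h>

/* Read the TSC, same technique as rte_rdtsc() (x86 only). */
static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	volatile uint64_t sink = 0;
	uint64_t t0 = rdtsc();

	for (int i = 0; i < 1000000; i++)
		sink += (uint64_t)i;
	printf("~%llu cycles\n", (unsigned long long)(rdtsc() - t0));
	return 0;
}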
diff --git a/lib/librte_eal/common/include/arch/x86/rte_io.h b/lib/librte_eal/common/include/arch/x86/rte_io.h
deleted file mode 100644 (file)
index 2db71b1..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Cavium, Inc
- */
-
-#ifndef _RTE_IO_X86_H_
-#define _RTE_IO_X86_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_io.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_IO_X86_H_ */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_mcslock.h b/lib/librte_eal/common/include/arch/x86/rte_mcslock.h
deleted file mode 100644 (file)
index a8f041a..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Arm Limited
- */
-
-#ifndef _RTE_MCSLOCK_X86_64_H_
-#define _RTE_MCSLOCK_X86_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_mcslock.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MCSLOCK_X86_64_H_ */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
deleted file mode 100644 (file)
index ba44c4a..0000000
+++ /dev/null
@@ -1,876 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_MEMCPY_X86_64_H_
-#define _RTE_MEMCPY_X86_64_H_
-
-/**
- * @file
- *
- * Functions for SSE/AVX/AVX2/AVX512 implementation of memcpy().
- */
-
-#include <stdio.h>
-#include <stdint.h>
-#include <string.h>
-#include <rte_vect.h>
-#include <rte_common.h>
-#include <rte_config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Copy bytes from one location to another. The locations must not overlap.
- *
- * @note This is implemented as a macro, so its address should not be taken
- * and care is needed as parameter expressions may be evaluated multiple times.
- *
- * @param dst
- *   Pointer to the destination of the data.
- * @param src
- *   Pointer to the source data.
- * @param n
- *   Number of bytes to copy.
- * @return
- *   Pointer to the destination data.
- */
-static __rte_always_inline void *
-rte_memcpy(void *dst, const void *src, size_t n);
-
-#ifdef RTE_MACHINE_CPUFLAG_AVX512F
-
-#define ALIGNMENT_MASK 0x3F
-
-/**
- * AVX512 implementation below
- */
-
-/**
- * Copy 16 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-       __m128i xmm0;
-
-       xmm0 = _mm_loadu_si128((const __m128i *)src);
-       _mm_storeu_si128((__m128i *)dst, xmm0);
-}
-
-/**
- * Copy 32 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-       __m256i ymm0;
-
-       ymm0 = _mm256_loadu_si256((const __m256i *)src);
-       _mm256_storeu_si256((__m256i *)dst, ymm0);
-}
-
-/**
- * Copy 64 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-       __m512i zmm0;
-
-       zmm0 = _mm512_loadu_si512((const void *)src);
-       _mm512_storeu_si512((void *)dst, zmm0);
-}
-
-/**
- * Copy 128 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov64(dst + 0 * 64, src + 0 * 64);
-       rte_mov64(dst + 1 * 64, src + 1 * 64);
-}
-
-/**
- * Copy 256 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov64(dst + 0 * 64, src + 0 * 64);
-       rte_mov64(dst + 1 * 64, src + 1 * 64);
-       rte_mov64(dst + 2 * 64, src + 2 * 64);
-       rte_mov64(dst + 3 * 64, src + 3 * 64);
-}
-
-/**
- * Copy 128-byte blocks from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
-{
-       __m512i zmm0, zmm1;
-
-       while (n >= 128) {
-               zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
-               n -= 128;
-               zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
-               src = src + 128;
-               _mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
-               _mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
-               dst = dst + 128;
-       }
-}
-
-/**
- * Copy 512-byte blocks from one location to another,
- * locations should not overlap.
- */
-static inline void
-rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
-{
-       __m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;
-
-       while (n >= 512) {
-               zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
-               n -= 512;
-               zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
-               zmm2 = _mm512_loadu_si512((const void *)(src + 2 * 64));
-               zmm3 = _mm512_loadu_si512((const void *)(src + 3 * 64));
-               zmm4 = _mm512_loadu_si512((const void *)(src + 4 * 64));
-               zmm5 = _mm512_loadu_si512((const void *)(src + 5 * 64));
-               zmm6 = _mm512_loadu_si512((const void *)(src + 6 * 64));
-               zmm7 = _mm512_loadu_si512((const void *)(src + 7 * 64));
-               src = src + 512;
-               _mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
-               _mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
-               _mm512_storeu_si512((void *)(dst + 2 * 64), zmm2);
-               _mm512_storeu_si512((void *)(dst + 3 * 64), zmm3);
-               _mm512_storeu_si512((void *)(dst + 4 * 64), zmm4);
-               _mm512_storeu_si512((void *)(dst + 5 * 64), zmm5);
-               _mm512_storeu_si512((void *)(dst + 6 * 64), zmm6);
-               _mm512_storeu_si512((void *)(dst + 7 * 64), zmm7);
-               dst = dst + 512;
-       }
-}
-
-static __rte_always_inline void *
-rte_memcpy_generic(void *dst, const void *src, size_t n)
-{
-       uintptr_t dstu = (uintptr_t)dst;
-       uintptr_t srcu = (uintptr_t)src;
-       void *ret = dst;
-       size_t dstofss;
-       size_t bits;
-
-       /**
-        * Copy less than 16 bytes
-        */
-       if (n < 16) {
-               if (n & 0x01) {
-                       *(uint8_t *)dstu = *(const uint8_t *)srcu;
-                       srcu = (uintptr_t)((const uint8_t *)srcu + 1);
-                       dstu = (uintptr_t)((uint8_t *)dstu + 1);
-               }
-               if (n & 0x02) {
-                       *(uint16_t *)dstu = *(const uint16_t *)srcu;
-                       srcu = (uintptr_t)((const uint16_t *)srcu + 1);
-                       dstu = (uintptr_t)((uint16_t *)dstu + 1);
-               }
-               if (n & 0x04) {
-                       *(uint32_t *)dstu = *(const uint32_t *)srcu;
-                       srcu = (uintptr_t)((const uint32_t *)srcu + 1);
-                       dstu = (uintptr_t)((uint32_t *)dstu + 1);
-               }
-               if (n & 0x08)
-                       *(uint64_t *)dstu = *(const uint64_t *)srcu;
-               return ret;
-       }
-
-       /**
-        * Fast way when copy size doesn't exceed 512 bytes
-        */
-       if (n <= 32) {
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov16((uint8_t *)dst - 16 + n,
-                                 (const uint8_t *)src - 16 + n);
-               return ret;
-       }
-       if (n <= 64) {
-               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov32((uint8_t *)dst - 32 + n,
-                                 (const uint8_t *)src - 32 + n);
-               return ret;
-       }
-       if (n <= 512) {
-               if (n >= 256) {
-                       n -= 256;
-                       rte_mov256((uint8_t *)dst, (const uint8_t *)src);
-                       src = (const uint8_t *)src + 256;
-                       dst = (uint8_t *)dst + 256;
-               }
-               if (n >= 128) {
-                       n -= 128;
-                       rte_mov128((uint8_t *)dst, (const uint8_t *)src);
-                       src = (const uint8_t *)src + 128;
-                       dst = (uint8_t *)dst + 128;
-               }
-COPY_BLOCK_128_BACK63:
-               if (n > 64) {
-                       rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-                       rte_mov64((uint8_t *)dst - 64 + n,
-                                         (const uint8_t *)src - 64 + n);
-                       return ret;
-               }
-               if (n > 0)
-                       rte_mov64((uint8_t *)dst - 64 + n,
-                                         (const uint8_t *)src - 64 + n);
-               return ret;
-       }
-
-       /**
-        * Make store aligned when copy size exceeds 512 bytes
-        */
-       dstofss = ((uintptr_t)dst & 0x3F);
-       if (dstofss > 0) {
-               dstofss = 64 - dstofss;
-               n -= dstofss;
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               src = (const uint8_t *)src + dstofss;
-               dst = (uint8_t *)dst + dstofss;
-       }
-
-       /**
-        * Copy 512-byte blocks.
-        * Use copy block function for better instruction order control,
-        * which is important when load is unaligned.
-        */
-       rte_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
-       bits = n;
-       n = n & 511;
-       bits -= n;
-       src = (const uint8_t *)src + bits;
-       dst = (uint8_t *)dst + bits;
-
-       /**
-        * Copy 128-byte blocks.
-        * Use copy block function for better instruction order control,
-        * which is important when load is unaligned.
-        */
-       if (n >= 128) {
-               rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
-               bits = n;
-               n = n & 127;
-               bits -= n;
-               src = (const uint8_t *)src + bits;
-               dst = (uint8_t *)dst + bits;
-       }
-
-       /**
-        * Copy whatever is left
-        */
-       goto COPY_BLOCK_128_BACK63;
-}
-
-#elif defined RTE_MACHINE_CPUFLAG_AVX2
-
-#define ALIGNMENT_MASK 0x1F
-
-/**
- * AVX2 implementation below
- */
-
-/**
- * Copy 16 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-       __m128i xmm0;
-
-       xmm0 = _mm_loadu_si128((const __m128i *)src);
-       _mm_storeu_si128((__m128i *)dst, xmm0);
-}
-
-/**
- * Copy 32 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-       __m256i ymm0;
-
-       ymm0 = _mm256_loadu_si256((const __m256i *)src);
-       _mm256_storeu_si256((__m256i *)dst, ymm0);
-}
-
-/**
- * Copy 64 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-       rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-}
-
-/**
- * Copy 128 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-       rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-       rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
-       rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
-}
-
-/**
- * Copy 128-byte blocks from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
-{
-       __m256i ymm0, ymm1, ymm2, ymm3;
-
-       while (n >= 128) {
-               ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
-               n -= 128;
-               ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
-               ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
-               ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
-               src = (const uint8_t *)src + 128;
-               _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
-               _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
-               _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
-               _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
-               dst = (uint8_t *)dst + 128;
-       }
-}
-
-static __rte_always_inline void *
-rte_memcpy_generic(void *dst, const void *src, size_t n)
-{
-       uintptr_t dstu = (uintptr_t)dst;
-       uintptr_t srcu = (uintptr_t)src;
-       void *ret = dst;
-       size_t dstofss;
-       size_t bits;
-
-       /**
-        * Copy less than 16 bytes
-        */
-       if (n < 16) {
-               if (n & 0x01) {
-                       *(uint8_t *)dstu = *(const uint8_t *)srcu;
-                       srcu = (uintptr_t)((const uint8_t *)srcu + 1);
-                       dstu = (uintptr_t)((uint8_t *)dstu + 1);
-               }
-               if (n & 0x02) {
-                       *(uint16_t *)dstu = *(const uint16_t *)srcu;
-                       srcu = (uintptr_t)((const uint16_t *)srcu + 1);
-                       dstu = (uintptr_t)((uint16_t *)dstu + 1);
-               }
-               if (n & 0x04) {
-                       *(uint32_t *)dstu = *(const uint32_t *)srcu;
-                       srcu = (uintptr_t)((const uint32_t *)srcu + 1);
-                       dstu = (uintptr_t)((uint32_t *)dstu + 1);
-               }
-               if (n & 0x08) {
-                       *(uint64_t *)dstu = *(const uint64_t *)srcu;
-               }
-               return ret;
-       }
-
-       /**
-        * Fast way when copy size doesn't exceed 256 bytes
-        */
-       if (n <= 32) {
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov16((uint8_t *)dst - 16 + n,
-                               (const uint8_t *)src - 16 + n);
-               return ret;
-       }
-       if (n <= 48) {
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
-               rte_mov16((uint8_t *)dst - 16 + n,
-                               (const uint8_t *)src - 16 + n);
-               return ret;
-       }
-       if (n <= 64) {
-               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov32((uint8_t *)dst - 32 + n,
-                               (const uint8_t *)src - 32 + n);
-               return ret;
-       }
-       if (n <= 256) {
-               if (n >= 128) {
-                       n -= 128;
-                       rte_mov128((uint8_t *)dst, (const uint8_t *)src);
-                       src = (const uint8_t *)src + 128;
-                       dst = (uint8_t *)dst + 128;
-               }
-COPY_BLOCK_128_BACK31:
-               if (n >= 64) {
-                       n -= 64;
-                       rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-                       src = (const uint8_t *)src + 64;
-                       dst = (uint8_t *)dst + 64;
-               }
-               if (n > 32) {
-                       rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-                       rte_mov32((uint8_t *)dst - 32 + n,
-                                       (const uint8_t *)src - 32 + n);
-                       return ret;
-               }
-               if (n > 0) {
-                       rte_mov32((uint8_t *)dst - 32 + n,
-                                       (const uint8_t *)src - 32 + n);
-               }
-               return ret;
-       }
-
-       /**
-        * Make store aligned when copy size exceeds 256 bytes
-        */
-       dstofss = (uintptr_t)dst & 0x1F;
-       if (dstofss > 0) {
-               dstofss = 32 - dstofss;
-               n -= dstofss;
-               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-               src = (const uint8_t *)src + dstofss;
-               dst = (uint8_t *)dst + dstofss;
-       }
-
-       /**
-        * Copy 128-byte blocks
-        */
-       rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
-       bits = n;
-       n = n & 127;
-       bits -= n;
-       src = (const uint8_t *)src + bits;
-       dst = (uint8_t *)dst + bits;
-
-       /**
-        * Copy whatever is left
-        */
-       goto COPY_BLOCK_128_BACK31;
-}
-
-#else /* RTE_MACHINE_CPUFLAG */
-
-#define ALIGNMENT_MASK 0x0F
-
-/**
- * SSE & AVX implementation below
- */
-
-/**
- * Copy 16 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-       __m128i xmm0;
-
-       xmm0 = _mm_loadu_si128((const __m128i *)src);
-       _mm_storeu_si128((__m128i *)dst, xmm0);
-}
-
-/**
- * Copy 32 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-       rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-}
-
-/**
- * Copy 64 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-       rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-       rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-       rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-}
-
-/**
- * Copy 128 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-       rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-       rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-       rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-       rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
-       rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
-       rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
-       rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
-}
-
-/**
- * Copy 256 bytes from one location to another,
- * locations should not overlap.
- */
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-       rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-       rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-       rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-       rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-       rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
-       rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
-       rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
-       rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
-       rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
-       rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
-       rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
-       rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
-       rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
-       rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
-       rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
-       rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
-}
-
-/**
- * Macro for copying unaligned block from one location to another with constant load offset,
- * 47 bytes leftover maximum,
- * locations should not overlap.
- * Requirements:
- * - Store is aligned
- * - Load offset is <offset>, which must be an immediate value within [1, 15]
- * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
- * - <dst>, <src>, <len> must be variables
- * - __m128i <xmm0> ~ <xmm8> must be pre-defined
- */
-#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset)                                                     \
-__extension__ ({                                                                                            \
-    size_t tmp;                                                                                                \
-    while (len >= 128 + 16 - offset) {                                                                      \
-        xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16));                  \
-        len -= 128;                                                                                         \
-        xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16));                  \
-        xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16));                  \
-        xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16));                  \
-        xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16));                  \
-        xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16));                  \
-        xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16));                  \
-        xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16));                  \
-        xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16));                  \
-        src = (const uint8_t *)src + 128;                                                                   \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset));        \
-        dst = (uint8_t *)dst + 128;                                                                         \
-    }                                                                                                       \
-    tmp = len;                                                                                              \
-    len = ((len - 16 + offset) & 127) + 16 - offset;                                                        \
-    tmp -= len;                                                                                             \
-    src = (const uint8_t *)src + tmp;                                                                       \
-    dst = (uint8_t *)dst + tmp;                                                                             \
-    if (len >= 32 + 16 - offset) {                                                                          \
-        while (len >= 32 + 16 - offset) {                                                                   \
-            xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16));              \
-            len -= 32;                                                                                      \
-            xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16));              \
-            xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16));              \
-            src = (const uint8_t *)src + 32;                                                                \
-            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));    \
-            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));    \
-            dst = (uint8_t *)dst + 32;                                                                      \
-        }                                                                                                   \
-        tmp = len;                                                                                          \
-        len = ((len - 16 + offset) & 31) + 16 - offset;                                                     \
-        tmp -= len;                                                                                         \
-        src = (const uint8_t *)src + tmp;                                                                   \
-        dst = (uint8_t *)dst + tmp;                                                                         \
-    }                                                                                                       \
-})
-
-/**
- * Macro for copying unaligned block from one location to another,
- * 47 bytes leftover maximum,
- * locations should not overlap.
- * Use switch here because the aligning instruction requires an immediate value for the shift count.
- * Requirements:
- * - Store is aligned
- * - Load offset is <offset>, which must be within [1, 15]
- * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
- * - <dst>, <src>, <len> must be variables
- * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be pre-defined
- */
-#define MOVEUNALIGNED_LEFT47(dst, src, len, offset)                   \
-__extension__ ({                                                      \
-    switch (offset) {                                                 \
-    case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x01); break;  \
-    case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x02); break;  \
-    case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x03); break;  \
-    case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x04); break;  \
-    case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x05); break;  \
-    case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x06); break;  \
-    case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x07); break;  \
-    case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x08); break;  \
-    case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x09); break;  \
-    case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0A); break;  \
-    case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0B); break;  \
-    case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0C); break;  \
-    case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0D); break;  \
-    case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0E); break;  \
-    case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0F); break;  \
-    default:;                                                         \
-    }                                                                 \
-})
-
-static __rte_always_inline void *
-rte_memcpy_generic(void *dst, const void *src, size_t n)
-{
-       __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
-       uintptr_t dstu = (uintptr_t)dst;
-       uintptr_t srcu = (uintptr_t)src;
-       void *ret = dst;
-       size_t dstofss;
-       size_t srcofs;
-
-       /**
-        * Copy less than 16 bytes
-        */
-       if (n < 16) {
-               if (n & 0x01) {
-                       *(uint8_t *)dstu = *(const uint8_t *)srcu;
-                       srcu = (uintptr_t)((const uint8_t *)srcu + 1);
-                       dstu = (uintptr_t)((uint8_t *)dstu + 1);
-               }
-               if (n & 0x02) {
-                       *(uint16_t *)dstu = *(const uint16_t *)srcu;
-                       srcu = (uintptr_t)((const uint16_t *)srcu + 1);
-                       dstu = (uintptr_t)((uint16_t *)dstu + 1);
-               }
-               if (n & 0x04) {
-                       *(uint32_t *)dstu = *(const uint32_t *)srcu;
-                       srcu = (uintptr_t)((const uint32_t *)srcu + 1);
-                       dstu = (uintptr_t)((uint32_t *)dstu + 1);
-               }
-               if (n & 0x08) {
-                       *(uint64_t *)dstu = *(const uint64_t *)srcu;
-               }
-               return ret;
-       }
-
-       /**
-        * Fast way when copy size doesn't exceed 512 bytes
-        */
-       if (n <= 32) {
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
-               return ret;
-       }
-       if (n <= 48) {
-               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
-               return ret;
-       }
-       if (n <= 64) {
-               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
-               rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
-               return ret;
-       }
-       if (n <= 128) {
-               goto COPY_BLOCK_128_BACK15;
-       }
-       if (n <= 512) {
-               if (n >= 256) {
-                       n -= 256;
-                       rte_mov128((uint8_t *)dst, (const uint8_t *)src);
-                       rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
-                       src = (const uint8_t *)src + 256;
-                       dst = (uint8_t *)dst + 256;
-               }
-COPY_BLOCK_255_BACK15:
-               if (n >= 128) {
-                       n -= 128;
-                       rte_mov128((uint8_t *)dst, (const uint8_t *)src);
-                       src = (const uint8_t *)src + 128;
-                       dst = (uint8_t *)dst + 128;
-               }
-COPY_BLOCK_128_BACK15:
-               if (n >= 64) {
-                       n -= 64;
-                       rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-                       src = (const uint8_t *)src + 64;
-                       dst = (uint8_t *)dst + 64;
-               }
-COPY_BLOCK_64_BACK15:
-               if (n >= 32) {
-                       n -= 32;
-                       rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-                       src = (const uint8_t *)src + 32;
-                       dst = (uint8_t *)dst + 32;
-               }
-               if (n > 16) {
-                       rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-                       rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
-                       return ret;
-               }
-               if (n > 0) {
-                       rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
-               }
-               return ret;
-       }
-
-       /**
-        * Make store aligned when copy size exceeds 512 bytes,
-        * and make sure the first 15 bytes are copied, because
-        * unaligned copy functions require up to 15 bytes
-        * backwards access.
-        */
-       dstofss = (uintptr_t)dst & 0x0F;
-       if (dstofss > 0) {
-               dstofss = 16 - dstofss + 16;
-               n -= dstofss;
-               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-               src = (const uint8_t *)src + dstofss;
-               dst = (uint8_t *)dst + dstofss;
-       }
-       srcofs = ((uintptr_t)src & 0x0F);
-
-       /**
-        * For aligned copy
-        */
-       if (srcofs == 0) {
-               /**
-                * Copy 256-byte blocks
-                */
-               for (; n >= 256; n -= 256) {
-                       rte_mov256((uint8_t *)dst, (const uint8_t *)src);
-                       dst = (uint8_t *)dst + 256;
-                       src = (const uint8_t *)src + 256;
-               }
-
-               /**
-                * Copy whatever is left
-                */
-               goto COPY_BLOCK_255_BACK15;
-       }
-
-       /**
-        * For copy with unaligned load
-        */
-       MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);
-
-       /**
-        * Copy whatever is left
-        */
-       goto COPY_BLOCK_64_BACK15;
-}
-
-#endif /* RTE_MACHINE_CPUFLAG */
-
-static __rte_always_inline void *
-rte_memcpy_aligned(void *dst, const void *src, size_t n)
-{
-       void *ret = dst;
-
-       /* Copy size <= 16 bytes */
-       if (n < 16) {
-               if (n & 0x01) {
-                       *(uint8_t *)dst = *(const uint8_t *)src;
-                       src = (const uint8_t *)src + 1;
-                       dst = (uint8_t *)dst + 1;
-               }
-               if (n & 0x02) {
-                       *(uint16_t *)dst = *(const uint16_t *)src;
-                       src = (const uint16_t *)src + 1;
-                       dst = (uint16_t *)dst + 1;
-               }
-               if (n & 0x04) {
-                       *(uint32_t *)dst = *(const uint32_t *)src;
-                       src = (const uint32_t *)src + 1;
-                       dst = (uint32_t *)dst + 1;
-               }
-               if (n & 0x08)
-                       *(uint64_t *)dst = *(const uint64_t *)src;
-
-               return ret;
-       }
-
-       /* Copy 16 <= size <= 32 bytes */
-       if (n <= 32) {
-               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov16((uint8_t *)dst - 16 + n,
-                               (const uint8_t *)src - 16 + n);
-
-               return ret;
-       }
-
-       /* Copy 32 < size <= 64 bytes */
-       if (n <= 64) {
-               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-               rte_mov32((uint8_t *)dst - 32 + n,
-                               (const uint8_t *)src - 32 + n);
-
-               return ret;
-       }
-
-       /* Copy 64-byte blocks */
-       for (; n >= 64; n -= 64) {
-               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-               dst = (uint8_t *)dst + 64;
-               src = (const uint8_t *)src + 64;
-       }
-
-       /* Copy whatever is left */
-       rte_mov64((uint8_t *)dst - 64 + n,
-                       (const uint8_t *)src - 64 + n);
-
-       return ret;
-}
-
-static __rte_always_inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
-{
-       if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
-               return rte_memcpy_aligned(dst, src, n);
-       else
-               return rte_memcpy_generic(dst, src, n);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MEMCPY_X86_64_H_ */
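The rte_memcpy() dispatch just above takes the aligned fast path only when both pointers clear ALIGNMENT_MASK (0x1F on AVX2 builds, 0x0F otherwise). A minimal usage sketch, with hypothetical buffer names:

    #include <rte_memcpy.h>

    /* Hypothetical buffers; 32-byte alignment satisfies ALIGNMENT_MASK on
     * both the AVX2 and SSE builds, so rte_memcpy_aligned() is taken. */
    static uint8_t dst_buf[256] __attribute__((aligned(32)));
    static uint8_t src_buf[256] __attribute__((aligned(32)));

    static void copy_example(void)
    {
            rte_memcpy(dst_buf, src_buf, sizeof(src_buf));
    }

Any misalignment of either pointer falls back to rte_memcpy_generic() above.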
diff --git a/lib/librte_eal/common/include/arch/x86/rte_pause.h b/lib/librte_eal/common/include/arch/x86/rte_pause.h
deleted file mode 100644 (file)
index b4cf1df..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#ifndef _RTE_PAUSE_X86_H_
-#define _RTE_PAUSE_X86_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_pause.h"
-
-#include <emmintrin.h>
-static inline void rte_pause(void)
-{
-       _mm_pause();
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PAUSE_X86_H_ */
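rte_pause() wraps the x86 PAUSE instruction, which de-prioritizes the spinning thread so a sibling hyper-thread (or the eventual writer) makes progress. A minimal sketch of the intended use; 'ready' is a hypothetical flag written by another thread:

    #include <rte_pause.h>

    static volatile int ready;          /* hypothetical: set elsewhere */

    static void wait_until_ready(void)
    {
            while (!ready)
                    rte_pause();        /* spin-wait hint to the CPU */
    }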
diff --git a/lib/librte_eal/common/include/arch/x86/rte_prefetch.h b/lib/librte_eal/common/include/arch/x86/rte_prefetch.h
deleted file mode 100644 (file)
index 384c6b3..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef _RTE_PREFETCH_X86_64_H_
-#define _RTE_PREFETCH_X86_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_common.h>
-#include "generic/rte_prefetch.h"
-
-static inline void rte_prefetch0(const volatile void *p)
-{
-       asm volatile ("prefetcht0 %[p]" : : [p] "m" (*(const volatile char *)p));
-}
-
-static inline void rte_prefetch1(const volatile void *p)
-{
-       asm volatile ("prefetcht1 %[p]" : : [p] "m" (*(const volatile char *)p));
-}
-
-static inline void rte_prefetch2(const volatile void *p)
-{
-       asm volatile ("prefetcht2 %[p]" : : [p] "m" (*(const volatile char *)p));
-}
-
-static inline void rte_prefetch_non_temporal(const volatile void *p)
-{
-       asm volatile ("prefetchnta %[p]" : : [p] "m" (*(const volatile char *)p));
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PREFETCH_X86_64_H_ */
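The four helpers map to prefetcht0/t1/t2 and prefetchnta, differing in which cache levels the line lands in. A sketch of the common pattern, prefetching the next element while the current one is processed (struct pkt and handle() are hypothetical):

    #include <rte_prefetch.h>

    struct pkt;                          /* hypothetical element type */

    static void
    process_burst(struct pkt **pkts, int n, void (*handle)(struct pkt *))
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (i + 1 < n)
                            rte_prefetch0(pkts[i + 1]); /* warm all levels */
                    handle(pkts[i]);
            }
    }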
diff --git a/lib/librte_eal/common/include/arch/x86/rte_rtm.h b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
deleted file mode 100644 (file)
index eb0f8e8..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2012,2013 Intel Corporation
- */
-
-#ifndef _RTE_RTM_H_
-#define _RTE_RTM_H_ 1
-
-
-/* Official RTM intrinsics interface matching gcc/icc, but works
-   on older gcc-compatible compilers and binutils. */
-
-#include <rte_common.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-#define RTE_XBEGIN_STARTED             (~0u)
-#define RTE_XABORT_EXPLICIT            (1 << 0)
-#define RTE_XABORT_RETRY               (1 << 1)
-#define RTE_XABORT_CONFLICT            (1 << 2)
-#define RTE_XABORT_CAPACITY            (1 << 3)
-#define RTE_XABORT_DEBUG               (1 << 4)
-#define RTE_XABORT_NESTED              (1 << 5)
-#define RTE_XABORT_CODE(x)             (((x) >> 24) & 0xff)
-
-static __attribute__((__always_inline__)) inline
-unsigned int rte_xbegin(void)
-{
-       unsigned int ret = RTE_XBEGIN_STARTED;
-
-       asm volatile(".byte 0xc7,0xf8 ; .long 0" : "+a" (ret) :: "memory");
-       return ret;
-}
-
-static __attribute__((__always_inline__)) inline
-void rte_xend(void)
-{
-       asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory");
-}
-
-/* not an inline function to workaround a clang bug with -O0 */
-#define rte_xabort(status) do { \
-       asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); \
-} while (0)
-
-static __attribute__((__always_inline__)) inline
-int rte_xtest(void)
-{
-       unsigned char out;
-
-       asm volatile(".byte 0x0f,0x01,0xd6 ; setnz %0" :
-               "=r" (out) :: "memory");
-       return out;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_RTM_H_ */
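These intrinsics compose into the usual try/fall-back pattern; rte_try_tm() in rte_spinlock.h below is the in-tree user. A reduced sketch, assuming a hypothetical do_update():

    extern void do_update(void);        /* hypothetical critical section */

    static int try_transactional_update(void)
    {
            unsigned int status = rte_xbegin();

            if (status == RTE_XBEGIN_STARTED) {
                    do_update();        /* runs transactionally */
                    rte_xend();         /* commit */
                    return 1;
            }
            /* Aborted: the status bits say whether retrying makes sense. */
            if (status & RTE_XABORT_RETRY)
                    return 0;           /* caller may retry */
            return -1;                  /* caller should take a real lock */
    }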
diff --git a/lib/librte_eal/common/include/arch/x86/rte_rwlock.h b/lib/librte_eal/common/include/arch/x86/rte_rwlock.h
deleted file mode 100644 (file)
index eec4c71..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015 Intel Corporation
- */
-
-#ifndef _RTE_RWLOCK_X86_64_H_
-#define _RTE_RWLOCK_X86_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_rwlock.h"
-#include "rte_spinlock.h"
-
-static inline void
-rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
-{
-       if (likely(rte_try_tm(&rwl->cnt)))
-               return;
-       rte_rwlock_read_lock(rwl);
-}
-
-static inline void
-rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
-{
-       if (unlikely(rwl->cnt))
-               rte_rwlock_read_unlock(rwl);
-       else
-               rte_xend();
-}
-
-static inline void
-rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
-{
-       if (likely(rte_try_tm(&rwl->cnt)))
-               return;
-       rte_rwlock_write_lock(rwl);
-}
-
-static inline void
-rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
-{
-       if (unlikely(rwl->cnt))
-               rte_rwlock_write_unlock(rwl);
-       else
-               rte_xend();
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_RWLOCK_X86_64_H_ */
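The _tm variants elide the rwlock inside a hardware transaction when RTM is available and silently fall back to the plain lock otherwise, so callers need no feature test of their own. A sketch with a hypothetical shared table:

    static rte_rwlock_t table_lock = RTE_RWLOCK_INITIALIZER;

    static void read_table(void)
    {
            rte_rwlock_read_lock_tm(&table_lock);   /* elided if possible */
            /* read-side critical section over the hypothetical table */
            rte_rwlock_read_unlock_tm(&table_lock); /* xend or real unlock */
    }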
diff --git a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
deleted file mode 100644 (file)
index e2e2b26..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_SPINLOCK_X86_64_H_
-#define _RTE_SPINLOCK_X86_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_spinlock.h"
-#include "rte_rtm.h"
-#include "rte_cpuflags.h"
-#include "rte_branch_prediction.h"
-#include "rte_common.h"
-#include "rte_pause.h"
-#include "rte_cycles.h"
-
-#define RTE_RTM_MAX_RETRIES (20)
-#define RTE_XABORT_LOCK_BUSY (0xff)
-
-#ifndef RTE_FORCE_INTRINSICS
-static inline void
-rte_spinlock_lock(rte_spinlock_t *sl)
-{
-       int lock_val = 1;
-       asm volatile (
-                       "1:\n"
-                       "xchg %[locked], %[lv]\n"
-                       "test %[lv], %[lv]\n"
-                       "jz 3f\n"
-                       "2:\n"
-                       "pause\n"
-                       "cmpl $0, %[locked]\n"
-                       "jnz 2b\n"
-                       "jmp 1b\n"
-                       "3:\n"
-                       : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
-                       : "[lv]" (lock_val)
-                       : "memory");
-}
-
-static inline void
-rte_spinlock_unlock (rte_spinlock_t *sl)
-{
-       int unlock_val = 0;
-       asm volatile (
-                       "xchg %[locked], %[ulv]\n"
-                       : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
-                       : "[ulv]" (unlock_val)
-                       : "memory");
-}
-
-static inline int
-rte_spinlock_trylock (rte_spinlock_t *sl)
-{
-       int lockval = 1;
-
-       asm volatile (
-                       "xchg %[locked], %[lockval]"
-                       : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
-                       : "[lockval]" (lockval)
-                       : "memory");
-
-       return lockval == 0;
-}
-#endif
-
-extern uint8_t rte_rtm_supported;
-
-static inline int rte_tm_supported(void)
-{
-       return rte_rtm_supported;
-}
-
-static inline int
-rte_try_tm(volatile int *lock)
-{
-       int i, retries;
-
-       if (!rte_rtm_supported)
-               return 0;
-
-       retries = RTE_RTM_MAX_RETRIES;
-
-       while (likely(retries--)) {
-
-               unsigned int status = rte_xbegin();
-
-               if (likely(RTE_XBEGIN_STARTED == status)) {
-                       if (unlikely(*lock))
-                               rte_xabort(RTE_XABORT_LOCK_BUSY);
-                       else
-                               return 1;
-               }
-               while (*lock)
-                       rte_pause();
-
-               if ((status & RTE_XABORT_CONFLICT) ||
-                  ((status & RTE_XABORT_EXPLICIT) &&
-                   (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))) {
-                       /* Add a small delay before retrying, basing the
-                        * delay on the number of times we've already tried,
-                        * to give a back-off type of behaviour. The pause
-                        * count is randomized by taking bits from the TSC.
-                        */
-                       int try_count = RTE_RTM_MAX_RETRIES - retries;
-                       int pause_count = (rte_rdtsc() & 0x7) | 1;
-                       pause_count <<= try_count;
-                       for (i = 0; i < pause_count; i++)
-                               rte_pause();
-                       continue;
-               }
-
-               if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
-                       break;
-       }
-       return 0;
-}
-
-static inline void
-rte_spinlock_lock_tm(rte_spinlock_t *sl)
-{
-       if (likely(rte_try_tm(&sl->locked)))
-               return;
-
-       rte_spinlock_lock(sl); /* fall-back */
-}
-
-static inline int
-rte_spinlock_trylock_tm(rte_spinlock_t *sl)
-{
-       if (likely(rte_try_tm(&sl->locked)))
-               return 1;
-
-       return rte_spinlock_trylock(sl);
-}
-
-static inline void
-rte_spinlock_unlock_tm(rte_spinlock_t *sl)
-{
-       if (unlikely(sl->locked))
-               rte_spinlock_unlock(sl);
-       else
-               rte_xend();
-}
-
-static inline void
-rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
-{
-       if (likely(rte_try_tm(&slr->sl.locked)))
-               return;
-
-       rte_spinlock_recursive_lock(slr); /* fall-back */
-}
-
-static inline void
-rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
-{
-       if (unlikely(slr->sl.locked))
-               rte_spinlock_recursive_unlock(slr);
-       else
-               rte_xend();
-}
-
-static inline int
-rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
-{
-       if (likely(rte_try_tm(&slr->sl.locked)))
-               return 1;
-
-       return rte_spinlock_recursive_trylock(slr);
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_SPINLOCK_X86_64_H_ */
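rte_try_tm() retries up to RTE_RTM_MAX_RETRIES with a TSC-randomized exponential back-off, so rte_spinlock_lock_tm() only writes the lock word once elision keeps failing. Usage is identical to the plain API; 'counter' is hypothetical:

    static rte_spinlock_t sl = RTE_SPINLOCK_INITIALIZER;
    static uint64_t counter;            /* hypothetical shared state */

    static void bump(void)
    {
            rte_spinlock_lock_tm(&sl);   /* elided when RTM commits */
            counter++;
            rte_spinlock_unlock_tm(&sl); /* xend, or real unlock */
    }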
diff --git a/lib/librte_eal/common/include/arch/x86/rte_ticketlock.h b/lib/librte_eal/common/include/arch/x86/rte_ticketlock.h
deleted file mode 100644 (file)
index 0cc01f6..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Arm Limited
- */
-
-#ifndef _RTE_TICKETLOCK_X86_64_H_
-#define _RTE_TICKETLOCK_X86_64_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_ticketlock.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_TICKETLOCK_X86_64_H_ */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_vect.h b/lib/librte_eal/common/include/arch/x86/rte_vect.h
deleted file mode 100644 (file)
index df5a607..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef _RTE_VECT_X86_H_
-#define _RTE_VECT_X86_H_
-
-/**
- * @file
- *
- * RTE SSE/AVX related header.
- */
-
-#include <stdint.h>
-#include <rte_config.h>
-#include "generic/rte_vect.h"
-
-#if (defined(__ICC) || \
-       (defined(_WIN64)) || \
-       (__GNUC__ == 4 &&  __GNUC_MINOR__ < 4))
-
-#include <smmintrin.h> /* SSE4 */
-
-#if defined(__AVX__)
-#include <immintrin.h>
-#endif
-
-#else
-
-#include <x86intrin.h>
-
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef __m128i xmm_t;
-
-#define        XMM_SIZE        (sizeof(xmm_t))
-#define        XMM_MASK        (XMM_SIZE - 1)
-
-typedef union rte_xmm {
-       xmm_t    x;
-       uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
-       uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
-       uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
-       uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
-       double   pd[XMM_SIZE / sizeof(double)];
-} rte_xmm_t;
-
-#ifdef __AVX__
-
-typedef __m256i ymm_t;
-
-#define        YMM_SIZE        (sizeof(ymm_t))
-#define        YMM_MASK        (YMM_SIZE - 1)
-
-typedef union rte_ymm {
-       ymm_t    y;
-       xmm_t    x[YMM_SIZE / sizeof(xmm_t)];
-       uint8_t  u8[YMM_SIZE / sizeof(uint8_t)];
-       uint16_t u16[YMM_SIZE / sizeof(uint16_t)];
-       uint32_t u32[YMM_SIZE / sizeof(uint32_t)];
-       uint64_t u64[YMM_SIZE / sizeof(uint64_t)];
-       double   pd[YMM_SIZE / sizeof(double)];
-} rte_ymm_t;
-
-#endif /* __AVX__ */
-
-#ifdef RTE_ARCH_I686
-#define _mm_cvtsi128_si64(a)    \
-__extension__ ({                \
-       rte_xmm_t m;            \
-       m.x = (a);              \
-       (m.u64[0]);             \
-})
-#endif
-
-/*
- * Prior to version 12.1 icc doesn't support _mm_set_epi64x.
- */
-#if (defined(__ICC) && __ICC < 1210)
-#define _mm_set_epi64x(a, b)     \
-__extension__ ({                 \
-       rte_xmm_t m;             \
-       m.u64[0] = b;            \
-       m.u64[1] = a;            \
-       (m.x);                   \
-})
-#endif /* (defined(__ICC) && __ICC < 1210) */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_VECT_X86_H_ */
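The rte_xmm_t union gives scalar code lane-wise access to an SSE register without aliasing tricks. A small sketch:

    #include <rte_vect.h>

    static uint32_t top_lane(void)
    {
            rte_xmm_t v;

            v.x = _mm_set_epi32(3, 2, 1, 0);  /* lanes 3..0 */
            return v.u32[3];                  /* reads lane 3, i.e. 3 */
    }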
index e666618..94dfc5f 100644 (file)
@@ -1,8 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017 Intel Corporation
 
-eal_inc += include_directories('.', 'include',
-               join_paths('include/arch', arch_subdir))
+eal_inc += include_directories('.', 'include')
 
 common_objs = []
 common_sources = files(
@@ -98,6 +97,3 @@ generic_headers = files(
        'include/generic/rte_ticketlock.h',
        'include/generic/rte_vect.h')
 install_headers(generic_headers, subdir: 'generic')
-
-# get and install the architecture specific headers
-subdir(join_paths('include/arch', arch_subdir))
index 16a4f98..0c3d465 100644 (file)
@@ -28,4 +28,4 @@ endif
 sources += common_sources + env_sources
 objs = common_objs + env_objs
 headers = common_headers + env_headers
-includes = eal_inc
+includes += eal_inc
diff --git a/lib/librte_eal/ppc/include/meson.build b/lib/librte_eal/ppc/include/meson.build
new file mode 100644 (file)
index 0000000..3a91c98
--- /dev/null
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+includes += include_directories('.')
+
+arch_headers = files(
+       'rte_atomic.h',
+       'rte_byteorder.h',
+       'rte_cpuflags.h',
+       'rte_cycles.h',
+       'rte_io.h',
+       'rte_memcpy.h',
+       'rte_pause.h',
+       'rte_prefetch.h',
+       'rte_rwlock.h',
+       'rte_spinlock.h',
+       'rte_vect.h',
+)
+install_headers(arch_headers, subdir: get_option('include_subdir_arch'))
diff --git a/lib/librte_eal/ppc/include/rte_atomic.h b/lib/librte_eal/ppc/include/rte_atomic.h
new file mode 100644 (file)
index 0000000..7e3e131
--- /dev/null
@@ -0,0 +1,413 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
+ * Copyright (c) 2008 Marcel Moolenaar
+ * Copyright (c) 2001 Benno Rice
+ * Copyright (c) 2001 David E. O'Brien
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_PPC_64_H_
+#define _RTE_ATOMIC_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include "generic/rte_atomic.h"
+
+#define        rte_mb()  asm volatile("sync" : : : "memory")
+
+#define        rte_wmb() asm volatile("sync" : : : "memory")
+
+#define        rte_rmb() asm volatile("sync" : : : "memory")
+
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_wmb()
+
+#define rte_smp_rmb() rte_rmb()
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_wmb()
+
+#define rte_io_rmb() rte_rmb()
+
+#define rte_cio_wmb() rte_wmb()
+
+#define rte_cio_rmb() rte_rmb()
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+/* To be compatible with Power7, use GCC built-in functions for 16-bit
+ * operations */
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+       return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
+               __ATOMIC_ACQUIRE) ? 1 : 0;
+}
+
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+       return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+       __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+}
+
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+       __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+}
+
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+       return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+}
+
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+       return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
+}
+
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+       return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+       unsigned int ret = 0;
+
+       asm volatile(
+                       "\tlwsync\n"
+                       "1:\tlwarx %[ret], 0, %[dst]\n"
+                       "cmplw %[exp], %[ret]\n"
+                       "bne 2f\n"
+                       "stwcx. %[src], 0, %[dst]\n"
+                       "bne- 1b\n"
+                       "li %[ret], 1\n"
+                       "b 3f\n"
+                       "2:\n"
+                       "stwcx. %[ret], 0, %[dst]\n"
+                       "li %[ret], 0\n"
+                       "3:\n"
+                       "isync\n"
+                       : [ret] "=&r" (ret), "=m" (*dst)
+                       : [dst] "r" (dst),
+                         [exp] "r" (exp),
+                         [src] "r" (src),
+                         "m" (*dst)
+                       : "cc", "memory");
+
+       return ret;
+}
+
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+       return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+       int t;
+
+       asm volatile(
+                       "1: lwarx %[t],0,%[cnt]\n"
+                       "addic %[t],%[t],1\n"
+                       "stwcx. %[t],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       : [t] "=&r" (t), "=m" (v->cnt)
+                       : [cnt] "r" (&v->cnt), "m" (v->cnt)
+                       : "cc", "xer", "memory");
+}
+
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+       int t;
+
+       asm volatile(
+                       "1: lwarx %[t],0,%[cnt]\n"
+                       "addic %[t],%[t],-1\n"
+                       "stwcx. %[t],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       : [t] "=&r" (t), "=m" (v->cnt)
+                       : [cnt] "r" (&v->cnt), "m" (v->cnt)
+                       : "cc", "xer", "memory");
+}
+
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+       int ret;
+
+       asm volatile(
+                       "\n\tlwsync\n"
+                       "1: lwarx %[ret],0,%[cnt]\n"
+                       "addic  %[ret],%[ret],1\n"
+                       "stwcx. %[ret],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       "isync\n"
+                       : [ret] "=&r" (ret)
+                       : [cnt] "r" (&v->cnt)
+                       : "cc", "xer", "memory");
+
+       return ret == 0;
+}
+
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+       int ret;
+
+       asm volatile(
+                       "\n\tlwsync\n"
+                       "1: lwarx %[ret],0,%[cnt]\n"
+                       "addic %[ret],%[ret],-1\n"
+                       "stwcx. %[ret],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       "isync\n"
+                       : [ret] "=&r" (ret)
+                       : [cnt] "r" (&v->cnt)
+                       : "cc", "xer", "memory");
+
+       return ret == 0;
+}
+
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+       return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+}
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+       unsigned int ret = 0;
+
+       asm volatile (
+                       "\tlwsync\n"
+                       "1: ldarx %[ret], 0, %[dst]\n"
+                       "cmpld %[exp], %[ret]\n"
+                       "bne 2f\n"
+                       "stdcx. %[src], 0, %[dst]\n"
+                       "bne- 1b\n"
+                       "li %[ret], 1\n"
+                       "b 3f\n"
+                       "2:\n"
+                       "stdcx. %[ret], 0, %[dst]\n"
+                       "li %[ret], 0\n"
+                       "3:\n"
+                       "isync\n"
+                       : [ret] "=&r" (ret), "=m" (*dst)
+                       : [dst] "r" (dst),
+                         [exp] "r" (exp),
+                         [src] "r" (src),
+                         "m" (*dst)
+                       : "cc", "memory");
+       return ret;
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+       v->cnt = 0;
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+       long ret;
+
+       asm volatile("ld%U1%X1 %[ret],%[cnt]"
+               : [ret] "=r"(ret)
+               : [cnt] "m"(v->cnt));
+
+       return ret;
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+       asm volatile("std%U0%X0 %[new_value],%[cnt]"
+               : [cnt] "=m"(v->cnt)
+               : [new_value] "r"(new_value));
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+       long t;
+
+       asm volatile(
+                       "1: ldarx %[t],0,%[cnt]\n"
+                       "add %[t],%[inc],%[t]\n"
+                       "stdcx. %[t],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       : [t] "=&r" (t), "=m" (v->cnt)
+                       : [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
+                       : "cc", "memory");
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+       long t;
+
+       asm volatile(
+                       "1: ldarx %[t],0,%[cnt]\n"
+                       "subf %[t],%[dec],%[t]\n"
+                       "stdcx. %[t],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       : [t] "=&r" (t), "+m" (v->cnt)
+                       : [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
+                       : "cc", "memory");
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+       long t;
+
+       asm volatile(
+                       "1: ldarx %[t],0,%[cnt]\n"
+                       "addic %[t],%[t],1\n"
+                       "stdcx. %[t],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       : [t] "=&r" (t), "+m" (v->cnt)
+                       : [cnt] "r" (&v->cnt), "m" (v->cnt)
+                       : "cc", "xer", "memory");
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+       long t;
+
+       asm volatile(
+                       "1: ldarx %[t],0,%[cnt]\n"
+                       "addic %[t],%[t],-1\n"
+                       "stdcx. %[t],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       : [t] "=&r" (t), "+m" (v->cnt)
+                       : [cnt] "r" (&v->cnt), "m" (v->cnt)
+                       : "cc", "xer", "memory");
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+       long ret;
+
+       asm volatile(
+                       "\n\tlwsync\n"
+                       "1: ldarx %[ret],0,%[cnt]\n"
+                       "add %[ret],%[inc],%[ret]\n"
+                       "stdcx. %[ret],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       "isync\n"
+                       : [ret] "=&r" (ret)
+                       : [inc] "r" (inc), [cnt] "r" (&v->cnt)
+                       : "cc", "memory");
+
+       return ret;
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+       long ret;
+
+       asm volatile(
+                       "\n\tlwsync\n"
+                       "1: ldarx %[ret],0,%[cnt]\n"
+                       "subf %[ret],%[dec],%[ret]\n"
+                       "stdcx. %[ret],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       "isync\n"
+                       : [ret] "=&r" (ret)
+                       : [dec] "r" (dec), [cnt] "r" (&v->cnt)
+                       : "cc", "memory");
+
+       return ret;
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+       long ret;
+
+       asm volatile(
+                       "\n\tlwsync\n"
+                       "1: ldarx %[ret],0,%[cnt]\n"
+                       "addic %[ret],%[ret],1\n"
+                       "stdcx. %[ret],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       "isync\n"
+                       : [ret] "=&r" (ret)
+                       : [cnt] "r" (&v->cnt)
+                       : "cc", "xer", "memory");
+
+       return ret == 0;
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+       long ret;
+
+       asm volatile(
+                       "\n\tlwsync\n"
+                       "1: ldarx %[ret],0,%[cnt]\n"
+                       "addic %[ret],%[ret],-1\n"
+                       "stdcx. %[ret],0,%[cnt]\n"
+                       "bne- 1b\n"
+                       "isync\n"
+                       : [ret] "=&r" (ret)
+                       : [cnt] "r" (&v->cnt)
+                       : "cc", "xer", "memory");
+
+       return ret == 0;
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+       return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+/**
+ * Atomically set a 64-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+       v->cnt = 0;
+}
+
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+       return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_PPC_64_H_ */
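rte_atomic32_cmpset() above wraps a lwarx/stwcx. loop fenced by lwsync/isync, yielding an acquire/release compare-and-set. The usual retry loop on top of it, with a hypothetical shared flags word:

    static volatile uint32_t flags;      /* hypothetical shared word */

    static void set_flag_bit0(void)
    {
            uint32_t old;

            do {
                    old = flags;
            } while (!rte_atomic32_cmpset(&flags, old, old | 0x1));
    }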
diff --git a/lib/librte_eal/ppc/include/rte_byteorder.h b/lib/librte_eal/ppc/include/rte_byteorder.h
new file mode 100644 (file)
index 0000000..bfdded4
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Inspired from FreeBSD src/sys/powerpc/include/endian.h
+ * Copyright (c) 1987, 1991, 1993
+ * The Regents of the University of California.  All rights reserved.
+ */
+
+#ifndef _RTE_BYTEORDER_PPC_64_H_
+#define _RTE_BYTEORDER_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include "generic/rte_byteorder.h"
+
+/*
+ * An architecture-optimized byte swap for a 16-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap16().
+ */
+static inline uint16_t rte_arch_bswap16(uint16_t _x)
+{
+       return (_x >> 8) | ((_x << 8) & 0xff00);
+}
+
+/*
+ * An architecture-optimized byte swap for a 32-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap32().
+ */
+static inline uint32_t rte_arch_bswap32(uint32_t _x)
+{
+       return (_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
+               ((_x << 24) & 0xff000000);
+}
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+  * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+/* 64-bit mode */
+static inline uint64_t rte_arch_bswap64(uint64_t _x)
+{
+       return (_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
+               ((_x >> 8) & 0xff000000) | ((_x << 8) & (0xffULL << 32)) |
+               ((_x << 24) & (0xffULL << 40)) |
+               ((_x << 40) & (0xffULL << 48)) | ((_x << 56));
+}
+
+#ifndef RTE_FORCE_INTRINSICS
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap16(x) :            \
+                                  rte_arch_bswap16(x)))
+
+#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap32(x) :            \
+                                  rte_arch_bswap32(x)))
+
+#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap64(x) :            \
+                                  rte_arch_bswap64(x)))
+#else
+/*
+ * __builtin_bswap16 is only available in gcc 4.8 and upwards
+ */
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap16(x) :            \
+                                  rte_arch_bswap16(x)))
+#endif
+#endif
+
+/* Power 8 supports both little-endian and big-endian modes;
+ * Power 7 supports big endian only.
+ */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+#define rte_cpu_to_le_16(x) (x)
+#define rte_cpu_to_le_32(x) (x)
+#define rte_cpu_to_le_64(x) (x)
+
+#define rte_cpu_to_be_16(x) rte_bswap16(x)
+#define rte_cpu_to_be_32(x) rte_bswap32(x)
+#define rte_cpu_to_be_64(x) rte_bswap64(x)
+
+#define rte_le_to_cpu_16(x) (x)
+#define rte_le_to_cpu_32(x) (x)
+#define rte_le_to_cpu_64(x) (x)
+
+#define rte_be_to_cpu_16(x) rte_bswap16(x)
+#define rte_be_to_cpu_32(x) rte_bswap32(x)
+#define rte_be_to_cpu_64(x) rte_bswap64(x)
+
+#else /* RTE_BIG_ENDIAN */
+
+#define rte_cpu_to_le_16(x) rte_bswap16(x)
+#define rte_cpu_to_le_32(x) rte_bswap32(x)
+#define rte_cpu_to_le_64(x) rte_bswap64(x)
+
+#define rte_cpu_to_be_16(x) (x)
+#define rte_cpu_to_be_32(x) (x)
+#define rte_cpu_to_be_64(x) (x)
+
+#define rte_le_to_cpu_16(x) rte_bswap16(x)
+#define rte_le_to_cpu_32(x) rte_bswap32(x)
+#define rte_le_to_cpu_64(x) rte_bswap64(x)
+
+#define rte_be_to_cpu_16(x) (x)
+#define rte_be_to_cpu_32(x) (x)
+#define rte_be_to_cpu_64(x) (x)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BYTEORDER_PPC_64_H_ */
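Per the macros above, rte_cpu_to_be_32() compiles to rte_arch_bswap32() on a little-endian POWER8 build and to the identity on big-endian builds. A one-function sketch:

    static uint32_t to_wire(uint32_t host)
    {
            /* e.g. 0x11223344 becomes 0x44332211 on LE builds */
            return rte_cpu_to_be_32(host);
    }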
diff --git a/lib/librte_eal/ppc/include/rte_cpuflags.h b/lib/librte_eal/ppc/include/rte_cpuflags.h
new file mode 100644 (file)
index 0000000..a88355d
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_CPUFLAGS_PPC_64_H_
+#define _RTE_CPUFLAGS_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Enumeration of all CPU features supported
+ */
+enum rte_cpu_flag_t {
+       RTE_CPUFLAG_PPC_LE = 0,
+       RTE_CPUFLAG_TRUE_LE,
+       RTE_CPUFLAG_PSERIES_PERFMON_COMPAT,
+       RTE_CPUFLAG_VSX,
+       RTE_CPUFLAG_ARCH_2_06,
+       RTE_CPUFLAG_POWER6_EXT,
+       RTE_CPUFLAG_DFP,
+       RTE_CPUFLAG_PA6T,
+       RTE_CPUFLAG_ARCH_2_05,
+       RTE_CPUFLAG_ICACHE_SNOOP,
+       RTE_CPUFLAG_SMT,
+       RTE_CPUFLAG_BOOKE,
+       RTE_CPUFLAG_CELLBE,
+       RTE_CPUFLAG_POWER5_PLUS,
+       RTE_CPUFLAG_POWER5,
+       RTE_CPUFLAG_POWER4,
+       RTE_CPUFLAG_NOTB,
+       RTE_CPUFLAG_EFP_DOUBLE,
+       RTE_CPUFLAG_EFP_SINGLE,
+       RTE_CPUFLAG_SPE,
+       RTE_CPUFLAG_UNIFIED_CACHE,
+       RTE_CPUFLAG_4xxMAC,
+       RTE_CPUFLAG_MMU,
+       RTE_CPUFLAG_FPU,
+       RTE_CPUFLAG_ALTIVEC,
+       RTE_CPUFLAG_PPC601,
+       RTE_CPUFLAG_PPC64,
+       RTE_CPUFLAG_PPC32,
+       RTE_CPUFLAG_TAR,
+       RTE_CPUFLAG_LSEL,
+       RTE_CPUFLAG_EBB,
+       RTE_CPUFLAG_DSCR,
+       RTE_CPUFLAG_HTM,
+       RTE_CPUFLAG_ARCH_2_07,
+       /* The last item */
+       RTE_CPUFLAG_NUMFLAGS, /**< This should always be the last! */
+};
+
+#include "generic/rte_cpuflags.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CPUFLAGS_PPC_64_H_ */
diff --git a/lib/librte_eal/ppc/include/rte_cycles.h b/lib/librte_eal/ppc/include/rte_cycles.h
new file mode 100644 (file)
index 0000000..8f2e986
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_CYCLES_PPC_64_H_
+#define _RTE_CYCLES_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_cycles.h"
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+/**
+ * Read the time base register.
+ *
+ * @return
+ *   The time base for this lcore.
+ */
+static inline uint64_t
+rte_rdtsc(void)
+{
+       union {
+               uint64_t tsc_64;
+               RTE_STD_C11
+               struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+                       uint32_t hi_32;
+                       uint32_t lo_32;
+#else
+                       uint32_t lo_32;
+                       uint32_t hi_32;
+#endif
+               };
+       } tsc;
+       uint32_t tmp;
+
+       asm volatile(
+                       "0:\n"
+                       "mftbu   %[hi32]\n"
+                       "mftb    %[lo32]\n"
+                       "mftbu   %[tmp]\n"
+                       "cmpw    %[tmp],%[hi32]\n"
+                       "bne     0b\n"
+                       : [hi32] "=r"(tsc.hi_32), [lo32] "=r"(tsc.lo_32),
+                       [tmp] "=r"(tmp)
+                   );
+       return tsc.tsc_64;
+}
+
+static inline uint64_t
+rte_rdtsc_precise(void)
+{
+       rte_mb();
+       return rte_rdtsc();
+}
+
+static inline uint64_t
+rte_get_tsc_cycles(void) { return rte_rdtsc(); }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CYCLES_PPC_64_H_ */
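rte_rdtsc() reads the POWER time base with the mftbu/mftb/mftbu retry sequence so the 64-bit value stays consistent across the two 32-bit reads; rte_rdtsc_precise() issues a full sync barrier first. Note it returns time base ticks, not core cycles. A timing sketch with a hypothetical workload():

    extern void workload(void);          /* hypothetical measured code */

    static uint64_t time_workload(void)
    {
            uint64_t start = rte_rdtsc_precise(); /* barrier + read */

            workload();
            return rte_rdtsc_precise() - start;   /* elapsed TB ticks */
    }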
diff --git a/lib/librte_eal/ppc/include/rte_io.h b/lib/librte_eal/ppc/include/rte_io.h
new file mode 100644 (file)
index 0000000..0145506
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _RTE_IO_PPC_64_H_
+#define _RTE_IO_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_io.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IO_PPC_64_H_ */
diff --git a/lib/librte_eal/ppc/include/rte_mcslock.h b/lib/librte_eal/ppc/include/rte_mcslock.h
new file mode 100644 (file)
index 0000000..c58a6ed
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_MCSLOCK_PPC_64_H_
+#define _RTE_MCSLOCK_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_mcslock.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MCSLOCK_PPC_64_H_ */
diff --git a/lib/librte_eal/ppc/include/rte_memcpy.h b/lib/librte_eal/ppc/include/rte_memcpy.h
new file mode 100644 (file)
index 0000000..25311ba
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_MEMCPY_PPC_64_H_
+#define _RTE_MEMCPY_PPC_64_H_
+
+#include <stdint.h>
+#include <string.h>
+/* To include altivec.h, the GCC version must be >= 4.8 */
+#include <altivec.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_memcpy.h"
+
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+}
+
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+       vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+}
+
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+       vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+       vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+}
+
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+       vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+       vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+       vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
+}
+
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+       vec_vsx_st(vec_vsx_ld(0, src), 0, dst);
+       vec_vsx_st(vec_vsx_ld(16, src), 16, dst);
+       vec_vsx_st(vec_vsx_ld(32, src), 32, dst);
+       vec_vsx_st(vec_vsx_ld(48, src), 48, dst);
+       vec_vsx_st(vec_vsx_ld(64, src), 64, dst);
+       vec_vsx_st(vec_vsx_ld(80, src), 80, dst);
+       vec_vsx_st(vec_vsx_ld(96, src), 96, dst);
+       vec_vsx_st(vec_vsx_ld(112, src), 112, dst);
+}
+
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov128(dst, src);
+       rte_mov128(dst + 128, src + 128);
+}
+
+#define rte_memcpy(dst, src, n)              \
+       __extension__ ({                     \
+       (__builtin_constant_p(n)) ?          \
+       memcpy((dst), (src), (n)) :          \
+       rte_memcpy_func((dst), (src), (n)); })
+
+static inline void *
+rte_memcpy_func(void *dst, const void *src, size_t n)
+{
+       void *ret = dst;
+
+       /* We can't copy < 16 bytes using vector registers, so do it manually. */
+       if (n < 16) {
+               if (n & 0x01) {
+                       *(uint8_t *)dst = *(const uint8_t *)src;
+                       dst = (uint8_t *)dst + 1;
+                       src = (const uint8_t *)src + 1;
+               }
+               if (n & 0x02) {
+                       *(uint16_t *)dst = *(const uint16_t *)src;
+                       dst = (uint16_t *)dst + 1;
+                       src = (const uint16_t *)src + 1;
+               }
+               if (n & 0x04) {
+                       *(uint32_t *)dst = *(const uint32_t *)src;
+                       dst = (uint32_t *)dst + 1;
+                       src = (const uint32_t *)src + 1;
+               }
+               if (n & 0x08)
+                       *(uint64_t *)dst = *(const uint64_t *)src;
+               return ret;
+       }
+
+       /* Special fast cases for <= 128 bytes */
+       if (n <= 32) {
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst - 16 + n,
+                       (const uint8_t *)src - 16 + n);
+               return ret;
+       }
+
+       if (n <= 64) {
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov32((uint8_t *)dst - 32 + n,
+                       (const uint8_t *)src - 32 + n);
+               return ret;
+       }
+
+       if (n <= 128) {
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov64((uint8_t *)dst - 64 + n,
+                       (const uint8_t *)src - 64 + n);
+               return ret;
+       }
+
+       /*
+        * For large copies > 128 bytes. This combination of 256, 64 and 16 byte
+        * copies was found to be faster than doing 128 and 32 byte copies as
+        * well.
+        */
+       for ( ; n >= 256; n -= 256) {
+               rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+               dst = (uint8_t *)dst + 256;
+               src = (const uint8_t *)src + 256;
+       }
+
+       /*
+        * We split the remaining bytes (which will be less than 256) into
+        * 64-byte (2^6) chunks.
+        * Using incrementing integers in the case labels of a switch statement
+        * encourages the compiler to use a jump table. To get incrementing
+        * integers, we shift the two relevant bits down to the LSB position
+        * (which yields decrementing integers) and then subtract them from 3.
+        */
+       switch (3 - (n >> 6)) {
+       case 0x00:
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               n -= 64;
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;      /* fallthrough */
+       case 0x01:
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               n -= 64;
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;      /* fallthrough */
+       case 0x02:
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               n -= 64;
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;      /* fallthrough */
+       default:
+               ;
+       }
+
+       /*
+        * We split the remaining bytes (which will be less than 64) into
+        * 16-byte (2^4) chunks, using the same switch structure as above.
+        */
+       switch (3 - (n >> 4)) {
+       case 0x00:
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               n -= 16;
+               dst = (uint8_t *)dst + 16;
+               src = (const uint8_t *)src + 16;      /* fallthrough */
+       case 0x01:
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               n -= 16;
+               dst = (uint8_t *)dst + 16;
+               src = (const uint8_t *)src + 16;      /* fallthrough */
+       case 0x02:
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               n -= 16;
+               dst = (uint8_t *)dst + 16;
+               src = (const uint8_t *)src + 16;      /* fallthrough */
+       default:
+               ;
+       }
+
+       /* Copy any remaining bytes, without going beyond end of buffers */
+       if (n != 0)
+               rte_mov16((uint8_t *)dst - 16 + n,
+                       (const uint8_t *)src - 16 + n);
+       return ret;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_PPC_64_H_ */
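
rte_memcpy() is defined as a macro above so that compile-time-constant sizes
go straight to memcpy(), which the compiler can expand inline, while runtime
sizes dispatch to the VSX-based rte_memcpy_func(). A small usage sketch,
assuming a DPDK build where this header is reached through <rte_memcpy.h>
(struct pkt_meta is illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <rte_memcpy.h>

    struct pkt_meta {
            uint8_t data[64];
    };

    /* Caller guarantees len <= sizeof(dst->data). */
    void copy_meta(struct pkt_meta *dst, const struct pkt_meta *src, size_t len)
    {
            /* Constant size: the macro resolves to plain memcpy(). */
            rte_memcpy(dst, src, sizeof(*dst));

            /* Runtime size: dispatches to rte_memcpy_func(). */
            rte_memcpy(dst->data, src->data, len);
    }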
diff --git a/lib/librte_eal/ppc/include/rte_pause.h b/lib/librte_eal/ppc/include/rte_pause.h
new file mode 100644 (file)
index 0000000..16e47ce
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _RTE_PAUSE_PPC64_H_
+#define _RTE_PAUSE_PPC64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_atomic.h"
+
+#include "generic/rte_pause.h"
+
+static inline void rte_pause(void)
+{
+       /* Set hardware multi-threading low priority */
+       asm volatile("or 1,1,1");
+       /* Set hardware multi-threading medium priority */
+       asm volatile("or 2,2,2");
+       rte_compiler_barrier();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_PPC64_H_ */
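
On POWER SMT, the "or 1,1,1" / "or 2,2,2" pair above briefly drops the
hardware thread to low priority and restores medium priority, so a spinning
thread donates execution resources to its siblings. A minimal sketch of the
intended use in a busy-wait loop:

    #include <rte_pause.h>

    /* Spin until *flag becomes non-zero, hinting the core that we are
     * only busy-waiting. */
    static void wait_for_flag(volatile int *flag)
    {
            while (*flag == 0)
                    rte_pause();
    }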
diff --git a/lib/librte_eal/ppc/include/rte_prefetch.h b/lib/librte_eal/ppc/include/rte_prefetch.h
new file mode 100644 (file)
index 0000000..9ba07c8
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_PREFETCH_PPC_64_H_
+#define _RTE_PREFETCH_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_prefetch.h"
+
+static inline void rte_prefetch0(const volatile void *p)
+{
+       asm volatile ("dcbt 0,%[p],0" : : [p] "r" (p));
+}
+
+static inline void rte_prefetch1(const volatile void *p)
+{
+       asm volatile ("dcbt 0,%[p],0" : : [p] "r" (p));
+}
+
+static inline void rte_prefetch2(const volatile void *p)
+{
+       asm volatile ("dcbt 0,%[p],0" : : [p] "r" (p));
+}
+
+static inline void rte_prefetch_non_temporal(const volatile void *p)
+{
+       /* non-temporal version not available, fallback to rte_prefetch0 */
+       rte_prefetch0(p);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PREFETCH_PPC_64_H_ */
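
All three locality levels map to the same dcbt cache touch here. A sketch of
the usual software-pipelining pattern, prefetching a few elements ahead of the
one being processed (PREFETCH_AHEAD and handle() are illustrative, not part of
this patch):

    #include <rte_prefetch.h>

    #define PREFETCH_AHEAD 4        /* illustrative look-ahead distance */

    extern void handle(void *buf);  /* hypothetical per-buffer work */

    void process_bufs(void **bufs, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++) {
                    /* Start pulling in a buffer we will need shortly. */
                    if (i + PREFETCH_AHEAD < n)
                            rte_prefetch0(bufs[i + PREFETCH_AHEAD]);
                    handle(bufs[i]);
            }
    }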
diff --git a/lib/librte_eal/ppc/include/rte_rwlock.h b/lib/librte_eal/ppc/include/rte_rwlock.h
new file mode 100644 (file)
index 0000000..9fadc04
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef _RTE_RWLOCK_PPC_64_H_
+#define _RTE_RWLOCK_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_rwlock.h"
+
+static inline void
+rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+{
+       rte_rwlock_read_lock(rwl);
+}
+
+static inline void
+rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+{
+       rte_rwlock_read_unlock(rwl);
+}
+
+static inline void
+rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+{
+       rte_rwlock_write_lock(rwl);
+}
+
+static inline void
+rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+{
+       rte_rwlock_write_unlock(rwl);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RWLOCK_PPC_64_H_ */
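
Since rte_tm_supported() reports no transactional memory on this target (see
rte_spinlock.h below), the *_tm variants are plain lock calls, and code
written against the elision API runs unchanged. A minimal sketch, with
tbl_entries standing in for some shared state:

    #include <rte_rwlock.h>

    static rte_rwlock_t tbl_lock = RTE_RWLOCK_INITIALIZER;
    static int tbl_entries;         /* hypothetical shared state */

    int lookup(void)
    {
            int n;

            rte_rwlock_read_lock_tm(&tbl_lock);     /* plain read lock here */
            n = tbl_entries;
            rte_rwlock_read_unlock_tm(&tbl_lock);
            return n;
    }

    void update(int n)
    {
            rte_rwlock_write_lock_tm(&tbl_lock);    /* plain write lock here */
            tbl_entries = n;
            rte_rwlock_write_unlock_tm(&tbl_lock);
    }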
diff --git a/lib/librte_eal/ppc/include/rte_spinlock.h b/lib/librte_eal/ppc/include/rte_spinlock.h
new file mode 100644 (file)
index 0000000..149ec24
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2014.
+ */
+
+#ifndef _RTE_SPINLOCK_PPC_64_H_
+#define _RTE_SPINLOCK_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_pause.h>
+#include "generic/rte_spinlock.h"
+
+/* FIXME: use intrinsics to implement the spinlock on the Power architecture */
+
+#ifndef RTE_FORCE_INTRINSICS
+
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl)
+{
+       while (__sync_lock_test_and_set(&sl->locked, 1))
+               while (sl->locked)
+                       rte_pause();
+}
+
+static inline void
+rte_spinlock_unlock(rte_spinlock_t *sl)
+{
+       __sync_lock_release(&sl->locked);
+}
+
+static inline int
+rte_spinlock_trylock(rte_spinlock_t *sl)
+{
+       return __sync_lock_test_and_set(&sl->locked, 1) == 0;
+}
+
+#endif
+
+static inline int rte_tm_supported(void)
+{
+       return 0;
+}
+
+static inline void
+rte_spinlock_lock_tm(rte_spinlock_t *sl)
+{
+       rte_spinlock_lock(sl); /* fall-back */
+}
+
+static inline int
+rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+{
+       return rte_spinlock_trylock(sl);
+}
+
+static inline void
+rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+{
+       rte_spinlock_unlock(sl);
+}
+
+static inline void
+rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
+{
+       rte_spinlock_recursive_lock(slr); /* fall-back */
+}
+
+static inline void
+rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
+{
+       rte_spinlock_recursive_unlock(slr);
+}
+
+static inline int
+rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
+{
+       return rte_spinlock_recursive_trylock(slr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SPINLOCK_PPC_64_H_ */
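
Without RTE_FORCE_INTRINSICS, the lock above is a test-and-set with an inner
read-only spin, so a contended waiter polls its cached copy of the lock word
instead of hammering it with atomic operations. A short usage sketch with a
hypothetical shared counter:

    #include <stdint.h>
    #include <rte_spinlock.h>

    static rte_spinlock_t stats_lock = RTE_SPINLOCK_INITIALIZER;
    static uint64_t pkt_count;      /* hypothetical shared counter */

    void account_burst(unsigned int n)
    {
            rte_spinlock_lock(&stats_lock);
            pkt_count += n;
            rte_spinlock_unlock(&stats_lock);
    }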
diff --git a/lib/librte_eal/ppc/include/rte_ticketlock.h b/lib/librte_eal/ppc/include/rte_ticketlock.h
new file mode 100644 (file)
index 0000000..c175e9e
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_TICKETLOCK_PPC_64_H_
+#define _RTE_TICKETLOCK_PPC_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_ticketlock.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TICKETLOCK_PPC_64_H_ */
diff --git a/lib/librte_eal/ppc/include/rte_vect.h b/lib/librte_eal/ppc/include/rte_vect.h
new file mode 100644 (file)
index 0000000..068c805
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) IBM Corporation 2016.
+ */
+
+#ifndef _RTE_VECT_PPC_64_H_
+#define _RTE_VECT_PPC_64_H_
+
+#include <altivec.h>
+#include "generic/rte_vect.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef vector signed int xmm_t;
+
+#define        XMM_SIZE        (sizeof(xmm_t))
+#define        XMM_MASK        (XMM_SIZE - 1)
+
+typedef union rte_xmm {
+       xmm_t    x;
+       uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
+       uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
+       uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
+       uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
+       double   pd[XMM_SIZE / sizeof(double)];
+} __attribute__((aligned(16))) rte_xmm_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_VECT_PPC_64_H_ */
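
The rte_xmm_t union gives scalar code lane-wise access to a 16-byte Altivec
vector. A small sketch, assuming VSX is available at compile time:

    #include <rte_vect.h>

    /* Sum the four 32-bit lanes of a vector by reading them back
     * through the rte_xmm_t union. */
    static uint32_t sum_lanes(xmm_t v)
    {
            rte_xmm_t x;
            uint32_t sum = 0;
            unsigned int i;

            x.x = v;
            for (i = 0; i < XMM_SIZE / sizeof(uint32_t); i++)
                    sum += x.u32[i];
            return sum;
    }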
diff --git a/lib/librte_eal/ppc/meson.build b/lib/librte_eal/ppc/meson.build
index 695b171..f4b6d95 100644 (file)
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
 
+subdir('include')
+
 sources += files(
        'rte_cpuflags.c',
        'rte_cycles.c',
diff --git a/lib/librte_eal/x86/include/meson.build b/lib/librte_eal/x86/include/meson.build
new file mode 100644 (file)
index 0000000..d336d52
--- /dev/null
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+includes += include_directories('.')
+
+arch_headers = files(
+       'rte_atomic_32.h',
+       'rte_atomic_64.h',
+       'rte_atomic.h',
+       'rte_byteorder_32.h',
+       'rte_byteorder_64.h',
+       'rte_byteorder.h',
+       'rte_cpuflags.h',
+       'rte_cycles.h',
+       'rte_io.h',
+       'rte_memcpy.h',
+       'rte_prefetch.h',
+       'rte_pause.h',
+       'rte_rtm.h',
+       'rte_rwlock.h',
+       'rte_spinlock.h',
+       'rte_vect.h',
+)
+install_headers(arch_headers, subdir: get_option('include_subdir_arch'))
diff --git a/lib/librte_eal/x86/include/rte_atomic.h b/lib/librte_eal/x86/include/rte_atomic.h
new file mode 100644 (file)
index 0000000..148398f
--- /dev/null
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_ATOMIC_X86_H_
+#define _RTE_ATOMIC_X86_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <emmintrin.h>
+#include "generic/rte_atomic.h"
+
+#if RTE_MAX_LCORE == 1
+#define MPLOCKED                        /**< No need to insert MP lock prefix. */
+#else
+#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
+#endif
+
+#define        rte_mb() _mm_mfence()
+
+#define        rte_wmb() _mm_sfence()
+
+#define        rte_rmb() _mm_lfence()
+
+#define rte_smp_wmb() rte_compiler_barrier()
+
+#define rte_smp_rmb() rte_compiler_barrier()
+
+/*
+ * From Intel Software Development Manual; Vol 3;
+ * 8.2.2 Memory Ordering in P6 and More Recent Processor Families:
+ * ...
+ * . Reads are not reordered with other reads.
+ * . Writes are not reordered with older reads.
+ * . Writes to memory are not reordered with other writes,
+ *   with the following exceptions:
+ *   . streaming stores (writes) executed with the non-temporal move
+ *     instructions (MOVNTI, MOVNTQ, MOVNTDQ, MOVNTPS, and MOVNTPD); and
+ *   . string operations (see Section 8.2.4.1).
+ *  ...
+ * . Reads may be reordered with older writes to different locations but not
+ * with older writes to the same location.
+ * . Reads or writes cannot be reordered with I/O instructions,
+ * locked instructions, or serializing instructions.
+ * . Reads cannot pass earlier LFENCE and MFENCE instructions.
+ * . Writes ... cannot pass earlier LFENCE, SFENCE, and MFENCE instructions.
+ * . LFENCE instructions cannot pass earlier reads.
+ * . SFENCE instructions cannot pass earlier writes ...
+ * . MFENCE instructions cannot pass earlier reads, writes ...
+ *
+ * As pointed out by the JVM developers, this makes it possible to use
+ * lock-prefixed instructions to get the same effect as mfence, and on most
+ * modern hardware that gives better performance than using mfence:
+ * https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
+ * The basic idea is to use a lock-prefixed add with some dummy memory
+ * location as the destination. From their experiments, 128B (2 cache lines)
+ * below the current stack pointer looks like a good candidate.
+ * So below we use that technique for the rte_smp_mb() implementation.
+ */
+
+static __rte_always_inline void
+rte_smp_mb(void)
+{
+#ifdef RTE_ARCH_I686
+       asm volatile("lock addl $0, -128(%%esp); " ::: "memory");
+#else
+       asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
+#endif
+}
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_compiler_barrier()
+
+#define rte_io_rmb() rte_compiler_barrier()
+
+#define rte_cio_wmb() rte_compiler_barrier()
+
+#define rte_cio_rmb() rte_compiler_barrier()
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+       uint8_t res;
+
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgw %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* no-clobber list */
+       return res;
+}
+
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+       asm volatile(
+                       MPLOCKED
+                       "xchgw %0, %1;"
+                       : "=r" (val), "=m" (*dst)
+                       : "0" (val),  "m" (*dst)
+                       : "memory");         /* no-clobber list */
+       return val;
+}
+
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+       return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+       asm volatile(
+                       MPLOCKED
+                       "decw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+       uint8_t ret;
+
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return ret != 0;
+}
+
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+       uint8_t ret;
+
+       asm volatile(MPLOCKED
+                       "decw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return ret != 0;
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+       uint8_t res;
+
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgl %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* no-clobber list */
+       return res;
+}
+
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+       asm volatile(
+                       MPLOCKED
+                       "xchgl %0, %1;"
+                       : "=r" (val), "=m" (*dst)
+                       : "0" (val),  "m" (*dst)
+                       : "memory");         /* no-clobber list */
+       return val;
+}
+
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+       return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+       asm volatile(
+                       MPLOCKED
+                       "decl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+       uint8_t ret;
+
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return ret != 0;
+}
+
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+       uint8_t ret;
+
+       asm volatile(MPLOCKED
+                       "decl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return ret != 0;
+}
+#endif
+
+#ifdef RTE_ARCH_I686
+#include "rte_atomic_32.h"
+#else
+#include "rte_atomic_64.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_X86_H_ */
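
The asm implementations above are compiled only when RTE_FORCE_INTRINSICS is
off; either way callers see the same rte_atomicNN_*() API. A minimal
reference-count sketch:

    #include <rte_atomic.h>

    static rte_atomic32_t refcnt = RTE_ATOMIC32_INIT(1);

    void obj_get(void)
    {
            rte_atomic32_inc(&refcnt);      /* lock; incl */
    }

    int obj_put(void)
    {
            /* Non-zero when the counter hits zero (lock; decl + sete). */
            return rte_atomic32_dec_and_test(&refcnt);
    }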
diff --git a/lib/librte_eal/x86/include/rte_atomic_32.h b/lib/librte_eal/x86/include/rte_atomic_32.h
new file mode 100644 (file)
index 0000000..f63b7fa
--- /dev/null
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ */
+
+/*
+ * Inspired from FreeBSD src/sys/i386/include/atomic.h
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_X86_H_
+#error do not include this file directly, use <rte_atomic.h> instead
+#endif
+
+#ifndef _RTE_ATOMIC_I686_H_
+#define _RTE_ATOMIC_I686_H_
+
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_atomic.h>
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+       uint8_t res;
+       RTE_STD_C11
+       union {
+               struct {
+                       uint32_t l32;
+                       uint32_t h32;
+               };
+               uint64_t u64;
+       } _exp, _src;
+
+       _exp.u64 = exp;
+       _src.u64 = src;
+
+#ifndef __PIC__
+       asm volatile (
+                       MPLOCKED
+                       "cmpxchg8b (%[dst]);"
+                       "setz %[res];"
+                       : [res] "=a" (res)      /* result in eax */
+                       : [dst] "S" (dst),      /* esi */
+                         "b" (_src.l32),       /* ebx */
+                         "c" (_src.h32),       /* ecx */
+                         "a" (_exp.l32),       /* eax */
+                         "d" (_exp.h32)        /* edx */
+                       : "memory" );           /* no-clobber list */
+#else
+       asm volatile (
+                       "xchgl %%ebx, %%edi;\n"
+                       MPLOCKED
+                       "cmpxchg8b (%[dst]);"
+                       "setz %[res];"
+                       "xchgl %%ebx, %%edi;\n"
+                       : [res] "=a" (res)      /* result in eax */
+                       : [dst] "S" (dst),      /* esi */
+                         "D" (_src.l32),       /* ebx, via edi */
+                         "c" (_src.h32),       /* ecx */
+                         "a" (_exp.l32),       /* eax */
+                         "d" (_exp.h32)        /* edx */
+                       : "memory" );           /* no-clobber list */
+#endif
+
+       return res;
+}
+
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dest, uint64_t val)
+{
+       uint64_t old;
+
+       do {
+               old = *dest;
+       } while (rte_atomic64_cmpset(dest, old, val) == 0);
+
+       return old;
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                             tmp, 0);
+       }
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               /* replace the value by itself */
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                             tmp, tmp);
+       }
+       return tmp;
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                             tmp, new_value);
+       }
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                             tmp, tmp + inc);
+       }
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                             tmp, tmp - dec);
+       }
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+       rte_atomic64_add(v, 1);
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+       rte_atomic64_sub(v, 1);
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                             tmp, tmp + inc);
+       }
+
+       return tmp + inc;
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+       int success = 0;
+       uint64_t tmp;
+
+       while (success == 0) {
+               tmp = v->cnt;
+               success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                             tmp, tmp - dec);
+       }
+
+       return tmp - dec;
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+       return rte_atomic64_add_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+       return rte_atomic64_sub_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+       return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+       rte_atomic64_set(v, 0);
+}
+#endif
+
+#endif /* _RTE_ATOMIC_I686_H_ */
diff --git a/lib/librte_eal/x86/include/rte_atomic_64.h b/lib/librte_eal/x86/include/rte_atomic_64.h
new file mode 100644 (file)
index 0000000..cfe7067
--- /dev/null
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ */
+
+/*
+ * Inspired from FreeBSD src/sys/amd64/include/atomic.h
+ * Copyright (c) 1998 Doug Rabson
+ * Copyright (c) 2019 Intel Corporation
+ * All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_X86_H_
+#error do not include this file directly, use <rte_atomic.h> instead
+#endif
+
+#ifndef _RTE_ATOMIC_X86_64_H_
+#define _RTE_ATOMIC_X86_64_H_
+
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_atomic.h>
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+       uint8_t res;
+
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgq %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* no-clobber list */
+
+       return res;
+}
+
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+       asm volatile(
+                       MPLOCKED
+                       "xchgq %0, %1;"
+                       : "=r" (val), "=m" (*dst)
+                       : "0" (val),  "m" (*dst)
+                       : "memory");         /* no-clobber list */
+       return val;
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+       v->cnt = 0;
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+       return v->cnt;
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+       v->cnt = new_value;
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+       asm volatile(
+                       MPLOCKED
+                       "addq %[inc], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [inc] "ir" (inc),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+       asm volatile(
+                       MPLOCKED
+                       "subq %[dec], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [dec] "ir" (dec),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+       asm volatile(
+                       MPLOCKED
+                       "incq %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+       asm volatile(
+                       MPLOCKED
+                       "decq %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+       int64_t prev = inc;
+
+       asm volatile(
+                       MPLOCKED
+                       "xaddq %[prev], %[cnt]"
+                       : [prev] "+r" (prev),   /* output */
+                         [cnt] "=m" (v->cnt)
+                       : "m" (v->cnt)          /* input */
+                       );
+       return prev + inc;
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+       return rte_atomic64_add_return(v, -dec);
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+       uint8_t ret;
+
+       asm volatile(
+                       MPLOCKED
+                       "incq %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt), /* output */
+                         [ret] "=qm" (ret)
+                       );
+
+       return ret != 0;
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+       uint8_t ret;
+
+       asm volatile(
+                       MPLOCKED
+                       "decq %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return ret != 0;
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+       return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+       v->cnt = 0;
+}
+#endif
+
+/*------------------------ 128 bit atomic operations -------------------------*/
+
+__rte_experimental
+static inline int
+rte_atomic128_cmp_exchange(rte_int128_t *dst,
+                          rte_int128_t *exp,
+                          const rte_int128_t *src,
+                          unsigned int weak,
+                          int success,
+                          int failure)
+{
+       RTE_SET_USED(weak);
+       RTE_SET_USED(success);
+       RTE_SET_USED(failure);
+       uint8_t res;
+
+       asm volatile (
+                     MPLOCKED
+                     "cmpxchg16b %[dst];"
+                     " sete %[res]"
+                     : [dst] "=m" (dst->val[0]),
+                       "=a" (exp->val[0]),
+                       "=d" (exp->val[1]),
+                       [res] "=r" (res)
+                     : "b" (src->val[0]),
+                       "c" (src->val[1]),
+                       "a" (exp->val[0]),
+                       "d" (exp->val[1]),
+                       "m" (dst->val[0])
+                     : "memory");
+
+       return res;
+}
+
+#endif /* _RTE_ATOMIC_X86_64_H_ */
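
cmpxchg16b is always a strong, sequentially consistent operation on x86, so
the weak/success/failure arguments are accepted for API compatibility and
ignored. A sketch of the usual retry loop (the function is experimental, so a
real build would need experimental APIs enabled):

    #include <rte_atomic.h>

    /* Atomically increment both 64-bit halves of a 128-bit value.
     * On failure, rte_atomic128_cmp_exchange() refreshes 'exp' with the
     * current contents, so the loop recomputes and retries. */
    void inc_pair(rte_int128_t *dst)
    {
            rte_int128_t exp = *dst;        /* racy first read is fine */
            rte_int128_t want;

            do {
                    want.val[0] = exp.val[0] + 1;
                    want.val[1] = exp.val[1] + 1;
            } while (!rte_atomic128_cmp_exchange(dst, &exp, &want, 0,
                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
    }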
diff --git a/lib/librte_eal/x86/include/rte_byteorder.h b/lib/librte_eal/x86/include/rte_byteorder.h
new file mode 100644 (file)
index 0000000..a2dfecc
--- /dev/null
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_BYTEORDER_X86_H_
+#define _RTE_BYTEORDER_X86_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include "generic/rte_byteorder.h"
+
+#ifndef RTE_BYTE_ORDER
+#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
+#endif
+
+/*
+ * An architecture-optimized byte swap for a 16-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap16().
+ */
+static inline uint16_t rte_arch_bswap16(uint16_t _x)
+{
+       uint16_t x = _x;
+       asm volatile ("xchgb %b[x1],%h[x2]"
+                     : [x1] "=Q" (x)
+                     : [x2] "0" (x)
+                     );
+       return x;
+}
+
+/*
+ * An architecture-optimized byte swap for a 32-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap32().
+ */
+static inline uint32_t rte_arch_bswap32(uint32_t _x)
+{
+       uint32_t x = _x;
+       asm volatile ("bswap %[x]"
+                     : [x] "+r" (x)
+                     );
+       return x;
+}
+
+#ifndef RTE_FORCE_INTRINSICS
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap16(x) :            \
+                                  rte_arch_bswap16(x)))
+
+#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap32(x) :            \
+                                  rte_arch_bswap32(x)))
+
+#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap64(x) :            \
+                                  rte_arch_bswap64(x)))
+#else
+/*
+ * __builtin_bswap16 is only available in GCC 4.8 and later.
+ */
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap16(x) :            \
+                                  rte_arch_bswap16(x)))
+#endif
+#endif
+
+#define rte_cpu_to_le_16(x) (x)
+#define rte_cpu_to_le_32(x) (x)
+#define rte_cpu_to_le_64(x) (x)
+
+#define rte_cpu_to_be_16(x) rte_bswap16(x)
+#define rte_cpu_to_be_32(x) rte_bswap32(x)
+#define rte_cpu_to_be_64(x) rte_bswap64(x)
+
+#define rte_le_to_cpu_16(x) (x)
+#define rte_le_to_cpu_32(x) (x)
+#define rte_le_to_cpu_64(x) (x)
+
+#define rte_be_to_cpu_16(x) rte_bswap16(x)
+#define rte_be_to_cpu_32(x) rte_bswap32(x)
+#define rte_be_to_cpu_64(x) rte_bswap64(x)
+
+#ifdef RTE_ARCH_I686
+#include "rte_byteorder_32.h"
+#else
+#include "rte_byteorder_64.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BYTEORDER_X86_H_ */
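
On little-endian x86 the *_to_le/le_to_* macros are identities and the
*_to_be/be_to_* macros byte-swap; constant arguments fold at compile time
through the rte_constant_bswap*() path. A small sketch of writing big-endian
wire fields (struct hdr is illustrative):

    #include <stdint.h>
    #include <rte_byteorder.h>

    struct hdr {
            uint16_t len;   /* big-endian on the wire */
            uint32_t seq;   /* big-endian on the wire */
    };

    void fill_hdr(struct hdr *h, uint16_t len, uint32_t seq)
    {
            h->len = rte_cpu_to_be_16(len);  /* xchgb on x86 */
            h->seq = rte_cpu_to_be_32(seq);  /* bswap on x86 */
    }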
diff --git a/lib/librte_eal/x86/include/rte_byteorder_32.h b/lib/librte_eal/x86/include/rte_byteorder_32.h
new file mode 100644 (file)
index 0000000..d5a768e
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_BYTEORDER_X86_H_
+#error do not include this file directly, use <rte_byteorder.h> instead
+#endif
+
+#ifndef _RTE_BYTEORDER_I686_H_
+#define _RTE_BYTEORDER_I686_H_
+
+#include <stdint.h>
+#include <rte_byteorder.h>
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+/* Compatibility (legacy 32-bit) mode */
+static inline uint64_t rte_arch_bswap64(uint64_t x)
+{
+       uint64_t ret = 0;
+       ret |= ((uint64_t)rte_arch_bswap32(x & 0xffffffffUL) << 32);
+       ret |= ((uint64_t)rte_arch_bswap32((x >> 32) & 0xffffffffUL));
+       return ret;
+}
+
+#endif /* _RTE_BYTEORDER_I686_H_ */
diff --git a/lib/librte_eal/x86/include/rte_byteorder_64.h b/lib/librte_eal/x86/include/rte_byteorder_64.h
new file mode 100644 (file)
index 0000000..8c6cf28
--- /dev/null
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_BYTEORDER_X86_H_
+#error do not include this file directly, use <rte_byteorder.h> instead
+#endif
+
+#ifndef _RTE_BYTEORDER_X86_64_H_
+#define _RTE_BYTEORDER_X86_64_H_
+
+#include <stdint.h>
+#include <rte_common.h>
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+/* 64-bit mode */
+static inline uint64_t rte_arch_bswap64(uint64_t _x)
+{
+       uint64_t x = _x;
+       asm volatile ("bswap %[x]"
+                     : [x] "+r" (x)
+                     );
+       return x;
+}
+
+#endif /* _RTE_BYTEORDER_X86_64_H_ */
diff --git a/lib/librte_eal/x86/include/rte_cpuflags.h b/lib/librte_eal/x86/include/rte_cpuflags.h
new file mode 100644 (file)
index 0000000..25ba47b
--- /dev/null
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_CPUFLAGS_X86_64_H_
+#define _RTE_CPUFLAGS_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum rte_cpu_flag_t {
+       /* (EAX 01h) ECX features*/
+       RTE_CPUFLAG_SSE3 = 0,               /**< SSE3 */
+       RTE_CPUFLAG_PCLMULQDQ,              /**< PCLMULQDQ */
+       RTE_CPUFLAG_DTES64,                 /**< DTES64 */
+       RTE_CPUFLAG_MONITOR,                /**< MONITOR */
+       RTE_CPUFLAG_DS_CPL,                 /**< DS_CPL */
+       RTE_CPUFLAG_VMX,                    /**< VMX */
+       RTE_CPUFLAG_SMX,                    /**< SMX */
+       RTE_CPUFLAG_EIST,                   /**< EIST */
+       RTE_CPUFLAG_TM2,                    /**< TM2 */
+       RTE_CPUFLAG_SSSE3,                  /**< SSSE3 */
+       RTE_CPUFLAG_CNXT_ID,                /**< CNXT_ID */
+       RTE_CPUFLAG_FMA,                    /**< FMA */
+       RTE_CPUFLAG_CMPXCHG16B,             /**< CMPXCHG16B */
+       RTE_CPUFLAG_XTPR,                   /**< XTPR */
+       RTE_CPUFLAG_PDCM,                   /**< PDCM */
+       RTE_CPUFLAG_PCID,                   /**< PCID */
+       RTE_CPUFLAG_DCA,                    /**< DCA */
+       RTE_CPUFLAG_SSE4_1,                 /**< SSE4_1 */
+       RTE_CPUFLAG_SSE4_2,                 /**< SSE4_2 */
+       RTE_CPUFLAG_X2APIC,                 /**< X2APIC */
+       RTE_CPUFLAG_MOVBE,                  /**< MOVBE */
+       RTE_CPUFLAG_POPCNT,                 /**< POPCNT */
+       RTE_CPUFLAG_TSC_DEADLINE,           /**< TSC_DEADLINE */
+       RTE_CPUFLAG_AES,                    /**< AES */
+       RTE_CPUFLAG_XSAVE,                  /**< XSAVE */
+       RTE_CPUFLAG_OSXSAVE,                /**< OSXSAVE */
+       RTE_CPUFLAG_AVX,                    /**< AVX */
+       RTE_CPUFLAG_F16C,                   /**< F16C */
+       RTE_CPUFLAG_RDRAND,                 /**< RDRAND */
+       RTE_CPUFLAG_HYPERVISOR,             /**< Running in a VM */
+
+       /* (EAX 01h) EDX features */
+       RTE_CPUFLAG_FPU,                    /**< FPU */
+       RTE_CPUFLAG_VME,                    /**< VME */
+       RTE_CPUFLAG_DE,                     /**< DE */
+       RTE_CPUFLAG_PSE,                    /**< PSE */
+       RTE_CPUFLAG_TSC,                    /**< TSC */
+       RTE_CPUFLAG_MSR,                    /**< MSR */
+       RTE_CPUFLAG_PAE,                    /**< PAE */
+       RTE_CPUFLAG_MCE,                    /**< MCE */
+       RTE_CPUFLAG_CX8,                    /**< CX8 */
+       RTE_CPUFLAG_APIC,                   /**< APIC */
+       RTE_CPUFLAG_SEP,                    /**< SEP */
+       RTE_CPUFLAG_MTRR,                   /**< MTRR */
+       RTE_CPUFLAG_PGE,                    /**< PGE */
+       RTE_CPUFLAG_MCA,                    /**< MCA */
+       RTE_CPUFLAG_CMOV,                   /**< CMOV */
+       RTE_CPUFLAG_PAT,                    /**< PAT */
+       RTE_CPUFLAG_PSE36,                  /**< PSE36 */
+       RTE_CPUFLAG_PSN,                    /**< PSN */
+       RTE_CPUFLAG_CLFSH,                  /**< CLFSH */
+       RTE_CPUFLAG_DS,                     /**< DS */
+       RTE_CPUFLAG_ACPI,                   /**< ACPI */
+       RTE_CPUFLAG_MMX,                    /**< MMX */
+       RTE_CPUFLAG_FXSR,                   /**< FXSR */
+       RTE_CPUFLAG_SSE,                    /**< SSE */
+       RTE_CPUFLAG_SSE2,                   /**< SSE2 */
+       RTE_CPUFLAG_SS,                     /**< SS */
+       RTE_CPUFLAG_HTT,                    /**< HTT */
+       RTE_CPUFLAG_TM,                     /**< TM */
+       RTE_CPUFLAG_PBE,                    /**< PBE */
+
+       /* (EAX 06h) EAX features */
+       RTE_CPUFLAG_DIGTEMP,                /**< DIGTEMP */
+       RTE_CPUFLAG_TRBOBST,                /**< TRBOBST */
+       RTE_CPUFLAG_ARAT,                   /**< ARAT */
+       RTE_CPUFLAG_PLN,                    /**< PLN */
+       RTE_CPUFLAG_ECMD,                   /**< ECMD */
+       RTE_CPUFLAG_PTM,                    /**< PTM */
+
+       /* (EAX 06h) ECX features */
+       RTE_CPUFLAG_MPERF_APERF_MSR,        /**< MPERF_APERF_MSR */
+       RTE_CPUFLAG_ACNT2,                  /**< ACNT2 */
+       RTE_CPUFLAG_ENERGY_EFF,             /**< ENERGY_EFF */
+
+       /* (EAX 07h, ECX 0h) EBX features */
+       RTE_CPUFLAG_FSGSBASE,               /**< FSGSBASE */
+       RTE_CPUFLAG_BMI1,                   /**< BMI1 */
+       RTE_CPUFLAG_HLE,                    /**< Hardware Lock elision */
+       RTE_CPUFLAG_AVX2,                   /**< AVX2 */
+       RTE_CPUFLAG_SMEP,                   /**< SMEP */
+       RTE_CPUFLAG_BMI2,                   /**< BMI2 */
+       RTE_CPUFLAG_ERMS,                   /**< ERMS */
+       RTE_CPUFLAG_INVPCID,                /**< INVPCID */
+       RTE_CPUFLAG_RTM,                    /**< Transactional memory */
+       RTE_CPUFLAG_AVX512F,                /**< AVX512F */
+       RTE_CPUFLAG_RDSEED,                 /**< RDSEED instruction */
+
+       /* (EAX 80000001h) ECX features */
+       RTE_CPUFLAG_LAHF_SAHF,              /**< LAHF_SAHF */
+       RTE_CPUFLAG_LZCNT,                  /**< LZCNT */
+
+       /* (EAX 80000001h) EDX features */
+       RTE_CPUFLAG_SYSCALL,                /**< SYSCALL */
+       RTE_CPUFLAG_XD,                     /**< XD */
+       RTE_CPUFLAG_1GB_PG,                 /**< 1GB_PG */
+       RTE_CPUFLAG_RDTSCP,                 /**< RDTSCP */
+       RTE_CPUFLAG_EM64T,                  /**< EM64T */
+
+       /* (EAX 80000007h) EDX features */
+       RTE_CPUFLAG_INVTSC,                 /**< INVTSC */
+
+       /* The last item */
+       RTE_CPUFLAG_NUMFLAGS,               /**< This should always be the last! */
+};
+
+#include "generic/rte_cpuflags.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CPUFLAGS_X86_64_H_ */
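
The enum values index the feature table consulted by the generic
rte_cpu_get_flag_enabled() helper. A hedged sketch of runtime dispatch on a
CPUID feature (copy_avx2/copy_sse are hypothetical implementations):

    #include <stddef.h>
    #include <rte_cpuflags.h>

    extern void copy_avx2(void *dst, const void *src, size_t n);
    extern void copy_sse(void *dst, const void *src, size_t n);

    /* Picked once at init time based on CPUID. */
    void (*copy_fn)(void *dst, const void *src, size_t n);

    void select_copy(void)
    {
            if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                    copy_fn = copy_avx2;
            else
                    copy_fn = copy_sse;
    }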
diff --git a/lib/librte_eal/x86/include/rte_cycles.h b/lib/librte_eal/x86/include/rte_cycles.h
new file mode 100644 (file)
index 0000000..a461a4d
--- /dev/null
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2013 6WIND S.A.
+ */
+
+#ifndef _RTE_CYCLES_X86_64_H_
+#define _RTE_CYCLES_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_cycles.h"
+
+#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+/* Global switch to use VMWARE mapping of TSC instead of RDTSC */
+extern int rte_cycles_vmware_tsc_map;
+#include <rte_branch_prediction.h>
+#endif
+#include <rte_common.h>
+#include <rte_config.h>
+
+static inline uint64_t
+rte_rdtsc(void)
+{
+       union {
+               uint64_t tsc_64;
+               RTE_STD_C11
+               struct {
+                       uint32_t lo_32;
+                       uint32_t hi_32;
+               };
+       } tsc;
+
+#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+       if (unlikely(rte_cycles_vmware_tsc_map)) {
+               /* ecx = 0x10000 corresponds to the physical TSC for VMware */
+               asm volatile("rdpmc" :
+                            "=a" (tsc.lo_32),
+                            "=d" (tsc.hi_32) :
+                            "c"(0x10000));
+               return tsc.tsc_64;
+       }
+#endif
+
+       asm volatile("rdtsc" :
+                    "=a" (tsc.lo_32),
+                    "=d" (tsc.hi_32));
+       return tsc.tsc_64;
+}
+
+static inline uint64_t
+rte_rdtsc_precise(void)
+{
+       rte_mb();
+       return rte_rdtsc();
+}
+
+static inline uint64_t
+rte_get_tsc_cycles(void) { return rte_rdtsc(); }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CYCLES_X86_64_H_ */
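
rte_rdtsc_precise() issues a full barrier before rdtsc so in-flight loads and
stores are not attributed to the measured region. A timing sketch using
rte_get_tsc_hz() from the generic header (valid once the EAL has calibrated
the TSC):

    #include <stdio.h>
    #include <rte_cycles.h>

    void time_region(void (*fn)(void))
    {
            uint64_t start, cycles;

            start = rte_rdtsc_precise();    /* rte_mb() + rdtsc */
            fn();
            cycles = rte_rdtsc_precise() - start;

            printf("%.9f seconds\n", (double)cycles / rte_get_tsc_hz());
    }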
diff --git a/lib/librte_eal/x86/include/rte_io.h b/lib/librte_eal/x86/include/rte_io.h
new file mode 100644 (file)
index 0000000..2db71b1
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _RTE_IO_X86_H_
+#define _RTE_IO_X86_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_io.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IO_X86_H_ */
diff --git a/lib/librte_eal/x86/include/rte_mcslock.h b/lib/librte_eal/x86/include/rte_mcslock.h
new file mode 100644 (file)
index 0000000..a8f041a
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_MCSLOCK_X86_64_H_
+#define _RTE_MCSLOCK_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_mcslock.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MCSLOCK_X86_64_H_ */
diff --git a/lib/librte_eal/x86/include/rte_memcpy.h b/lib/librte_eal/x86/include/rte_memcpy.h
new file mode 100644 (file)
index 0000000..ba44c4a
--- /dev/null
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_MEMCPY_X86_64_H_
+#define _RTE_MEMCPY_X86_64_H_
+
+/**
+ * @file
+ *
+ * Functions for SSE/AVX/AVX2/AVX512 implementation of memcpy().
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <rte_vect.h>
+#include <rte_common.h>
+#include <rte_config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Copy bytes from one location to another. The locations must not overlap.
+ *
+ * @note This is implemented as a macro, so its address should not be taken,
+ * and care is needed as parameter expressions may be evaluated multiple times.
+ *
+ * @param dst
+ *   Pointer to the destination of the data.
+ * @param src
+ *   Pointer to the source data.
+ * @param n
+ *   Number of bytes to copy.
+ * @return
+ *   Pointer to the destination data.
+ */
+static __rte_always_inline void *
+rte_memcpy(void *dst, const void *src, size_t n);
+
+#ifdef RTE_MACHINE_CPUFLAG_AVX512F
+
+#define ALIGNMENT_MASK 0x3F
+
+/**
+ * AVX512 implementation below
+ */
+
+/**
+ * Copy 16 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+       __m128i xmm0;
+
+       xmm0 = _mm_loadu_si128((const __m128i *)src);
+       _mm_storeu_si128((__m128i *)dst, xmm0);
+}
+
+/**
+ * Copy 32 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+       __m256i ymm0;
+
+       ymm0 = _mm256_loadu_si256((const __m256i *)src);
+       _mm256_storeu_si256((__m256i *)dst, ymm0);
+}
+
+/**
+ * Copy 64 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+       __m512i zmm0;
+
+       zmm0 = _mm512_loadu_si512((const void *)src);
+       _mm512_storeu_si512((void *)dst, zmm0);
+}
+
+/**
+ * Copy 128 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov64(dst + 0 * 64, src + 0 * 64);
+       rte_mov64(dst + 1 * 64, src + 1 * 64);
+}
+
+/**
+ * Copy 256 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov64(dst + 0 * 64, src + 0 * 64);
+       rte_mov64(dst + 1 * 64, src + 1 * 64);
+       rte_mov64(dst + 2 * 64, src + 2 * 64);
+       rte_mov64(dst + 3 * 64, src + 3 * 64);
+}
+
+/**
+ * Copy 128-byte blocks from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
+{
+       __m512i zmm0, zmm1;
+
+       while (n >= 128) {
+               zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
+               n -= 128;
+               zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
+               src = src + 128;
+               _mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
+               _mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
+               dst = dst + 128;
+       }
+}
+
+/**
+ * Copy 512-byte blocks from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
+{
+       __m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;
+
+       while (n >= 512) {
+               zmm0 = _mm512_loadu_si512((const void *)(src + 0 * 64));
+               n -= 512;
+               zmm1 = _mm512_loadu_si512((const void *)(src + 1 * 64));
+               zmm2 = _mm512_loadu_si512((const void *)(src + 2 * 64));
+               zmm3 = _mm512_loadu_si512((const void *)(src + 3 * 64));
+               zmm4 = _mm512_loadu_si512((const void *)(src + 4 * 64));
+               zmm5 = _mm512_loadu_si512((const void *)(src + 5 * 64));
+               zmm6 = _mm512_loadu_si512((const void *)(src + 6 * 64));
+               zmm7 = _mm512_loadu_si512((const void *)(src + 7 * 64));
+               src = src + 512;
+               _mm512_storeu_si512((void *)(dst + 0 * 64), zmm0);
+               _mm512_storeu_si512((void *)(dst + 1 * 64), zmm1);
+               _mm512_storeu_si512((void *)(dst + 2 * 64), zmm2);
+               _mm512_storeu_si512((void *)(dst + 3 * 64), zmm3);
+               _mm512_storeu_si512((void *)(dst + 4 * 64), zmm4);
+               _mm512_storeu_si512((void *)(dst + 5 * 64), zmm5);
+               _mm512_storeu_si512((void *)(dst + 6 * 64), zmm6);
+               _mm512_storeu_si512((void *)(dst + 7 * 64), zmm7);
+               dst = dst + 512;
+       }
+}
+
+static __rte_always_inline void *
+rte_memcpy_generic(void *dst, const void *src, size_t n)
+{
+       uintptr_t dstu = (uintptr_t)dst;
+       uintptr_t srcu = (uintptr_t)src;
+       void *ret = dst;
+       size_t dstofss;
+       size_t bits;
+
+       /**
+        * Copy less than 16 bytes
+        */
+       if (n < 16) {
+               if (n & 0x01) {
+                       *(uint8_t *)dstu = *(const uint8_t *)srcu;
+                       srcu = (uintptr_t)((const uint8_t *)srcu + 1);
+                       dstu = (uintptr_t)((uint8_t *)dstu + 1);
+               }
+               if (n & 0x02) {
+                       *(uint16_t *)dstu = *(const uint16_t *)srcu;
+                       srcu = (uintptr_t)((const uint16_t *)srcu + 1);
+                       dstu = (uintptr_t)((uint16_t *)dstu + 1);
+               }
+               if (n & 0x04) {
+                       *(uint32_t *)dstu = *(const uint32_t *)srcu;
+                       srcu = (uintptr_t)((const uint32_t *)srcu + 1);
+                       dstu = (uintptr_t)((uint32_t *)dstu + 1);
+               }
+               if (n & 0x08)
+                       *(uint64_t *)dstu = *(const uint64_t *)srcu;
+               return ret;
+       }
+
+       /**
+        * Fast way when copy size doesn't exceed 512 bytes
+        */
+       if (n <= 32) {
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst - 16 + n,
+                                 (const uint8_t *)src - 16 + n);
+               return ret;
+       }
+       if (n <= 64) {
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov32((uint8_t *)dst - 32 + n,
+                                 (const uint8_t *)src - 32 + n);
+               return ret;
+       }
+       if (n <= 512) {
+               if (n >= 256) {
+                       n -= 256;
+                       rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+                       src = (const uint8_t *)src + 256;
+                       dst = (uint8_t *)dst + 256;
+               }
+               if (n >= 128) {
+                       n -= 128;
+                       rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+                       src = (const uint8_t *)src + 128;
+                       dst = (uint8_t *)dst + 128;
+               }
+COPY_BLOCK_128_BACK63:
+               if (n > 64) {
+                       rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+                       rte_mov64((uint8_t *)dst - 64 + n,
+                                         (const uint8_t *)src - 64 + n);
+                       return ret;
+               }
+               if (n > 0)
+                       rte_mov64((uint8_t *)dst - 64 + n,
+                                         (const uint8_t *)src - 64 + n);
+               return ret;
+       }
+
+       /**
+        * Make store aligned when copy size exceeds 512 bytes
+        */
+       dstofss = ((uintptr_t)dst & 0x3F);
+       if (dstofss > 0) {
+               dstofss = 64 - dstofss;
+               n -= dstofss;
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               src = (const uint8_t *)src + dstofss;
+               dst = (uint8_t *)dst + dstofss;
+       }
+
+       /**
+        * Copy 512-byte blocks.
+        * Use copy block function for better instruction order control,
+        * which is important when load is unaligned.
+        */
+       rte_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
+       bits = n;
+       n = n & 511;
+       bits -= n;
+       src = (const uint8_t *)src + bits;
+       dst = (uint8_t *)dst + bits;
+
+       /**
+        * Copy 128-byte blocks.
+        * Use copy block function for better instruction order control,
+        * which is important when load is unaligned.
+        */
+       if (n >= 128) {
+               rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
+               bits = n;
+               n = n & 127;
+               bits -= n;
+               src = (const uint8_t *)src + bits;
+               dst = (uint8_t *)dst + bits;
+       }
+
+       /**
+        * Copy whatever is left
+        */
+       goto COPY_BLOCK_128_BACK63;
+}
+
+#elif defined RTE_MACHINE_CPUFLAG_AVX2
+
+#define ALIGNMENT_MASK 0x1F
+
+/**
+ * AVX2 implementation below
+ */
+
+/**
+ * Copy 16 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+       __m128i xmm0;
+
+       xmm0 = _mm_loadu_si128((const __m128i *)src);
+       _mm_storeu_si128((__m128i *)dst, xmm0);
+}
+
+/**
+ * Copy 32 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+       __m256i ymm0;
+
+       ymm0 = _mm256_loadu_si256((const __m256i *)src);
+       _mm256_storeu_si256((__m256i *)dst, ymm0);
+}
+
+/**
+ * Copy 64 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+       rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+}
+
+/**
+ * Copy 128 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+       rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+       rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
+       rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
+}
+
+/**
+ * Copy 128-byte blocks from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
+{
+       __m256i ymm0, ymm1, ymm2, ymm3;
+
+       while (n >= 128) {
+               ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
+               n -= 128;
+               ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
+               ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
+               ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
+               src = (const uint8_t *)src + 128;
+               _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
+               _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
+               _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
+               _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
+               dst = (uint8_t *)dst + 128;
+       }
+}
+
+static __rte_always_inline void *
+rte_memcpy_generic(void *dst, const void *src, size_t n)
+{
+       uintptr_t dstu = (uintptr_t)dst;
+       uintptr_t srcu = (uintptr_t)src;
+       void *ret = dst;
+       size_t dstofss;
+       size_t bits;
+
+       /**
+        * Copy less than 16 bytes
+        */
+       if (n < 16) {
+               if (n & 0x01) {
+                       *(uint8_t *)dstu = *(const uint8_t *)srcu;
+                       srcu = (uintptr_t)((const uint8_t *)srcu + 1);
+                       dstu = (uintptr_t)((uint8_t *)dstu + 1);
+               }
+               if (n & 0x02) {
+                       *(uint16_t *)dstu = *(const uint16_t *)srcu;
+                       srcu = (uintptr_t)((const uint16_t *)srcu + 1);
+                       dstu = (uintptr_t)((uint16_t *)dstu + 1);
+               }
+               if (n & 0x04) {
+                       *(uint32_t *)dstu = *(const uint32_t *)srcu;
+                       srcu = (uintptr_t)((const uint32_t *)srcu + 1);
+                       dstu = (uintptr_t)((uint32_t *)dstu + 1);
+               }
+               if (n & 0x08) {
+                       *(uint64_t *)dstu = *(const uint64_t *)srcu;
+               }
+               return ret;
+       }
+
+       /**
+        * Fast path when the copy size doesn't exceed 256 bytes
+        */
+       if (n <= 32) {
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst - 16 + n,
+                               (const uint8_t *)src - 16 + n);
+               return ret;
+       }
+       if (n <= 48) {
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
+               rte_mov16((uint8_t *)dst - 16 + n,
+                               (const uint8_t *)src - 16 + n);
+               return ret;
+       }
+       if (n <= 64) {
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov32((uint8_t *)dst - 32 + n,
+                               (const uint8_t *)src - 32 + n);
+               return ret;
+       }
+       if (n <= 256) {
+               if (n >= 128) {
+                       n -= 128;
+                       rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+                       src = (const uint8_t *)src + 128;
+                       dst = (uint8_t *)dst + 128;
+               }
+COPY_BLOCK_128_BACK31:
+               if (n >= 64) {
+                       n -= 64;
+                       rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+                       src = (const uint8_t *)src + 64;
+                       dst = (uint8_t *)dst + 64;
+               }
+               if (n > 32) {
+                       rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+                       rte_mov32((uint8_t *)dst - 32 + n,
+                                       (const uint8_t *)src - 32 + n);
+                       return ret;
+               }
+               if (n > 0) {
+                       rte_mov32((uint8_t *)dst - 32 + n,
+                                       (const uint8_t *)src - 32 + n);
+               }
+               return ret;
+       }
+
+       /**
+        * Make store aligned when copy size exceeds 256 bytes
+        */
+       dstofss = (uintptr_t)dst & 0x1F;
+       if (dstofss > 0) {
+               dstofss = 32 - dstofss;
+               n -= dstofss;
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               src = (const uint8_t *)src + dstofss;
+               dst = (uint8_t *)dst + dstofss;
+       }
+
+       /**
+        * Copy 128-byte blocks
+        */
+       rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
+       bits = n;
+       n = n & 127;
+       bits -= n;
+       src = (const uint8_t *)src + bits;
+       dst = (uint8_t *)dst + bits;
+
+       /**
+        * Copy whatever is left
+        */
+       goto COPY_BLOCK_128_BACK31;
+}
+
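
The bits/n arithmetic above computes how many bytes the block loop actually
consumed before falling through to the tail labels. A small sketch of that
bookkeeping, assuming the block size is a power of two (hypothetical helper,
not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: advance src/dst past the whole blocks a copy loop handled
     * and return the leftover tail. block must be a power of two. */
    static size_t
    consume_blocks(const uint8_t **src, uint8_t **dst, size_t n, size_t block)
    {
        size_t tail = n & (block - 1);  /* n % block */
        size_t copied = n - tail;       /* multiple of block actually copied */

        *src += copied;
        *dst += copied;
        return tail;
    }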
+#else /* RTE_MACHINE_CPUFLAG */
+
+#define ALIGNMENT_MASK 0x0F
+
+/**
+ * SSE & AVX implementation below
+ */
+
+/**
+ * Copy 16 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+       __m128i xmm0;
+
+       xmm0 = _mm_loadu_si128((const __m128i *)src);
+       _mm_storeu_si128((__m128i *)dst, xmm0);
+}
+
+/**
+ * Copy 32 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+       rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+}
+
+/**
+ * Copy 64 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+       rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+       rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
+       rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+}
+
+/**
+ * Copy 128 bytes from one location to another,
+ * locations should not overlap.
+ */
+static __rte_always_inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+       rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+       rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
+       rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+       rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
+       rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
+       rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
+       rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
+}
+
+/**
+ * Copy 256 bytes from one location to another,
+ * locations should not overlap.
+ */
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+       rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+       rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+       rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
+       rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+       rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
+       rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
+       rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
+       rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
+       rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
+       rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
+       rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
+       rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
+       rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
+       rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
+       rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
+       rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
+}
+
+/**
+ * Macro for copying an unaligned block from one location to another
+ * with a constant load offset; at most 47 bytes leftover and the
+ * locations must not overlap.
+ * Requirements:
+ * - Store is aligned
+ * - Load offset is <offset>, which must be an immediate value within [1, 15]
+ * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
+ * - <dst>, <src>, <len> must be variables
+ * - __m128i <xmm0> ~ <xmm8> must be pre-defined
+ */
+#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset)                                                     \
+__extension__ ({                                                                                            \
+    size_t tmp;                                                                                                \
+    while (len >= 128 + 16 - offset) {                                                                      \
+        xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16));                  \
+        len -= 128;                                                                                         \
+        xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16));                  \
+        xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16));                  \
+        xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16));                  \
+        xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16));                  \
+        xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16));                  \
+        xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16));                  \
+        xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16));                  \
+        xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16));                  \
+        src = (const uint8_t *)src + 128;                                                                   \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));        \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));        \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset));        \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset));        \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset));        \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset));        \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset));        \
+        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset));        \
+        dst = (uint8_t *)dst + 128;                                                                         \
+    }                                                                                                       \
+    tmp = len;                                                                                              \
+    len = ((len - 16 + offset) & 127) + 16 - offset;                                                        \
+    tmp -= len;                                                                                             \
+    src = (const uint8_t *)src + tmp;                                                                       \
+    dst = (uint8_t *)dst + tmp;                                                                             \
+    if (len >= 32 + 16 - offset) {                                                                          \
+        while (len >= 32 + 16 - offset) {                                                                   \
+            xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16));              \
+            len -= 32;                                                                                      \
+            xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16));              \
+            xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16));              \
+            src = (const uint8_t *)src + 32;                                                                \
+            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));    \
+            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));    \
+            dst = (uint8_t *)dst + 32;                                                                      \
+        }                                                                                                   \
+        tmp = len;                                                                                          \
+        len = ((len - 16 + offset) & 31) + 16 - offset;                                                     \
+        tmp -= len;                                                                                         \
+        src = (const uint8_t *)src + tmp;                                                                   \
+        dst = (uint8_t *)dst + tmp;                                                                         \
+    }                                                                                                       \
+})
+
+/**
+ * Macro for copying an unaligned block from one location to another;
+ * at most 47 bytes leftover and the locations must not overlap.
+ * A switch is used because the aligning instruction (PALIGNR) requires
+ * an immediate value for its shift count.
+ * Requirements:
+ * - Store is aligned
+ * - Load offset is <offset>, which must be within [1, 15]
+ * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
+ * - <dst>, <src>, <len> must be variables
+ * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be pre-defined
+ */
+#define MOVEUNALIGNED_LEFT47(dst, src, len, offset)                   \
+__extension__ ({                                                      \
+    switch (offset) {                                                 \
+    case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x01); break;  \
+    case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x02); break;  \
+    case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x03); break;  \
+    case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x04); break;  \
+    case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x05); break;  \
+    case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x06); break;  \
+    case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x07); break;  \
+    case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x08); break;  \
+    case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x09); break;  \
+    case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0A); break;  \
+    case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0B); break;  \
+    case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0C); break;  \
+    case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0D); break;  \
+    case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0E); break;  \
+    case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, len, 0x0F); break;  \
+    default:;                                                         \
+    }                                                                 \
+})
+
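
Both macros build on SSSE3 PALIGNR (_mm_alignr_epi8), whose shift count must be
a compile-time constant; that is what forces the 15-way switch rather than a
plain variable. A minimal sketch of the underlying trick, with the offset fixed
at 3 for illustration (not part of the patch):

    #include <stdint.h>
    #include <tmmintrin.h>  /* SSSE3: _mm_alignr_epi8 */

    /* Sketch: reconstruct the unaligned 16 bytes at aligned_src + 3 from
     * two aligned loads; _mm_alignr_epi8 shifts the (hi:lo) byte pair
     * right by an immediate count, here 3. */
    static __m128i
    load_unaligned_at_plus3(const uint8_t *aligned_src)
    {
        __m128i lo = _mm_load_si128((const __m128i *)aligned_src);
        __m128i hi = _mm_load_si128((const __m128i *)(aligned_src + 16));

        return _mm_alignr_epi8(hi, lo, 3);  /* bytes [3..18] of the source */
    }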
+static __rte_always_inline void *
+rte_memcpy_generic(void *dst, const void *src, size_t n)
+{
+       __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
+       uintptr_t dstu = (uintptr_t)dst;
+       uintptr_t srcu = (uintptr_t)src;
+       void *ret = dst;
+       size_t dstofss;
+       size_t srcofs;
+
+       /**
+        * Copy less than 16 bytes
+        */
+       if (n < 16) {
+               if (n & 0x01) {
+                       *(uint8_t *)dstu = *(const uint8_t *)srcu;
+                       srcu = (uintptr_t)((const uint8_t *)srcu + 1);
+                       dstu = (uintptr_t)((uint8_t *)dstu + 1);
+               }
+               if (n & 0x02) {
+                       *(uint16_t *)dstu = *(const uint16_t *)srcu;
+                       srcu = (uintptr_t)((const uint16_t *)srcu + 1);
+                       dstu = (uintptr_t)((uint16_t *)dstu + 1);
+               }
+               if (n & 0x04) {
+                       *(uint32_t *)dstu = *(const uint32_t *)srcu;
+                       srcu = (uintptr_t)((const uint32_t *)srcu + 1);
+                       dstu = (uintptr_t)((uint32_t *)dstu + 1);
+               }
+               if (n & 0x08) {
+                       *(uint64_t *)dstu = *(const uint64_t *)srcu;
+               }
+               return ret;
+       }
+
+       /**
+        * Fast path when the copy size doesn't exceed 512 bytes
+        */
+       if (n <= 32) {
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+               return ret;
+       }
+       if (n <= 48) {
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+               return ret;
+       }
+       if (n <= 64) {
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
+               rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+               return ret;
+       }
+       if (n <= 128) {
+               goto COPY_BLOCK_128_BACK15;
+       }
+       if (n <= 512) {
+               if (n >= 256) {
+                       n -= 256;
+                       rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+                       rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
+                       src = (const uint8_t *)src + 256;
+                       dst = (uint8_t *)dst + 256;
+               }
+COPY_BLOCK_255_BACK15:
+               if (n >= 128) {
+                       n -= 128;
+                       rte_mov128((uint8_t *)dst, (const uint8_t *)src);
+                       src = (const uint8_t *)src + 128;
+                       dst = (uint8_t *)dst + 128;
+               }
+COPY_BLOCK_128_BACK15:
+               if (n >= 64) {
+                       n -= 64;
+                       rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+                       src = (const uint8_t *)src + 64;
+                       dst = (uint8_t *)dst + 64;
+               }
+COPY_BLOCK_64_BACK15:
+               if (n >= 32) {
+                       n -= 32;
+                       rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+                       src = (const uint8_t *)src + 32;
+                       dst = (uint8_t *)dst + 32;
+               }
+               if (n > 16) {
+                       rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+                       rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+                       return ret;
+               }
+               if (n > 0) {
+                       rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+               }
+               return ret;
+       }
+
+       /**
+        * Make store aligned when copy size exceeds 512 bytes,
+        * and make sure the first 15 bytes are copied, because
+        * unaligned copy functions require up to 15 bytes of
+        * backwards access.
+        */
+       dstofss = (uintptr_t)dst & 0x0F;
+       if (dstofss > 0) {
+               dstofss = 16 - dstofss + 16;
+               n -= dstofss;
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               src = (const uint8_t *)src + dstofss;
+               dst = (uint8_t *)dst + dstofss;
+       }
+       srcofs = ((uintptr_t)src & 0x0F);
+
+       /**
+        * For aligned copy
+        */
+       if (srcofs == 0) {
+               /**
+                * Copy 256-byte blocks
+                */
+               for (; n >= 256; n -= 256) {
+                       rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+                       dst = (uint8_t *)dst + 256;
+                       src = (const uint8_t *)src + 256;
+               }
+
+               /**
+        * Copy whatever is left
+                */
+               goto COPY_BLOCK_255_BACK15;
+       }
+
+       /**
+        * For copy with unaligned load
+        */
+       MOVEUNALIGNED_LEFT47(dst, src, n, srcofs);
+
+       /**
+        * Copy whatever is left
+        */
+       goto COPY_BLOCK_64_BACK15;
+}
+
+#endif /* RTE_MACHINE_CPUFLAG */
+
+static __rte_always_inline void *
+rte_memcpy_aligned(void *dst, const void *src, size_t n)
+{
+       void *ret = dst;
+
+       /* Copy size < 16 bytes */
+       if (n < 16) {
+               if (n & 0x01) {
+                       *(uint8_t *)dst = *(const uint8_t *)src;
+                       src = (const uint8_t *)src + 1;
+                       dst = (uint8_t *)dst + 1;
+               }
+               if (n & 0x02) {
+                       *(uint16_t *)dst = *(const uint16_t *)src;
+                       src = (const uint16_t *)src + 1;
+                       dst = (uint16_t *)dst + 1;
+               }
+               if (n & 0x04) {
+                       *(uint32_t *)dst = *(const uint32_t *)src;
+                       src = (const uint32_t *)src + 1;
+                       dst = (uint32_t *)dst + 1;
+               }
+               if (n & 0x08)
+                       *(uint64_t *)dst = *(const uint64_t *)src;
+
+               return ret;
+       }
+
+       /* Copy 16 <= size <= 32 bytes */
+       if (n <= 32) {
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst - 16 + n,
+                               (const uint8_t *)src - 16 + n);
+
+               return ret;
+       }
+
+       /* Copy 32 < size <= 64 bytes */
+       if (n <= 64) {
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov32((uint8_t *)dst - 32 + n,
+                               (const uint8_t *)src - 32 + n);
+
+               return ret;
+       }
+
+       /* Copy 64-byte blocks */
+       for (; n >= 64; n -= 64) {
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;
+       }
+
+       /* Copy whatever is left */
+       rte_mov64((uint8_t *)dst - 64 + n,
+                       (const uint8_t *)src - 64 + n);
+
+       return ret;
+}
+
+static __rte_always_inline void *
+rte_memcpy(void *dst, const void *src, size_t n)
+{
+       if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
+               return rte_memcpy_aligned(dst, src, n);
+       else
+               return rte_memcpy_generic(dst, src, n);
+}
+
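
rte_memcpy() dispatches on the OR of both addresses: the cheaper aligned path
is taken only when source and destination are both aligned to ALIGNMENT_MASK + 1
bytes. A usage sketch cross-checking it against libc memcpy() over assorted
sizes and offsets, so both paths get exercised (test sizes are arbitrary):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>
    #include <rte_memcpy.h>

    /* Sketch: verify rte_memcpy() against memcpy() for a range of copy
     * sizes and source offsets. */
    static void
    memcpy_selftest(void)
    {
        static uint8_t src[1024], dst[1024], ref[1024];
        size_t n, off, i;

        for (i = 0; i < sizeof(src); i++)
            src[i] = (uint8_t)i;

        for (n = 0; n <= 512; n++) {
            for (off = 0; off < 16; off++) {
                memcpy(ref, src + off, n);
                rte_memcpy(dst, src + off, n);
                assert(memcmp(dst, ref, n) == 0);
            }
        }
    }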
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_X86_64_H_ */
diff --git a/lib/librte_eal/x86/include/rte_pause.h b/lib/librte_eal/x86/include/rte_pause.h
new file mode 100644 (file)
index 0000000..b4cf1df
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _RTE_PAUSE_X86_H_
+#define _RTE_PAUSE_X86_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_pause.h"
+
+#include <emmintrin.h>
+static inline void rte_pause(void)
+{
+       _mm_pause();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PAUSE_X86_H_ */
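
rte_pause() maps to the x86 PAUSE instruction, which de-prioritizes the
spinning hyper-thread and avoids the memory-order mis-speculation penalty on
loop exit. A usage sketch with a hypothetical readiness flag:

    #include <rte_pause.h>

    static volatile int flag;  /* hypothetical readiness flag */

    /* Sketch: poll a flag politely instead of hammering the cache line. */
    static void
    wait_for_flag(void)
    {
        while (!flag)
            rte_pause();
    }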
diff --git a/lib/librte_eal/x86/include/rte_prefetch.h b/lib/librte_eal/x86/include/rte_prefetch.h
new file mode 100644 (file)
index 0000000..384c6b3
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _RTE_PREFETCH_X86_64_H_
+#define _RTE_PREFETCH_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include "generic/rte_prefetch.h"
+
+static inline void rte_prefetch0(const volatile void *p)
+{
+       asm volatile ("prefetcht0 %[p]" : : [p] "m" (*(const volatile char *)p));
+}
+
+static inline void rte_prefetch1(const volatile void *p)
+{
+       asm volatile ("prefetcht1 %[p]" : : [p] "m" (*(const volatile char *)p));
+}
+
+static inline void rte_prefetch2(const volatile void *p)
+{
+       asm volatile ("prefetcht2 %[p]" : : [p] "m" (*(const volatile char *)p));
+}
+
+static inline void rte_prefetch_non_temporal(const volatile void *p)
+{
+       asm volatile ("prefetchnta %[p]" : : [p] "m" (*(const volatile char *)p));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PREFETCH_X86_64_H_ */
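
The four variants map one-to-one onto prefetcht0/t1/t2/nta, differing only in
which cache levels the line is pulled into. A typical usage pattern, sketched
with a hypothetical element type and consumer:

    #include <rte_prefetch.h>

    struct elem;                   /* hypothetical payload type */
    void process(struct elem *e);  /* hypothetical consumer */

    /* Sketch: prefetch the next element into all cache levels while the
     * current one is being processed, hiding the pointer-chase latency. */
    static void
    walk(struct elem **arr, int count)
    {
        int i;

        for (i = 0; i < count; i++) {
            if (i + 1 < count)
                rte_prefetch0(arr[i + 1]);
            process(arr[i]);
        }
    }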
diff --git a/lib/librte_eal/x86/include/rte_rtm.h b/lib/librte_eal/x86/include/rte_rtm.h
new file mode 100644 (file)
index 0000000..eb0f8e8
--- /dev/null
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2012,2013 Intel Corporation
+ */
+
+#ifndef _RTE_RTM_H_
+#define _RTE_RTM_H_ 1
+
+
+/* Official RTM intrinsics interface matching gcc/icc, but works
+   with older gcc-compatible compilers and binutils. */
+
+#include <rte_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define RTE_XBEGIN_STARTED             (~0u)
+#define RTE_XABORT_EXPLICIT            (1 << 0)
+#define RTE_XABORT_RETRY               (1 << 1)
+#define RTE_XABORT_CONFLICT            (1 << 2)
+#define RTE_XABORT_CAPACITY            (1 << 3)
+#define RTE_XABORT_DEBUG               (1 << 4)
+#define RTE_XABORT_NESTED              (1 << 5)
+#define RTE_XABORT_CODE(x)             (((x) >> 24) & 0xff)
+
+static __attribute__((__always_inline__)) inline
+unsigned int rte_xbegin(void)
+{
+       unsigned int ret = RTE_XBEGIN_STARTED;
+
+       asm volatile(".byte 0xc7,0xf8 ; .long 0" : "+a" (ret) :: "memory");
+       return ret;
+}
+
+static __attribute__((__always_inline__)) inline
+void rte_xend(void)
+{
+       asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory");
+}
+
+/* not an inline function to work around a clang bug with -O0 */
+#define rte_xabort(status) do { \
+       asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); \
+} while (0)
+
+static __attribute__((__always_inline__)) inline
+int rte_xtest(void)
+{
+       unsigned char out;
+
+       asm volatile(".byte 0x0f,0x01,0xd6 ; setnz %0" :
+               "=r" (out) :: "memory");
+       return out;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RTM_H_ */
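
The intrinsics emit the TSX opcodes as raw bytes, so they assemble even with
binutils that predate RTM. The canonical usage pattern they support is
begin/commit with a non-transactional fallback, sketched here with hypothetical
do_work()/do_work_locked() helpers:

    #include <rte_rtm.h>

    void do_work(void);         /* hypothetical transactional body */
    void do_work_locked(void);  /* hypothetical fallback under a real lock */

    /* Sketch: run the body transactionally; on abort, rte_xbegin()
     * returns the RTE_XABORT_* status flags and the fallback runs. */
    static void
    run_transactionally(void)
    {
        unsigned int status = rte_xbegin();

        if (status == RTE_XBEGIN_STARTED) {
            do_work();
            rte_xend();  /* commit */
        } else {
            do_work_locked();
        }
    }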
diff --git a/lib/librte_eal/x86/include/rte_rwlock.h b/lib/librte_eal/x86/include/rte_rwlock.h
new file mode 100644 (file)
index 0000000..eec4c71
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
+ */
+
+#ifndef _RTE_RWLOCK_X86_64_H_
+#define _RTE_RWLOCK_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_rwlock.h"
+#include "rte_spinlock.h"
+
+static inline void
+rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+{
+       if (likely(rte_try_tm(&rwl->cnt)))
+               return;
+       rte_rwlock_read_lock(rwl);
+}
+
+static inline void
+rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+{
+       if (unlikely(rwl->cnt))
+               rte_rwlock_read_unlock(rwl);
+       else
+               rte_xend();
+}
+
+static inline void
+rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+{
+       if (likely(rte_try_tm(&rwl->cnt)))
+               return;
+       rte_rwlock_write_lock(rwl);
+}
+
+static inline void
+rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+{
+       if (unlikely(rwl->cnt))
+               rte_rwlock_write_unlock(rwl);
+       else
+               rte_xend();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RWLOCK_X86_64_H_ */
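
When rte_try_tm() succeeds, readers never write rwl->cnt, so concurrent elided
readers do not bounce the lock's cache line between cores. A usage sketch with
lookup() as a hypothetical read-only operation:

    #include <stdint.h>
    #include <rte_rwlock.h>

    uint32_t lookup(uint32_t key);  /* hypothetical read-only operation */

    /* Sketch: an HTM-elided read-side critical section. If the
     * transaction aborted and the real lock was taken, the unlock path
     * sees rwl->cnt != 0 and releases it instead of calling rte_xend(). */
    static uint32_t
    elided_lookup(rte_rwlock_t *rwl, uint32_t key)
    {
        uint32_t v;

        rte_rwlock_read_lock_tm(rwl);
        v = lookup(key);
        rte_rwlock_read_unlock_tm(rwl);
        return v;
    }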
diff --git a/lib/librte_eal/x86/include/rte_spinlock.h b/lib/librte_eal/x86/include/rte_spinlock.h
new file mode 100644 (file)
index 0000000..e2e2b26
--- /dev/null
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_SPINLOCK_X86_64_H_
+#define _RTE_SPINLOCK_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_spinlock.h"
+#include "rte_rtm.h"
+#include "rte_cpuflags.h"
+#include "rte_branch_prediction.h"
+#include "rte_common.h"
+#include "rte_pause.h"
+#include "rte_cycles.h"
+
+#define RTE_RTM_MAX_RETRIES (20)
+#define RTE_XABORT_LOCK_BUSY (0xff)
+
+#ifndef RTE_FORCE_INTRINSICS
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl)
+{
+       int lock_val = 1;
+       asm volatile (
+                       "1:\n"
+                       "xchg %[locked], %[lv]\n"
+                       "test %[lv], %[lv]\n"
+                       "jz 3f\n"
+                       "2:\n"
+                       "pause\n"
+                       "cmpl $0, %[locked]\n"
+                       "jnz 2b\n"
+                       "jmp 1b\n"
+                       "3:\n"
+                       : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
+                       : "[lv]" (lock_val)
+                       : "memory");
+}
+
+static inline void
+rte_spinlock_unlock (rte_spinlock_t *sl)
+{
+       int unlock_val = 0;
+       asm volatile (
+                       "xchg %[locked], %[ulv]\n"
+                       : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
+                       : "[ulv]" (unlock_val)
+                       : "memory");
+}
+
+static inline int
+rte_spinlock_trylock (rte_spinlock_t *sl)
+{
+       int lockval = 1;
+
+       asm volatile (
+                       "xchg %[locked], %[lockval]"
+                       : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
+                       : "[lockval]" (lockval)
+                       : "memory");
+
+       return lockval == 0;
+}
+#endif
+
+extern uint8_t rte_rtm_supported;
+
+static inline int rte_tm_supported(void)
+{
+       return rte_rtm_supported;
+}
+
+static inline int
+rte_try_tm(volatile int *lock)
+{
+       int i, retries;
+
+       if (!rte_rtm_supported)
+               return 0;
+
+       retries = RTE_RTM_MAX_RETRIES;
+
+       while (likely(retries--)) {
+
+               unsigned int status = rte_xbegin();
+
+               if (likely(RTE_XBEGIN_STARTED == status)) {
+                       if (unlikely(*lock))
+                               rte_xabort(RTE_XABORT_LOCK_BUSY);
+                       else
+                               return 1;
+               }
+               while (*lock)
+                       rte_pause();
+
+               if ((status & RTE_XABORT_CONFLICT) ||
+                  ((status & RTE_XABORT_EXPLICIT) &&
+                   (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))) {
+                       /* add a small delay before retrying, basing the
+                        * delay on the number of times we've already tried,
+                        * to give back-off behaviour. The pause count is
+                        * randomized by taking bits from the TSC count.
+                        */
+                       int try_count = RTE_RTM_MAX_RETRIES - retries;
+                       int pause_count = (rte_rdtsc() & 0x7) | 1;
+                       pause_count <<= try_count;
+                       for (i = 0; i < pause_count; i++)
+                               rte_pause();
+                       continue;
+               }
+
+               if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
+                       break;
+       }
+       return 0;
+}
+
+static inline void
+rte_spinlock_lock_tm(rte_spinlock_t *sl)
+{
+       if (likely(rte_try_tm(&sl->locked)))
+               return;
+
+       rte_spinlock_lock(sl); /* fall-back */
+}
+
+static inline int
+rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+{
+       if (likely(rte_try_tm(&sl->locked)))
+               return 1;
+
+       return rte_spinlock_trylock(sl);
+}
+
+static inline void
+rte_spinlock_unlock_tm(rte_spinlock_t *sl)
+{
+       if (unlikely(sl->locked))
+               rte_spinlock_unlock(sl);
+       else
+               rte_xend();
+}
+
+static inline void
+rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
+{
+       if (likely(rte_try_tm(&slr->sl.locked)))
+               return;
+
+       rte_spinlock_recursive_lock(slr); /* fall-back */
+}
+
+static inline void
+rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
+{
+       if (unlikely(slr->sl.locked))
+               rte_spinlock_recursive_unlock(slr);
+       else
+               rte_xend();
+}
+
+static inline int
+rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
+{
+       if (likely(rte_try_tm(&slr->sl.locked)))
+               return 1;
+
+       return rte_spinlock_recursive_trylock(slr);
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SPINLOCK_X86_64_H_ */
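
rte_try_tm() retries elision up to RTE_RTM_MAX_RETRIES times with a randomized
exponential back-off before falling back to the real lock. A usage sketch of
the _tm variants, with counter as hypothetical shared state:

    #include <stdint.h>
    #include <rte_spinlock.h>

    static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
    static uint64_t counter;  /* hypothetical shared state */

    /* Sketch: on RTM-capable CPUs the increment usually commits as a
     * transaction without ever writing lock.locked; after repeated
     * aborts (or without RTM) the plain spinlock is taken instead. */
    static void
    bump(void)
    {
        rte_spinlock_lock_tm(&lock);
        counter++;
        rte_spinlock_unlock_tm(&lock);
    }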
diff --git a/lib/librte_eal/x86/include/rte_ticketlock.h b/lib/librte_eal/x86/include/rte_ticketlock.h
new file mode 100644 (file)
index 0000000..0cc01f6
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Arm Limited
+ */
+
+#ifndef _RTE_TICKETLOCK_X86_64_H_
+#define _RTE_TICKETLOCK_X86_64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_ticketlock.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TICKETLOCK_X86_64_H_ */
diff --git a/lib/librte_eal/x86/include/rte_vect.h b/lib/librte_eal/x86/include/rte_vect.h
new file mode 100644 (file)
index 0000000..df5a607
--- /dev/null
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _RTE_VECT_X86_H_
+#define _RTE_VECT_X86_H_
+
+/**
+ * @file
+ *
+ * RTE SSE/AVX related header.
+ */
+
+#include <stdint.h>
+#include <rte_config.h>
+#include "generic/rte_vect.h"
+
+#if (defined(__ICC) || \
+       (defined(_WIN64)) || \
+       (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+
+#include <smmintrin.h> /* SSE4 */
+
+#if defined(__AVX__)
+#include <immintrin.h>
+#endif
+
+#else
+
+#include <x86intrin.h>
+
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef __m128i xmm_t;
+
+#define        XMM_SIZE        (sizeof(xmm_t))
+#define        XMM_MASK        (XMM_SIZE - 1)
+
+typedef union rte_xmm {
+       xmm_t    x;
+       uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
+       uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
+       uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
+       uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
+       double   pd[XMM_SIZE / sizeof(double)];
+} rte_xmm_t;
+
+#ifdef __AVX__
+
+typedef __m256i ymm_t;
+
+#define        YMM_SIZE        (sizeof(ymm_t))
+#define        YMM_MASK        (YMM_SIZE - 1)
+
+typedef union rte_ymm {
+       ymm_t    y;
+       xmm_t    x[YMM_SIZE / sizeof(xmm_t)];
+       uint8_t  u8[YMM_SIZE / sizeof(uint8_t)];
+       uint16_t u16[YMM_SIZE / sizeof(uint16_t)];
+       uint32_t u32[YMM_SIZE / sizeof(uint32_t)];
+       uint64_t u64[YMM_SIZE / sizeof(uint64_t)];
+       double   pd[YMM_SIZE / sizeof(double)];
+} rte_ymm_t;
+
+#endif /* __AVX__ */
+
+#ifdef RTE_ARCH_I686
+#define _mm_cvtsi128_si64(a)    \
+__extension__ ({                \
+       rte_xmm_t m;            \
+       m.x = (a);              \
+       (m.u64[0]);             \
+})
+#endif
+
+/*
+ * Prior to version 12.1, icc doesn't support _mm_set_epi64x.
+ */
+#if (defined(__ICC) && __ICC < 1210)
+#define _mm_set_epi64x(a, b)     \
+__extension__ ({                 \
+       rte_xmm_t m;             \
+       m.u64[0] = b;            \
+       m.u64[1] = a;            \
+       (m.x);                   \
+})
+#endif /* (defined(__ICC) && __ICC < 1210) */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_VECT_X86_H_ */
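
The rte_xmm_t union gives typed, lane-wise access to a vector value without
pointer-casting tricks. A small sketch summing the four 32-bit lanes of an
xmm_t (illustrative only):

    #include <stdint.h>
    #include <rte_vect.h>

    /* Sketch: read back the four 32-bit lanes of an xmm_t via the union. */
    static uint32_t
    sum_u32_lanes(xmm_t v)
    {
        rte_xmm_t x;

        x.x = v;
        return x.u32[0] + x.u32[1] + x.u32[2] + x.u32[3];
    }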
index 252699e..e78f290 100644 (file)
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017 Intel Corporation
 
+subdir('include')
+
 sources += files(
        'rte_cpuflags.c',
        'rte_cycles.c',