From: Thomas Monjalon Date: Fri, 27 Mar 2020 01:15:38 +0000 (+0100) Subject: eal: move common header files X-Git-Url: http://git.droids-corp.org/?p=dpdk.git;a=commitdiff_plain;h=9c1e0dc39a03c95447095241eaf3764d2e0dd003 eal: move common header files The EAL API (with doxygen documentation) is moved from common/include/ to include/, which makes more clear that it is the global API for all environments and architectures. Note that the arch-specific and OS-specific include files are not in this global include directory, but include/generic/ should cover the doxygen documentation for them. Signed-off-by: Thomas Monjalon Acked-by: David Marchand --- diff --git a/MAINTAINERS b/MAINTAINERS index 840be6fafc..8ce8d02a4c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -142,8 +142,8 @@ F: .ci/ ABI versioning M: Neil Horman -F: lib/librte_eal/common/include/rte_compat.h -F: lib/librte_eal/common/include/rte_function_versioning.h +F: lib/librte_eal/include/rte_compat.h +F: lib/librte_eal/include/rte_function_versioning.h F: doc/guides/rel_notes/deprecation.rst F: devtools/check-abi.sh F: devtools/check-abi-version.sh @@ -169,7 +169,7 @@ T: git://dpdk.org/dpdk EAL API and common code F: lib/librte_eal/common/ -F: lib/librte_eal/common/include/ +F: lib/librte_eal/include/ F: lib/librte_eal/rte_eal_version.map F: doc/guides/prog_guide/env_abstraction_layer.rst F: app/test/test_alarm.c @@ -196,9 +196,9 @@ F: app/test/test_version.c Memory Allocation M: Anatoly Burakov -F: lib/librte_eal/common/include/rte_fbarray.h -F: lib/librte_eal/common/include/rte_mem* -F: lib/librte_eal/common/include/rte_malloc.h +F: lib/librte_eal/include/rte_fbarray.h +F: lib/librte_eal/include/rte_mem* +F: lib/librte_eal/include/rte_malloc.h F: lib/librte_eal/common/*malloc* F: lib/librte_eal/common/eal_common_fbarray.c F: lib/librte_eal/common/eal_common_mem* @@ -214,7 +214,7 @@ F: app/test/test_memory.c F: app/test/test_memzone.c Keep alive -F: lib/librte_eal/common/include/rte_keepalive.h +F: lib/librte_eal/include/rte_keepalive.h F: lib/librte_eal/common/rte_keepalive.c F: examples/l2fwd-keepalive/ F: doc/guides/sample_app_ug/keep_alive.rst @@ -230,30 +230,30 @@ F: doc/guides/sample_app_ug/multi_process.rst Service Cores M: Harry van Haaren -F: lib/librte_eal/common/include/rte_service.h -F: lib/librte_eal/common/include/rte_service_component.h +F: lib/librte_eal/include/rte_service.h +F: lib/librte_eal/include/rte_service_component.h F: lib/librte_eal/common/rte_service.c F: doc/guides/prog_guide/service_cores.rst F: app/test/test_service_cores.c Bitmap M: Cristian Dumitrescu -F: lib/librte_eal/common/include/rte_bitmap.h +F: lib/librte_eal/include/rte_bitmap.h F: app/test/test_bitmap.c MCSlock - EXPERIMENTAL M: Phil Yang -F: lib/librte_eal/common/include/generic/rte_mcslock.h +F: lib/librte_eal/include/generic/rte_mcslock.h F: app/test/test_mcslock.c Ticketlock M: Joyce Kong -F: lib/librte_eal/common/include/generic/rte_ticketlock.h +F: lib/librte_eal/include/generic/rte_ticketlock.h F: app/test/test_ticketlock.c Pseudo-random Number Generation M: Mattias Rönnblom -F: lib/librte_eal/common/include/rte_random.h +F: lib/librte_eal/include/rte_random.h F: lib/librte_eal/common/rte_random.c F: app/test/test_rand_perf.c diff --git a/buildtools/pmdinfogen/meson.build b/buildtools/pmdinfogen/meson.build index 899ba112cd..7da415b3b7 100644 --- a/buildtools/pmdinfogen/meson.build +++ b/buildtools/pmdinfogen/meson.build @@ -6,7 +6,7 @@ if host_machine.system() == 'windows' endif pmdinfogen_inc = [global_inc] -pmdinfogen_inc += 
include_directories('../../lib/librte_eal/common/include') +pmdinfogen_inc += include_directories('../../lib/librte_eal/include') pmdinfogen_inc += include_directories('../../lib/librte_pci') pmdinfogen = executable('pmdinfogen', 'pmdinfogen.c', diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in index 1c4392eecc..65e8146bef 100644 --- a/doc/api/doxy-api.conf.in +++ b/doc/api/doxy-api.conf.in @@ -17,8 +17,8 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \ @TOPDIR@/drivers/net/softnic \ @TOPDIR@/drivers/raw/dpaa2_cmdif \ @TOPDIR@/drivers/raw/dpaa2_qdma \ - @TOPDIR@/lib/librte_eal/common/include \ - @TOPDIR@/lib/librte_eal/common/include/generic \ + @TOPDIR@/lib/librte_eal/include \ + @TOPDIR@/lib/librte_eal/include/generic \ @TOPDIR@/lib/librte_acl \ @TOPDIR@/lib/librte_bbdev \ @TOPDIR@/lib/librte_bitratestats \ diff --git a/doc/guides/rel_notes/known_issues.rst b/doc/guides/rel_notes/known_issues.rst index 68c3d22bea..de0782136d 100644 --- a/doc/guides/rel_notes/known_issues.rst +++ b/doc/guides/rel_notes/known_issues.rst @@ -127,7 +127,7 @@ HPET timers do not work on the Osage customer reference platform work correctly, provided the BIOS supports HPET. **Driver/Module**: - ``lib/librte_eal/common/include/rte_cycles.h`` + ``lib/librte_eal/include/rte_cycles.h`` Not all variants of supported NIC types have been used in testing diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile index cd1093f744..9e1a31bb75 100644 --- a/drivers/bus/dpaa/Makefile +++ b/drivers/bus/dpaa/Makefile @@ -18,7 +18,7 @@ CFLAGS += -I$(RTE_BUS_DPAA)/ CFLAGS += -I$(RTE_BUS_DPAA)/include CFLAGS += -I$(RTE_BUS_DPAA)/base/qbman CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax -CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include # versioning export map EXPORT_MAP := rte_bus_dpaa_version.map diff --git a/drivers/common/mlx5/meson.build b/drivers/common/mlx5/meson.build index 141739fd6f..f671710714 100644 --- a/drivers/common/mlx5/meson.build +++ b/drivers/common/mlx5/meson.build @@ -203,7 +203,7 @@ if dlopen_ibverbs dlopen_install_dir = [ eal_pmd_path + '-glue' ] dlopen_includes = [global_inc] dlopen_includes += include_directories( - '../../../lib/librte_eal/common/include/generic', + '../../../lib/librte_eal/include/generic', ) shared_lib = shared_library( dlopen_lib_name, diff --git a/drivers/crypto/caam_jr/Makefile b/drivers/crypto/caam_jr/Makefile index 1b1f25a2a2..db17294395 100644 --- a/drivers/crypto/caam_jr/Makefile +++ b/drivers/crypto/caam_jr/Makefile @@ -20,7 +20,7 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax/caamflib/ CFLAGS += -I$(RTE_SDK)/drivers/crypto/caam_jr -CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include # versioning export map EXPORT_MAP := rte_pmd_caam_jr_version.map diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile index fbfd775855..13a5ff20cf 100644 --- a/drivers/crypto/dpaa_sec/Makefile +++ b/drivers/crypto/dpaa_sec/Makefile @@ -20,7 +20,7 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/base/qbman CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa_sec/ CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax/caamflib/ -CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_cryptodev diff --git a/drivers/event/dpaa/Makefile 
b/drivers/event/dpaa/Makefile index 2f53efdf9e..15ffc157f8 100644 --- a/drivers/event/dpaa/Makefile +++ b/drivers/event/dpaa/Makefile @@ -20,7 +20,7 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/ CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax -CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include LDLIBS += -lrte_pmd_dpaa_sec CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa_sec diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile index 8e049b2a0b..f63c9bf540 100644 --- a/drivers/net/dpaa/Makefile +++ b/drivers/net/dpaa/Makefile @@ -21,7 +21,7 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/base/qbman CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa -CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include EXPORT_MAP := rte_pmd_dpaa_version.map diff --git a/kernel/linux/igb_uio/meson.build b/kernel/linux/igb_uio/meson.build index fac404f078..80540aecee 100644 --- a/kernel/linux/igb_uio/meson.build +++ b/kernel/linux/igb_uio/meson.build @@ -12,7 +12,7 @@ custom_target('igb_uio', 'M=' + meson.current_build_dir(), 'src=' + meson.current_source_dir(), 'EXTRA_CFLAGS=-I' + meson.current_source_dir() + - '/../../../lib/librte_eal/common/include', + '/../../../lib/librte_eal/include', 'modules'], depends: mkfile, install: true, diff --git a/kernel/linux/kni/meson.build b/kernel/linux/kni/meson.build index f93e97fa09..706bea5b7f 100644 --- a/kernel/linux/kni/meson.build +++ b/kernel/linux/kni/meson.build @@ -17,7 +17,7 @@ custom_target('rte_kni', 'M=' + meson.current_build_dir(), 'src=' + meson.current_source_dir(), 'MODULE_CFLAGS=-include ' + meson.source_root() + '/config/rte_config.h' + - ' -I' + meson.source_root() + '/lib/librte_eal/common/include' + + ' -I' + meson.source_root() + '/lib/librte_eal/include' + ' -I' + meson.source_root() + '/lib/librte_eal/linux/eal/include' + ' -I' + meson.build_root() + ' -I' + meson.current_source_dir(), diff --git a/lib/librte_cfgfile/Makefile b/lib/librte_cfgfile/Makefile index d3b08420ff..7c10a4e56c 100644 --- a/lib/librte_cfgfile/Makefile +++ b/lib/librte_cfgfile/Makefile @@ -10,7 +10,7 @@ LIB = librte_cfgfile.a CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -CFLAGS += -I$(SRCDIR)/../librte_eal/common/include +CFLAGS += -I$(SRCDIR)/../librte_eal/include LDLIBS += -lrte_eal EXPORT_MAP := rte_cfgfile_version.map diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile index 9c383d42bd..ff74935932 100644 --- a/lib/librte_eal/Makefile +++ b/lib/librte_eal/Makefile @@ -3,10 +3,10 @@ include $(RTE_SDK)/mk/rte.vars.mk -DIRS-y += common +DIRS-y += include DIRS-$(CONFIG_RTE_EXEC_ENV_LINUX) += linux/eal -DEPDIRS-linux := common +DEPDIRS-linux := include DIRS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += freebsd/eal -DEPDIRS-freebsd := common +DEPDIRS-freebsd := include include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile deleted file mode 100644 index 6c52f50106..0000000000 --- a/lib/librte_eal/common/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2010-2014 Intel Corporation - -include $(RTE_SDK)/mk/rte.vars.mk - -INC := rte_branch_prediction.h rte_common.h rte_compat.h -INC += rte_function_versioning.h -INC += rte_debug.h rte_eal.h rte_eal_interrupts.h -INC += rte_errno.h rte_launch.h rte_lcore.h -INC += rte_log.h 
rte_memory.h rte_memzone.h -INC += rte_per_lcore.h rte_random.h -INC += rte_tailq.h rte_interrupts.h rte_alarm.h -INC += rte_string_fns.h rte_version.h -INC += rte_eal_memconfig.h -INC += rte_hexdump.h rte_devargs.h rte_bus.h rte_dev.h rte_class.h -INC += rte_option.h -INC += rte_pci_dev_feature_defs.h rte_pci_dev_features.h -INC += rte_malloc.h rte_keepalive.h rte_time.h -INC += rte_service.h rte_service_component.h -INC += rte_bitmap.h rte_vfio.h rte_hypervisor.h rte_test.h -INC += rte_reciprocal.h rte_fbarray.h rte_uuid.h - -GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h -GENERIC_INC += rte_memcpy.h rte_cpuflags.h -GENERIC_INC += rte_mcslock.h rte_spinlock.h rte_rwlock.h rte_ticketlock.h -GENERIC_INC += rte_vect.h rte_pause.h rte_io.h - -# defined in mk/arch/$(RTE_ARCH)/rte.vars.mk -ARCH_DIR ?= $(RTE_ARCH) -ARCH_INC := $(sort $(notdir $(wildcard $(RTE_SDK)/lib/librte_eal/$(ARCH_DIR)/include/*.h))) - -SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC)) -SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include += \ - $(addprefix ../$(ARCH_DIR)/include/,$(ARCH_INC)) -SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/generic := \ - $(addprefix include/generic/,$(GENERIC_INC)) - -include $(RTE_SDK)/mk/rte.install.mk diff --git a/lib/librte_eal/common/include/generic/rte_atomic.h b/lib/librte_eal/common/include/generic/rte_atomic.h deleted file mode 100644 index e6ab15a973..0000000000 --- a/lib/librte_eal/common/include/generic/rte_atomic.h +++ /dev/null @@ -1,1150 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_ATOMIC_H_ -#define _RTE_ATOMIC_H_ - -/** - * @file - * Atomic Operations - * - * This file defines a generic API for atomic operations. - */ - -#include -#include - -#ifdef __DOXYGEN__ - -/** @name Memory Barrier - */ -///@{ -/** - * General memory barrier. - * - * Guarantees that the LOAD and STORE operations generated before the - * barrier occur before the LOAD and STORE operations generated after. - */ -static inline void rte_mb(void); - -/** - * Write memory barrier. - * - * Guarantees that the STORE operations generated before the barrier - * occur before the STORE operations generated after. - */ -static inline void rte_wmb(void); - -/** - * Read memory barrier. - * - * Guarantees that the LOAD operations generated before the barrier - * occur before the LOAD operations generated after. - */ -static inline void rte_rmb(void); -///@} - -/** @name SMP Memory Barrier - */ -///@{ -/** - * General memory barrier between lcores - * - * Guarantees that the LOAD and STORE operations that precede the - * rte_smp_mb() call are globally visible across the lcores - * before the LOAD and STORE operations that follows it. - */ -static inline void rte_smp_mb(void); - -/** - * Write memory barrier between lcores - * - * Guarantees that the STORE operations that precede the - * rte_smp_wmb() call are globally visible across the lcores - * before the STORE operations that follows it. - */ -static inline void rte_smp_wmb(void); - -/** - * Read memory barrier between lcores - * - * Guarantees that the LOAD operations that precede the - * rte_smp_rmb() call are globally visible across the lcores - * before the LOAD operations that follows it. 
- */ -static inline void rte_smp_rmb(void); -///@} - -/** @name I/O Memory Barrier - */ -///@{ -/** - * General memory barrier for I/O device - * - * Guarantees that the LOAD and STORE operations that precede the - * rte_io_mb() call are visible to I/O device or CPU before the - * LOAD and STORE operations that follow it. - */ -static inline void rte_io_mb(void); - -/** - * Write memory barrier for I/O device - * - * Guarantees that the STORE operations that precede the - * rte_io_wmb() call are visible to I/O device before the STORE - * operations that follow it. - */ -static inline void rte_io_wmb(void); - -/** - * Read memory barrier for IO device - * - * Guarantees that the LOAD operations on I/O device that precede the - * rte_io_rmb() call are visible to CPU before the LOAD - * operations that follow it. - */ -static inline void rte_io_rmb(void); -///@} - -/** @name Coherent I/O Memory Barrier - * - * Coherent I/O memory barrier is a lightweight version of I/O memory - * barriers which are system-wide data synchronization barriers. This - * is for only coherent memory domain between lcore and I/O device but - * it is same as the I/O memory barriers in most of architectures. - * However, some architecture provides even lighter barriers which are - * somewhere in between I/O memory barriers and SMP memory barriers. - * For example, in case of ARMv8, DMB(data memory barrier) instruction - * can have different shareability domains - inner-shareable and - * outer-shareable. And inner-shareable DMB fits for SMP memory - * barriers and outer-shareable DMB for coherent I/O memory barriers, - * which acts on coherent memory. - * - * In most cases, I/O memory barriers are safer but if operations are - * on coherent memory instead of incoherent MMIO region of a device, - * then coherent I/O memory barriers can be used and this could bring - * performance gain depending on architectures. - */ -///@{ -/** - * Write memory barrier for coherent memory between lcore and I/O device - * - * Guarantees that the STORE operations on coherent memory that - * precede the rte_cio_wmb() call are visible to I/O device before the - * STORE operations that follow it. - */ -static inline void rte_cio_wmb(void); - -/** - * Read memory barrier for coherent memory between lcore and I/O device - * - * Guarantees that the LOAD operations on coherent memory updated by - * I/O device that precede the rte_cio_rmb() call are visible to CPU - * before the LOAD operations that follow it. - */ -static inline void rte_cio_rmb(void); -///@} - -#endif /* __DOXYGEN__ */ - -/** - * Compiler barrier. - * - * Guarantees that operation reordering does not occur at compile time - * for operations directly before and after the barrier. - */ -#define rte_compiler_barrier() do { \ - asm volatile ("" : : : "memory"); \ -} while(0) - -/*------------------------- 16 bit atomic operations -------------------------*/ - -/** - * Atomic compare and set. - * - * (atomic) equivalent to: - * if (*dst == exp) - * *dst = src (all 16-bit words) - * - * @param dst - * The destination location into which the value will be written. - * @param exp - * The expected value. - * @param src - * The new value. - * @return - * Non-zero on success; 0 on failure. 
- */ -static inline int -rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src); - -#ifdef RTE_FORCE_INTRINSICS -static inline int -rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src) -{ - return __sync_bool_compare_and_swap(dst, exp, src); -} -#endif - -/** - * Atomic exchange. - * - * (atomic) equivalent to: - * ret = *dst - * *dst = val; - * return ret; - * - * @param dst - * The destination location into which the value will be written. - * @param val - * The new value. - * @return - * The original value at that location - */ -static inline uint16_t -rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val); - -#ifdef RTE_FORCE_INTRINSICS -static inline uint16_t -rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val) -{ -#if defined(__clang__) - return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); -#else - return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST); -#endif -} -#endif - -/** - * The atomic counter structure. - */ -typedef struct { - volatile int16_t cnt; /**< An internal counter value. */ -} rte_atomic16_t; - -/** - * Static initializer for an atomic counter. - */ -#define RTE_ATOMIC16_INIT(val) { (val) } - -/** - * Initialize an atomic counter. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void -rte_atomic16_init(rte_atomic16_t *v) -{ - v->cnt = 0; -} - -/** - * Atomically read a 16-bit value from a counter. - * - * @param v - * A pointer to the atomic counter. - * @return - * The value of the counter. - */ -static inline int16_t -rte_atomic16_read(const rte_atomic16_t *v) -{ - return v->cnt; -} - -/** - * Atomically set a counter to a 16-bit value. - * - * @param v - * A pointer to the atomic counter. - * @param new_value - * The new value for the counter. - */ -static inline void -rte_atomic16_set(rte_atomic16_t *v, int16_t new_value) -{ - v->cnt = new_value; -} - -/** - * Atomically add a 16-bit value to an atomic counter. - * - * @param v - * A pointer to the atomic counter. - * @param inc - * The value to be added to the counter. - */ -static inline void -rte_atomic16_add(rte_atomic16_t *v, int16_t inc) -{ - __sync_fetch_and_add(&v->cnt, inc); -} - -/** - * Atomically subtract a 16-bit value from an atomic counter. - * - * @param v - * A pointer to the atomic counter. - * @param dec - * The value to be subtracted from the counter. - */ -static inline void -rte_atomic16_sub(rte_atomic16_t *v, int16_t dec) -{ - __sync_fetch_and_sub(&v->cnt, dec); -} - -/** - * Atomically increment a counter by one. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void -rte_atomic16_inc(rte_atomic16_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic16_inc(rte_atomic16_t *v) -{ - rte_atomic16_add(v, 1); -} -#endif - -/** - * Atomically decrement a counter by one. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void -rte_atomic16_dec(rte_atomic16_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic16_dec(rte_atomic16_t *v) -{ - rte_atomic16_sub(v, 1); -} -#endif - -/** - * Atomically add a 16-bit value to a counter and return the result. - * - * Atomically adds the 16-bits value (inc) to the atomic counter (v) and - * returns the value of v after addition. - * - * @param v - * A pointer to the atomic counter. - * @param inc - * The value to be added to the counter. - * @return - * The value of v after the addition. 
- */ -static inline int16_t -rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc) -{ - return __sync_add_and_fetch(&v->cnt, inc); -} - -/** - * Atomically subtract a 16-bit value from a counter and return - * the result. - * - * Atomically subtracts the 16-bit value (inc) from the atomic counter - * (v) and returns the value of v after the subtraction. - * - * @param v - * A pointer to the atomic counter. - * @param dec - * The value to be subtracted from the counter. - * @return - * The value of v after the subtraction. - */ -static inline int16_t -rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec) -{ - return __sync_sub_and_fetch(&v->cnt, dec); -} - -/** - * Atomically increment a 16-bit counter by one and test. - * - * Atomically increments the atomic counter (v) by one and returns true if - * the result is 0, or false in all other cases. - * - * @param v - * A pointer to the atomic counter. - * @return - * True if the result after the increment operation is 0; false otherwise. - */ -static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v) -{ - return __sync_add_and_fetch(&v->cnt, 1) == 0; -} -#endif - -/** - * Atomically decrement a 16-bit counter by one and test. - * - * Atomically decrements the atomic counter (v) by one and returns true if - * the result is 0, or false in all other cases. - * - * @param v - * A pointer to the atomic counter. - * @return - * True if the result after the decrement operation is 0; false otherwise. - */ -static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v) -{ - return __sync_sub_and_fetch(&v->cnt, 1) == 0; -} -#endif - -/** - * Atomically test and set a 16-bit atomic counter. - * - * If the counter value is already set, return 0 (failed). Otherwise, set - * the counter value to 1 and return 1 (success). - * - * @param v - * A pointer to the atomic counter. - * @return - * 0 if failed; else 1, success. - */ -static inline int rte_atomic16_test_and_set(rte_atomic16_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int rte_atomic16_test_and_set(rte_atomic16_t *v) -{ - return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1); -} -#endif - -/** - * Atomically set a 16-bit counter to 0. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void rte_atomic16_clear(rte_atomic16_t *v) -{ - v->cnt = 0; -} - -/*------------------------- 32 bit atomic operations -------------------------*/ - -/** - * Atomic compare and set. - * - * (atomic) equivalent to: - * if (*dst == exp) - * *dst = src (all 32-bit words) - * - * @param dst - * The destination location into which the value will be written. - * @param exp - * The expected value. - * @param src - * The new value. - * @return - * Non-zero on success; 0 on failure. - */ -static inline int -rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src); - -#ifdef RTE_FORCE_INTRINSICS -static inline int -rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src) -{ - return __sync_bool_compare_and_swap(dst, exp, src); -} -#endif - -/** - * Atomic exchange. - * - * (atomic) equivalent to: - * ret = *dst - * *dst = val; - * return ret; - * - * @param dst - * The destination location into which the value will be written. - * @param val - * The new value. 
- * @return - * The original value at that location - */ -static inline uint32_t -rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val); - -#ifdef RTE_FORCE_INTRINSICS -static inline uint32_t -rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val) -{ -#if defined(__clang__) - return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); -#else - return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST); -#endif -} -#endif - -/** - * The atomic counter structure. - */ -typedef struct { - volatile int32_t cnt; /**< An internal counter value. */ -} rte_atomic32_t; - -/** - * Static initializer for an atomic counter. - */ -#define RTE_ATOMIC32_INIT(val) { (val) } - -/** - * Initialize an atomic counter. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void -rte_atomic32_init(rte_atomic32_t *v) -{ - v->cnt = 0; -} - -/** - * Atomically read a 32-bit value from a counter. - * - * @param v - * A pointer to the atomic counter. - * @return - * The value of the counter. - */ -static inline int32_t -rte_atomic32_read(const rte_atomic32_t *v) -{ - return v->cnt; -} - -/** - * Atomically set a counter to a 32-bit value. - * - * @param v - * A pointer to the atomic counter. - * @param new_value - * The new value for the counter. - */ -static inline void -rte_atomic32_set(rte_atomic32_t *v, int32_t new_value) -{ - v->cnt = new_value; -} - -/** - * Atomically add a 32-bit value to an atomic counter. - * - * @param v - * A pointer to the atomic counter. - * @param inc - * The value to be added to the counter. - */ -static inline void -rte_atomic32_add(rte_atomic32_t *v, int32_t inc) -{ - __sync_fetch_and_add(&v->cnt, inc); -} - -/** - * Atomically subtract a 32-bit value from an atomic counter. - * - * @param v - * A pointer to the atomic counter. - * @param dec - * The value to be subtracted from the counter. - */ -static inline void -rte_atomic32_sub(rte_atomic32_t *v, int32_t dec) -{ - __sync_fetch_and_sub(&v->cnt, dec); -} - -/** - * Atomically increment a counter by one. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void -rte_atomic32_inc(rte_atomic32_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic32_inc(rte_atomic32_t *v) -{ - rte_atomic32_add(v, 1); -} -#endif - -/** - * Atomically decrement a counter by one. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void -rte_atomic32_dec(rte_atomic32_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic32_dec(rte_atomic32_t *v) -{ - rte_atomic32_sub(v,1); -} -#endif - -/** - * Atomically add a 32-bit value to a counter and return the result. - * - * Atomically adds the 32-bits value (inc) to the atomic counter (v) and - * returns the value of v after addition. - * - * @param v - * A pointer to the atomic counter. - * @param inc - * The value to be added to the counter. - * @return - * The value of v after the addition. - */ -static inline int32_t -rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc) -{ - return __sync_add_and_fetch(&v->cnt, inc); -} - -/** - * Atomically subtract a 32-bit value from a counter and return - * the result. - * - * Atomically subtracts the 32-bit value (inc) from the atomic counter - * (v) and returns the value of v after the subtraction. - * - * @param v - * A pointer to the atomic counter. - * @param dec - * The value to be subtracted from the counter. - * @return - * The value of v after the subtraction. 
- */ -static inline int32_t -rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec) -{ - return __sync_sub_and_fetch(&v->cnt, dec); -} - -/** - * Atomically increment a 32-bit counter by one and test. - * - * Atomically increments the atomic counter (v) by one and returns true if - * the result is 0, or false in all other cases. - * - * @param v - * A pointer to the atomic counter. - * @return - * True if the result after the increment operation is 0; false otherwise. - */ -static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v) -{ - return __sync_add_and_fetch(&v->cnt, 1) == 0; -} -#endif - -/** - * Atomically decrement a 32-bit counter by one and test. - * - * Atomically decrements the atomic counter (v) by one and returns true if - * the result is 0, or false in all other cases. - * - * @param v - * A pointer to the atomic counter. - * @return - * True if the result after the decrement operation is 0; false otherwise. - */ -static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v) -{ - return __sync_sub_and_fetch(&v->cnt, 1) == 0; -} -#endif - -/** - * Atomically test and set a 32-bit atomic counter. - * - * If the counter value is already set, return 0 (failed). Otherwise, set - * the counter value to 1 and return 1 (success). - * - * @param v - * A pointer to the atomic counter. - * @return - * 0 if failed; else 1, success. - */ -static inline int rte_atomic32_test_and_set(rte_atomic32_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int rte_atomic32_test_and_set(rte_atomic32_t *v) -{ - return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1); -} -#endif - -/** - * Atomically set a 32-bit counter to 0. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void rte_atomic32_clear(rte_atomic32_t *v) -{ - v->cnt = 0; -} - -/*------------------------- 64 bit atomic operations -------------------------*/ - -/** - * An atomic compare and set function used by the mutex functions. - * (atomic) equivalent to: - * if (*dst == exp) - * *dst = src (all 64-bit words) - * - * @param dst - * The destination into which the value will be written. - * @param exp - * The expected value. - * @param src - * The new value. - * @return - * Non-zero on success; 0 on failure. - */ -static inline int -rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src); - -#ifdef RTE_FORCE_INTRINSICS -static inline int -rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src) -{ - return __sync_bool_compare_and_swap(dst, exp, src); -} -#endif - -/** - * Atomic exchange. - * - * (atomic) equivalent to: - * ret = *dst - * *dst = val; - * return ret; - * - * @param dst - * The destination location into which the value will be written. - * @param val - * The new value. - * @return - * The original value at that location - */ -static inline uint64_t -rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val); - -#ifdef RTE_FORCE_INTRINSICS -static inline uint64_t -rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val) -{ -#if defined(__clang__) - return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); -#else - return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST); -#endif -} -#endif - -/** - * The atomic counter structure. - */ -typedef struct { - volatile int64_t cnt; /**< Internal counter value. 
*/ -} rte_atomic64_t; - -/** - * Static initializer for an atomic counter. - */ -#define RTE_ATOMIC64_INIT(val) { (val) } - -/** - * Initialize the atomic counter. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void -rte_atomic64_init(rte_atomic64_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic64_init(rte_atomic64_t *v) -{ -#ifdef __LP64__ - v->cnt = 0; -#else - int success = 0; - uint64_t tmp; - - while (success == 0) { - tmp = v->cnt; - success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, - tmp, 0); - } -#endif -} -#endif - -/** - * Atomically read a 64-bit counter. - * - * @param v - * A pointer to the atomic counter. - * @return - * The value of the counter. - */ -static inline int64_t -rte_atomic64_read(rte_atomic64_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int64_t -rte_atomic64_read(rte_atomic64_t *v) -{ -#ifdef __LP64__ - return v->cnt; -#else - int success = 0; - uint64_t tmp; - - while (success == 0) { - tmp = v->cnt; - /* replace the value by itself */ - success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, - tmp, tmp); - } - return tmp; -#endif -} -#endif - -/** - * Atomically set a 64-bit counter. - * - * @param v - * A pointer to the atomic counter. - * @param new_value - * The new value of the counter. - */ -static inline void -rte_atomic64_set(rte_atomic64_t *v, int64_t new_value); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic64_set(rte_atomic64_t *v, int64_t new_value) -{ -#ifdef __LP64__ - v->cnt = new_value; -#else - int success = 0; - uint64_t tmp; - - while (success == 0) { - tmp = v->cnt; - success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, - tmp, new_value); - } -#endif -} -#endif - -/** - * Atomically add a 64-bit value to a counter. - * - * @param v - * A pointer to the atomic counter. - * @param inc - * The value to be added to the counter. - */ -static inline void -rte_atomic64_add(rte_atomic64_t *v, int64_t inc); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic64_add(rte_atomic64_t *v, int64_t inc) -{ - __sync_fetch_and_add(&v->cnt, inc); -} -#endif - -/** - * Atomically subtract a 64-bit value from a counter. - * - * @param v - * A pointer to the atomic counter. - * @param dec - * The value to be subtracted from the counter. - */ -static inline void -rte_atomic64_sub(rte_atomic64_t *v, int64_t dec); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic64_sub(rte_atomic64_t *v, int64_t dec) -{ - __sync_fetch_and_sub(&v->cnt, dec); -} -#endif - -/** - * Atomically increment a 64-bit counter by one and test. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void -rte_atomic64_inc(rte_atomic64_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic64_inc(rte_atomic64_t *v) -{ - rte_atomic64_add(v, 1); -} -#endif - -/** - * Atomically decrement a 64-bit counter by one and test. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void -rte_atomic64_dec(rte_atomic64_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_atomic64_dec(rte_atomic64_t *v) -{ - rte_atomic64_sub(v, 1); -} -#endif - -/** - * Add a 64-bit value to an atomic counter and return the result. - * - * Atomically adds the 64-bit value (inc) to the atomic counter (v) and - * returns the value of v after the addition. - * - * @param v - * A pointer to the atomic counter. - * @param inc - * The value to be added to the counter. - * @return - * The value of v after the addition. 
- */ -static inline int64_t -rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc); - -#ifdef RTE_FORCE_INTRINSICS -static inline int64_t -rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc) -{ - return __sync_add_and_fetch(&v->cnt, inc); -} -#endif - -/** - * Subtract a 64-bit value from an atomic counter and return the result. - * - * Atomically subtracts the 64-bit value (dec) from the atomic counter (v) - * and returns the value of v after the subtraction. - * - * @param v - * A pointer to the atomic counter. - * @param dec - * The value to be subtracted from the counter. - * @return - * The value of v after the subtraction. - */ -static inline int64_t -rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec); - -#ifdef RTE_FORCE_INTRINSICS -static inline int64_t -rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec) -{ - return __sync_sub_and_fetch(&v->cnt, dec); -} -#endif - -/** - * Atomically increment a 64-bit counter by one and test. - * - * Atomically increments the atomic counter (v) by one and returns - * true if the result is 0, or false in all other cases. - * - * @param v - * A pointer to the atomic counter. - * @return - * True if the result after the addition is 0; false otherwise. - */ -static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v) -{ - return rte_atomic64_add_return(v, 1) == 0; -} -#endif - -/** - * Atomically decrement a 64-bit counter by one and test. - * - * Atomically decrements the atomic counter (v) by one and returns true if - * the result is 0, or false in all other cases. - * - * @param v - * A pointer to the atomic counter. - * @return - * True if the result after subtraction is 0; false otherwise. - */ -static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v) -{ - return rte_atomic64_sub_return(v, 1) == 0; -} -#endif - -/** - * Atomically test and set a 64-bit atomic counter. - * - * If the counter value is already set, return 0 (failed). Otherwise, set - * the counter value to 1 and return 1 (success). - * - * @param v - * A pointer to the atomic counter. - * @return - * 0 if failed; else 1, success. - */ -static inline int rte_atomic64_test_and_set(rte_atomic64_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline int rte_atomic64_test_and_set(rte_atomic64_t *v) -{ - return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1); -} -#endif - -/** - * Atomically set a 64-bit counter to 0. - * - * @param v - * A pointer to the atomic counter. - */ -static inline void rte_atomic64_clear(rte_atomic64_t *v); - -#ifdef RTE_FORCE_INTRINSICS -static inline void rte_atomic64_clear(rte_atomic64_t *v) -{ - rte_atomic64_set(v, 0); -} -#endif - -/*------------------------ 128 bit atomic operations -------------------------*/ - -/** - * 128-bit integer structure. - */ -RTE_STD_C11 -typedef struct { - RTE_STD_C11 - union { - uint64_t val[2]; -#ifdef RTE_ARCH_64 - __extension__ __int128 int128; -#endif - }; -} __rte_aligned(16) rte_int128_t; - -#ifdef __DOXYGEN__ - -/** - * An atomic compare and set function used by the mutex functions. - * (Atomically) Equivalent to: - * @code - * if (*dst == *exp) - * *dst = *src - * else - * *exp = *dst - * @endcode - * - * @note This function is currently available for the x86-64 and aarch64 - * platforms. 
- * - * @note The success and failure arguments must be one of the __ATOMIC_* values - * defined in the C++11 standard. For details on their behavior, refer to the - * standard. - * - * @param dst - * The destination into which the value will be written. - * @param exp - * Pointer to the expected value. If the operation fails, this memory is - * updated with the actual value. - * @param src - * Pointer to the new value. - * @param weak - * A value of true allows the comparison to spuriously fail and allows the - * 'exp' update to occur non-atomically (i.e. a torn read may occur). - * Implementations may ignore this argument and only implement the strong - * variant. - * @param success - * If successful, the operation's memory behavior conforms to this (or a - * stronger) model. - * @param failure - * If unsuccessful, the operation's memory behavior conforms to this (or a - * stronger) model. This argument cannot be __ATOMIC_RELEASE, - * __ATOMIC_ACQ_REL, or a stronger model than success. - * @return - * Non-zero on success; 0 on failure. - */ -__rte_experimental -static inline int -rte_atomic128_cmp_exchange(rte_int128_t *dst, - rte_int128_t *exp, - const rte_int128_t *src, - unsigned int weak, - int success, - int failure); - -#endif /* __DOXYGEN__ */ - -#endif /* _RTE_ATOMIC_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_byteorder.h b/lib/librte_eal/common/include/generic/rte_byteorder.h deleted file mode 100644 index 38e8cfd32b..0000000000 --- a/lib/librte_eal/common/include/generic/rte_byteorder.h +++ /dev/null @@ -1,247 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_BYTEORDER_H_ -#define _RTE_BYTEORDER_H_ - -/** - * @file - * - * Byte Swap Operations - * - * This file defines a generic API for byte swap operations. Part of - * the implementation is architecture-specific. - */ - -#include -#ifdef RTE_EXEC_ENV_FREEBSD -#include -#else -#include -#endif - -#include -#include - -/* - * Compile-time endianness detection - */ -#define RTE_BIG_ENDIAN 1 -#define RTE_LITTLE_ENDIAN 2 -#if defined __BYTE_ORDER__ -#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -#define RTE_BYTE_ORDER RTE_BIG_ENDIAN -#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ -#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN -#endif /* __BYTE_ORDER__ */ -#elif defined __BYTE_ORDER -#if __BYTE_ORDER == __BIG_ENDIAN -#define RTE_BYTE_ORDER RTE_BIG_ENDIAN -#elif __BYTE_ORDER == __LITTLE_ENDIAN -#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN -#endif /* __BYTE_ORDER */ -#elif defined __BIG_ENDIAN__ -#define RTE_BYTE_ORDER RTE_BIG_ENDIAN -#elif defined __LITTLE_ENDIAN__ -#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN -#endif -#if !defined(RTE_BYTE_ORDER) -#error Unknown endianness. 
-#endif - -#define RTE_STATIC_BSWAP16(v) \ - ((((uint16_t)(v) & UINT16_C(0x00ff)) << 8) | \ - (((uint16_t)(v) & UINT16_C(0xff00)) >> 8)) - -#define RTE_STATIC_BSWAP32(v) \ - ((((uint32_t)(v) & UINT32_C(0x000000ff)) << 24) | \ - (((uint32_t)(v) & UINT32_C(0x0000ff00)) << 8) | \ - (((uint32_t)(v) & UINT32_C(0x00ff0000)) >> 8) | \ - (((uint32_t)(v) & UINT32_C(0xff000000)) >> 24)) - -#define RTE_STATIC_BSWAP64(v) \ - ((((uint64_t)(v) & UINT64_C(0x00000000000000ff)) << 56) | \ - (((uint64_t)(v) & UINT64_C(0x000000000000ff00)) << 40) | \ - (((uint64_t)(v) & UINT64_C(0x0000000000ff0000)) << 24) | \ - (((uint64_t)(v) & UINT64_C(0x00000000ff000000)) << 8) | \ - (((uint64_t)(v) & UINT64_C(0x000000ff00000000)) >> 8) | \ - (((uint64_t)(v) & UINT64_C(0x0000ff0000000000)) >> 24) | \ - (((uint64_t)(v) & UINT64_C(0x00ff000000000000)) >> 40) | \ - (((uint64_t)(v) & UINT64_C(0xff00000000000000)) >> 56)) - -/* - * These macros are functionally similar to rte_cpu_to_(be|le)(16|32|64)(), - * they take values in host CPU order and return them converted to the - * intended endianness. - * - * They resolve at compilation time to integer constants which can safely be - * used with static initializers, since those cannot involve function calls. - * - * On the other hand, they are not as optimized as their rte_cpu_to_*() - * counterparts, therefore applications should refrain from using them on - * variable values, particularly inside performance-sensitive code. - */ -#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN -#define RTE_BE16(v) (rte_be16_t)(v) -#define RTE_BE32(v) (rte_be32_t)(v) -#define RTE_BE64(v) (rte_be64_t)(v) -#define RTE_LE16(v) (rte_le16_t)(RTE_STATIC_BSWAP16(v)) -#define RTE_LE32(v) (rte_le32_t)(RTE_STATIC_BSWAP32(v)) -#define RTE_LE64(v) (rte_le64_t)(RTE_STATIC_BSWAP64(v)) -#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN -#define RTE_BE16(v) (rte_be16_t)(RTE_STATIC_BSWAP16(v)) -#define RTE_BE32(v) (rte_be32_t)(RTE_STATIC_BSWAP32(v)) -#define RTE_BE64(v) (rte_be64_t)(RTE_STATIC_BSWAP64(v)) -#define RTE_LE16(v) (rte_be16_t)(v) -#define RTE_LE32(v) (rte_be32_t)(v) -#define RTE_LE64(v) (rte_be64_t)(v) -#else -#error Unsupported endianness. -#endif - -/* - * The following types should be used when handling values according to a - * specific byte ordering, which may differ from that of the host CPU. - * - * Libraries, public APIs and applications are encouraged to use them for - * documentation purposes. - */ -typedef uint16_t rte_be16_t; /**< 16-bit big-endian value. */ -typedef uint32_t rte_be32_t; /**< 32-bit big-endian value. */ -typedef uint64_t rte_be64_t; /**< 64-bit big-endian value. */ -typedef uint16_t rte_le16_t; /**< 16-bit little-endian value. */ -typedef uint32_t rte_le32_t; /**< 32-bit little-endian value. */ -typedef uint64_t rte_le64_t; /**< 64-bit little-endian value. */ - -/* - * An internal function to swap bytes in a 16-bit value. - * - * It is used by rte_bswap16() when the value is constant. Do not use - * this function directly; rte_bswap16() is preferred. - */ -static inline uint16_t -rte_constant_bswap16(uint16_t x) -{ - return (uint16_t)RTE_STATIC_BSWAP16(x); -} - -/* - * An internal function to swap bytes in a 32-bit value. - * - * It is used by rte_bswap32() when the value is constant. Do not use - * this function directly; rte_bswap32() is preferred. - */ -static inline uint32_t -rte_constant_bswap32(uint32_t x) -{ - return (uint32_t)RTE_STATIC_BSWAP32(x); -} - -/* - * An internal function to swap bytes of a 64-bit value. - * - * It is used by rte_bswap64() when the value is constant. 
Do not use - * this function directly; rte_bswap64() is preferred. - */ -static inline uint64_t -rte_constant_bswap64(uint64_t x) -{ - return (uint64_t)RTE_STATIC_BSWAP64(x); -} - - -#ifdef __DOXYGEN__ - -/** - * Swap bytes in a 16-bit value. - */ -static uint16_t rte_bswap16(uint16_t _x); - -/** - * Swap bytes in a 32-bit value. - */ -static uint32_t rte_bswap32(uint32_t x); - -/** - * Swap bytes in a 64-bit value. - */ -static uint64_t rte_bswap64(uint64_t x); - -/** - * Convert a 16-bit value from CPU order to little endian. - */ -static rte_le16_t rte_cpu_to_le_16(uint16_t x); - -/** - * Convert a 32-bit value from CPU order to little endian. - */ -static rte_le32_t rte_cpu_to_le_32(uint32_t x); - -/** - * Convert a 64-bit value from CPU order to little endian. - */ -static rte_le64_t rte_cpu_to_le_64(uint64_t x); - - -/** - * Convert a 16-bit value from CPU order to big endian. - */ -static rte_be16_t rte_cpu_to_be_16(uint16_t x); - -/** - * Convert a 32-bit value from CPU order to big endian. - */ -static rte_be32_t rte_cpu_to_be_32(uint32_t x); - -/** - * Convert a 64-bit value from CPU order to big endian. - */ -static rte_be64_t rte_cpu_to_be_64(uint64_t x); - - -/** - * Convert a 16-bit value from little endian to CPU order. - */ -static uint16_t rte_le_to_cpu_16(rte_le16_t x); - -/** - * Convert a 32-bit value from little endian to CPU order. - */ -static uint32_t rte_le_to_cpu_32(rte_le32_t x); - -/** - * Convert a 64-bit value from little endian to CPU order. - */ -static uint64_t rte_le_to_cpu_64(rte_le64_t x); - - -/** - * Convert a 16-bit value from big endian to CPU order. - */ -static uint16_t rte_be_to_cpu_16(rte_be16_t x); - -/** - * Convert a 32-bit value from big endian to CPU order. - */ -static uint32_t rte_be_to_cpu_32(rte_be32_t x); - -/** - * Convert a 64-bit value from big endian to CPU order. - */ -static uint64_t rte_be_to_cpu_64(rte_be64_t x); - -#endif /* __DOXYGEN__ */ - -#ifdef RTE_FORCE_INTRINSICS -#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) -#define rte_bswap16(x) __builtin_bswap16(x) -#endif - -#define rte_bswap32(x) __builtin_bswap32(x) - -#define rte_bswap64(x) __builtin_bswap64(x) - -#endif - -#endif /* _RTE_BYTEORDER_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_cpuflags.h b/lib/librte_eal/common/include/generic/rte_cpuflags.h deleted file mode 100644 index 872f0ebe3e..0000000000 --- a/lib/librte_eal/common/include/generic/rte_cpuflags.h +++ /dev/null @@ -1,79 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_CPUFLAGS_H_ -#define _RTE_CPUFLAGS_H_ - -/** - * @file - * Architecture specific API to determine available CPU features at runtime. - */ - -#include "rte_common.h" -#include - -/** - * Enumeration of all CPU features supported - */ -__extension__ -enum rte_cpu_flag_t; - -/** - * Get name of CPU flag - * - * @param feature - * CPU flag ID - * @return - * flag name - * NULL if flag ID is invalid - */ -__extension__ -const char * -rte_cpu_get_flag_name(enum rte_cpu_flag_t feature); - -/** - * Function for checking a CPU flag availability - * - * @param feature - * CPU flag to query CPU for - * @return - * 1 if flag is available - * 0 if flag is not available - * -ENOENT if flag is invalid - */ -__extension__ -int -rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature); - -/** - * This function checks that the currently used CPU supports the CPU features - * that were specified at compile time. 
It is called automatically within the - * EAL, so does not need to be used by applications. This version returns a - * result so that decisions may be made (for instance, graceful shutdowns). - */ -int -rte_cpu_is_supported(void); - -/** - * This function attempts to retrieve a value from the auxiliary vector. - * If it is unsuccessful, the result will be 0, and errno will be set. - * - * @return A value from the auxiliary vector. When the value is 0, check - * errno to determine if an error occurred. - */ -unsigned long -rte_cpu_getauxval(unsigned long type); - -/** - * This function retrieves a value from the auxiliary vector, and compares it - * as a string against the value retrieved. - * - * @return The result of calling strcmp() against the value retrieved from - * the auxiliary vector. When the value is 0 (meaning a match is found), - * check errno to determine if an error occurred. - */ -int -rte_cpu_strcmp_auxval(unsigned long type, const char *str); - -#endif /* _RTE_CPUFLAGS_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_cycles.h b/lib/librte_eal/common/include/generic/rte_cycles.h deleted file mode 100644 index 73d1fa7b92..0000000000 --- a/lib/librte_eal/common/include/generic/rte_cycles.h +++ /dev/null @@ -1,181 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation. - * Copyright(c) 2013 6WIND S.A. - */ - -#ifndef _RTE_CYCLES_H_ -#define _RTE_CYCLES_H_ - -/** - * @file - * - * Simple Time Reference Functions (Cycles and HPET). - */ - -#include -#include -#include -#include - -#define MS_PER_S 1000 -#define US_PER_S 1000000 -#define NS_PER_S 1000000000 - -enum timer_source { - EAL_TIMER_TSC = 0, - EAL_TIMER_HPET -}; -extern enum timer_source eal_timer_source; - -/** - * Get the measured frequency of the RDTSC counter - * - * @return - * The TSC frequency for this lcore - */ -uint64_t -rte_get_tsc_hz(void); - -/** - * Return the number of TSC cycles since boot - * - * @return - * the number of cycles - */ -static inline uint64_t -rte_get_tsc_cycles(void); - -#ifdef RTE_LIBEAL_USE_HPET -/** - * Return the number of HPET cycles since boot - * - * This counter is global for all execution units. The number of - * cycles in one second can be retrieved using rte_get_hpet_hz(). - * - * @return - * the number of cycles - */ -uint64_t -rte_get_hpet_cycles(void); - -/** - * Get the number of HPET cycles in one second. - * - * @return - * The number of cycles in one second. - */ -uint64_t -rte_get_hpet_hz(void); - -/** - * Initialise the HPET for use. This must be called before the rte_get_hpet_hz - * and rte_get_hpet_cycles APIs are called. If this function does not succeed, - * then the HPET functions are unavailable and should not be called. - * - * @param make_default - * If set, the hpet timer becomes the default timer whose values are - * returned by the rte_get_timer_hz/cycles API calls - * - * @return - * 0 on success, - * -1 on error, and the make_default parameter is ignored. - */ -int rte_eal_hpet_init(int make_default); - -#endif - -/** - * Get the number of cycles since boot from the default timer. 
- * - * @return - * The number of cycles - */ -static inline uint64_t -rte_get_timer_cycles(void) -{ -#ifdef RTE_LIBEAL_USE_HPET - switch(eal_timer_source) { - case EAL_TIMER_TSC: -#endif - return rte_get_tsc_cycles(); -#ifdef RTE_LIBEAL_USE_HPET - case EAL_TIMER_HPET: - return rte_get_hpet_cycles(); - default: rte_panic("Invalid timer source specified\n"); - } -#endif -} - -/** - * Get the number of cycles in one second for the default timer. - * - * @return - * The number of cycles in one second. - */ -static inline uint64_t -rte_get_timer_hz(void) -{ -#ifdef RTE_LIBEAL_USE_HPET - switch(eal_timer_source) { - case EAL_TIMER_TSC: -#endif - return rte_get_tsc_hz(); -#ifdef RTE_LIBEAL_USE_HPET - case EAL_TIMER_HPET: - return rte_get_hpet_hz(); - default: rte_panic("Invalid timer source specified\n"); - } -#endif -} -/** - * Wait at least us microseconds. - * This function can be replaced with user-defined function. - * @see rte_delay_us_callback_register - * - * @param us - * The number of microseconds to wait. - */ -extern void -(*rte_delay_us)(unsigned int us); - -/** - * Wait at least ms milliseconds. - * - * @param ms - * The number of milliseconds to wait. - */ -static inline void -rte_delay_ms(unsigned ms) -{ - rte_delay_us(ms * 1000); -} - -/** - * Blocking delay function. - * - * @param us - * Number of microseconds to wait. - */ -void rte_delay_us_block(unsigned int us); - -/** - * Delay function that uses system sleep. - * Does not block the CPU core. - * - * @param us - * Number of microseconds to wait. - */ -__rte_experimental -void -rte_delay_us_sleep(unsigned int us); - -/** - * Replace rte_delay_us with user defined function. - * - * @param userfunc - * User function which replaces rte_delay_us. rte_delay_us_block restores - * builtin block delay function. - */ -void rte_delay_us_callback_register(void(*userfunc)(unsigned int)); - -#endif /* _RTE_CYCLES_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_io.h b/lib/librte_eal/common/include/generic/rte_io.h deleted file mode 100644 index da457f7f7e..0000000000 --- a/lib/librte_eal/common/include/generic/rte_io.h +++ /dev/null @@ -1,350 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016 Cavium, Inc - */ - -#ifndef _RTE_IO_H_ -#define _RTE_IO_H_ - -/** - * @file - * I/O device memory operations - * - * This file defines the generic API for I/O device memory read/write operations - */ - -#include -#include -#include - -#ifdef __DOXYGEN__ - -/** - * Read a 8-bit value from I/O device memory address *addr*. - * - * The relaxed version does not have additional I/O memory barrier, useful in - * accessing the device registers of integrated controllers which implicitly - * strongly ordered with respect to memory access. - * - * @param addr - * I/O memory address to read the value from - * @return - * read value - */ -static inline uint8_t -rte_read8_relaxed(const volatile void *addr); - -/** - * Read a 16-bit value from I/O device memory address *addr*. - * - * The relaxed version does not have additional I/O memory barrier, useful in - * accessing the device registers of integrated controllers which implicitly - * strongly ordered with respect to memory access. - * - * @param addr - * I/O memory address to read the value from - * @return - * read value - */ -static inline uint16_t -rte_read16_relaxed(const volatile void *addr); - -/** - * Read a 32-bit value from I/O device memory address *addr*. 
- * - * The relaxed version does not have additional I/O memory barrier, useful in - * accessing the device registers of integrated controllers which implicitly - * strongly ordered with respect to memory access. - * - * @param addr - * I/O memory address to read the value from - * @return - * read value - */ -static inline uint32_t -rte_read32_relaxed(const volatile void *addr); - -/** - * Read a 64-bit value from I/O device memory address *addr*. - * - * The relaxed version does not have additional I/O memory barrier, useful in - * accessing the device registers of integrated controllers which implicitly - * strongly ordered with respect to memory access. - * - * @param addr - * I/O memory address to read the value from - * @return - * read value - */ -static inline uint64_t -rte_read64_relaxed(const volatile void *addr); - -/** - * Write a 8-bit value to I/O device memory address *addr*. - * - * The relaxed version does not have additional I/O memory barrier, useful in - * accessing the device registers of integrated controllers which implicitly - * strongly ordered with respect to memory access. - * - * @param value - * Value to write - * @param addr - * I/O memory address to write the value to - */ - -static inline void -rte_write8_relaxed(uint8_t value, volatile void *addr); - -/** - * Write a 16-bit value to I/O device memory address *addr*. - * - * The relaxed version does not have additional I/O memory barrier, useful in - * accessing the device registers of integrated controllers which implicitly - * strongly ordered with respect to memory access. - * - * @param value - * Value to write - * @param addr - * I/O memory address to write the value to - */ -static inline void -rte_write16_relaxed(uint16_t value, volatile void *addr); - -/** - * Write a 32-bit value to I/O device memory address *addr*. - * - * The relaxed version does not have additional I/O memory barrier, useful in - * accessing the device registers of integrated controllers which implicitly - * strongly ordered with respect to memory access. - * - * @param value - * Value to write - * @param addr - * I/O memory address to write the value to - */ -static inline void -rte_write32_relaxed(uint32_t value, volatile void *addr); - -/** - * Write a 64-bit value to I/O device memory address *addr*. - * - * The relaxed version does not have additional I/O memory barrier, useful in - * accessing the device registers of integrated controllers which implicitly - * strongly ordered with respect to memory access. - * - * @param value - * Value to write - * @param addr - * I/O memory address to write the value to - */ -static inline void -rte_write64_relaxed(uint64_t value, volatile void *addr); - -/** - * Read a 8-bit value from I/O device memory address *addr*. - * - * @param addr - * I/O memory address to read the value from - * @return - * read value - */ -static inline uint8_t -rte_read8(const volatile void *addr); - -/** - * Read a 16-bit value from I/O device memory address *addr*. - * - * - * @param addr - * I/O memory address to read the value from - * @return - * read value - */ -static inline uint16_t -rte_read16(const volatile void *addr); - -/** - * Read a 32-bit value from I/O device memory address *addr*. - * - * @param addr - * I/O memory address to read the value from - * @return - * read value - */ -static inline uint32_t -rte_read32(const volatile void *addr); - -/** - * Read a 64-bit value from I/O device memory address *addr*. 
- * - * @param addr - * I/O memory address to read the value from - * @return - * read value - */ -static inline uint64_t -rte_read64(const volatile void *addr); - -/** - * Write a 8-bit value to I/O device memory address *addr*. - * - * @param value - * Value to write - * @param addr - * I/O memory address to write the value to - */ - -static inline void -rte_write8(uint8_t value, volatile void *addr); - -/** - * Write a 16-bit value to I/O device memory address *addr*. - * - * @param value - * Value to write - * @param addr - * I/O memory address to write the value to - */ -static inline void -rte_write16(uint16_t value, volatile void *addr); - -/** - * Write a 32-bit value to I/O device memory address *addr*. - * - * @param value - * Value to write - * @param addr - * I/O memory address to write the value to - */ -static inline void -rte_write32(uint32_t value, volatile void *addr); - -/** - * Write a 64-bit value to I/O device memory address *addr*. - * - * @param value - * Value to write - * @param addr - * I/O memory address to write the value to - */ -static inline void -rte_write64(uint64_t value, volatile void *addr); - -#endif /* __DOXYGEN__ */ - -#ifndef RTE_OVERRIDE_IO_H - -static __rte_always_inline uint8_t -rte_read8_relaxed(const volatile void *addr) -{ - return *(const volatile uint8_t *)addr; -} - -static __rte_always_inline uint16_t -rte_read16_relaxed(const volatile void *addr) -{ - return *(const volatile uint16_t *)addr; -} - -static __rte_always_inline uint32_t -rte_read32_relaxed(const volatile void *addr) -{ - return *(const volatile uint32_t *)addr; -} - -static __rte_always_inline uint64_t -rte_read64_relaxed(const volatile void *addr) -{ - return *(const volatile uint64_t *)addr; -} - -static __rte_always_inline void -rte_write8_relaxed(uint8_t value, volatile void *addr) -{ - *(volatile uint8_t *)addr = value; -} - -static __rte_always_inline void -rte_write16_relaxed(uint16_t value, volatile void *addr) -{ - *(volatile uint16_t *)addr = value; -} - -static __rte_always_inline void -rte_write32_relaxed(uint32_t value, volatile void *addr) -{ - *(volatile uint32_t *)addr = value; -} - -static __rte_always_inline void -rte_write64_relaxed(uint64_t value, volatile void *addr) -{ - *(volatile uint64_t *)addr = value; -} - -static __rte_always_inline uint8_t -rte_read8(const volatile void *addr) -{ - uint8_t val; - val = rte_read8_relaxed(addr); - rte_io_rmb(); - return val; -} - -static __rte_always_inline uint16_t -rte_read16(const volatile void *addr) -{ - uint16_t val; - val = rte_read16_relaxed(addr); - rte_io_rmb(); - return val; -} - -static __rte_always_inline uint32_t -rte_read32(const volatile void *addr) -{ - uint32_t val; - val = rte_read32_relaxed(addr); - rte_io_rmb(); - return val; -} - -static __rte_always_inline uint64_t -rte_read64(const volatile void *addr) -{ - uint64_t val; - val = rte_read64_relaxed(addr); - rte_io_rmb(); - return val; -} - -static __rte_always_inline void -rte_write8(uint8_t value, volatile void *addr) -{ - rte_io_wmb(); - rte_write8_relaxed(value, addr); -} - -static __rte_always_inline void -rte_write16(uint16_t value, volatile void *addr) -{ - rte_io_wmb(); - rte_write16_relaxed(value, addr); -} - -static __rte_always_inline void -rte_write32(uint32_t value, volatile void *addr) -{ - rte_io_wmb(); - rte_write32_relaxed(value, addr); -} - -static __rte_always_inline void -rte_write64(uint64_t value, volatile void *addr) -{ - rte_io_wmb(); - rte_write64_relaxed(value, addr); -} - -#endif /* RTE_OVERRIDE_IO_H */ - -#endif /* 
_RTE_IO_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_mcslock.h b/lib/librte_eal/common/include/generic/rte_mcslock.h deleted file mode 100644 index 2bef28351c..0000000000 --- a/lib/librte_eal/common/include/generic/rte_mcslock.h +++ /dev/null @@ -1,179 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Arm Limited - */ - -#ifndef _RTE_MCSLOCK_H_ -#define _RTE_MCSLOCK_H_ - -/** - * @file - * - * RTE MCS lock - * - * This file defines the main data structure and APIs for MCS queued lock. - * - * The MCS lock (proposed by John M. Mellor-Crummey and Michael L. Scott) - * provides scalability by spinning on a CPU/thread local variable which - * avoids expensive cache bouncings. It provides fairness by maintaining - * a list of acquirers and passing the lock to each CPU/thread in the order - * they acquired the lock. - */ - -#include -#include -#include - -/** - * The rte_mcslock_t type. - */ -typedef struct rte_mcslock { - struct rte_mcslock *next; - int locked; /* 1 if the queue locked, 0 otherwise */ -} rte_mcslock_t; - -/** - * @warning - * @b EXPERIMENTAL: This API may change without prior notice - * - * Take the MCS lock. - * - * @param msl - * A pointer to the pointer of a MCS lock. - * When the lock is initialized or declared, the msl pointer should be - * set to NULL. - * @param me - * A pointer to a new node of MCS lock. Each CPU/thread acquiring the - * lock should use its 'own node'. - */ -__rte_experimental -static inline void -rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me) -{ - rte_mcslock_t *prev; - - /* Init me node */ - __atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED); - __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED); - - /* If the queue is empty, the exchange operation is enough to acquire - * the lock. Hence, the exchange operation requires acquire semantics. - * The store to me->next above should complete before the node is - * visible to other CPUs/threads. Hence, the exchange operation requires - * release semantics as well. - */ - prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL); - if (likely(prev == NULL)) { - /* Queue was empty, no further action required, - * proceed with lock taken. - */ - return; - } - __atomic_store_n(&prev->next, me, __ATOMIC_RELAXED); - - /* The while-load of me->locked should not move above the previous - * store to prev->next. Otherwise it will cause a deadlock. Need a - * store-load barrier. - */ - __atomic_thread_fence(__ATOMIC_ACQ_REL); - /* If the lock has already been acquired, it first atomically - * places the node at the end of the queue and then proceeds - * to spin on me->locked until the previous lock holder resets - * the me->locked using mcslock_unlock(). - */ - while (__atomic_load_n(&me->locked, __ATOMIC_ACQUIRE)) - rte_pause(); -} - -/** - * @warning - * @b EXPERIMENTAL: This API may change without prior notice - * - * Release the MCS lock. - * - * @param msl - * A pointer to the pointer of a MCS lock. - * @param me - * A pointer to the node of MCS lock passed in rte_mcslock_lock. - */ -__rte_experimental -static inline void -rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me) -{ - /* Check if there are more nodes in the queue. */ - if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) { - /* No, last member in the queue. 
*/ - rte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED); - - /* Release the lock by setting it to NULL */ - if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0, - __ATOMIC_RELEASE, __ATOMIC_RELAXED))) - return; - - /* Speculative execution would be allowed to read in the - * while-loop first. This has the potential to cause a - * deadlock. Need a load barrier. - */ - __atomic_thread_fence(__ATOMIC_ACQUIRE); - /* More nodes added to the queue by other CPUs. - * Wait until the next pointer is set. - */ - while (__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL) - rte_pause(); - } - - /* Pass lock to next waiter. */ - __atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE); -} - -/** - * @warning - * @b EXPERIMENTAL: This API may change without prior notice - * - * Try to take the lock. - * - * @param msl - * A pointer to the pointer of a MCS lock. - * @param me - * A pointer to a new node of MCS lock. - * @return - * 1 if the lock is successfully taken; 0 otherwise. - */ -__rte_experimental -static inline int -rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me) -{ - /* Init me node */ - __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED); - - /* Try to lock */ - rte_mcslock_t *expected = NULL; - - /* The lock can be taken only when the queue is empty. Hence, - * the compare-exchange operation requires acquire semantics. - * The store to me->next above should complete before the node - * is visible to other CPUs/threads. Hence, the compare-exchange - * operation requires release semantics as well. - */ - return __atomic_compare_exchange_n(msl, &expected, me, 0, - __ATOMIC_ACQ_REL, __ATOMIC_RELAXED); -} - -/** - * @warning - * @b EXPERIMENTAL: This API may change without prior notice - * - * Test if the lock is taken. - * - * @param msl - * A pointer to a MCS lock node. - * @return - * 1 if the lock is currently taken; 0 otherwise. - */ -__rte_experimental -static inline int -rte_mcslock_is_locked(rte_mcslock_t *msl) -{ - return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL); -} - -#endif /* _RTE_MCSLOCK_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_memcpy.h b/lib/librte_eal/common/include/generic/rte_memcpy.h deleted file mode 100644 index 701e550c31..0000000000 --- a/lib/librte_eal/common/include/generic/rte_memcpy.h +++ /dev/null @@ -1,112 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_MEMCPY_H_ -#define _RTE_MEMCPY_H_ - -/** - * @file - * - * Functions for vectorised implementation of memcpy(). - */ - -/** - * Copy 16 bytes from one location to another using optimised - * instructions. The locations should not overlap. - * - * @param dst - * Pointer to the destination of the data. - * @param src - * Pointer to the source data. - */ -static inline void -rte_mov16(uint8_t *dst, const uint8_t *src); - -/** - * Copy 32 bytes from one location to another using optimised - * instructions. The locations should not overlap. - * - * @param dst - * Pointer to the destination of the data. - * @param src - * Pointer to the source data. - */ -static inline void -rte_mov32(uint8_t *dst, const uint8_t *src); - -#ifdef __DOXYGEN__ - -/** - * Copy 48 bytes from one location to another using optimised - * instructions. The locations should not overlap. - * - * @param dst - * Pointer to the destination of the data. - * @param src - * Pointer to the source data. 
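A minimal sketch of the MCS lock usage pattern implied by the API above: each acquirer passes its own queue node, and the same node must be passed to unlock. The counter and its lock are hypothetical; these calls are experimental, so the build would need ALLOW_EXPERIMENTAL_API.

#include <stdint.h>
#include <rte_mcslock.h>

static rte_mcslock_t *counter_lock;   /* queue head, NULL when the lock is free */
static uint64_t counter;

static void
bump_counter(void)
{
	rte_mcslock_t me;                 /* per-thread/per-call queue node */

	rte_mcslock_lock(&counter_lock, &me);
	counter++;
	rte_mcslock_unlock(&counter_lock, &me);
}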
- */ -static inline void -rte_mov48(uint8_t *dst, const uint8_t *src); - -#endif /* __DOXYGEN__ */ - -/** - * Copy 64 bytes from one location to another using optimised - * instructions. The locations should not overlap. - * - * @param dst - * Pointer to the destination of the data. - * @param src - * Pointer to the source data. - */ -static inline void -rte_mov64(uint8_t *dst, const uint8_t *src); - -/** - * Copy 128 bytes from one location to another using optimised - * instructions. The locations should not overlap. - * - * @param dst - * Pointer to the destination of the data. - * @param src - * Pointer to the source data. - */ -static inline void -rte_mov128(uint8_t *dst, const uint8_t *src); - -/** - * Copy 256 bytes from one location to another using optimised - * instructions. The locations should not overlap. - * - * @param dst - * Pointer to the destination of the data. - * @param src - * Pointer to the source data. - */ -static inline void -rte_mov256(uint8_t *dst, const uint8_t *src); - -#ifdef __DOXYGEN__ - -/** - * Copy bytes from one location to another. The locations must not overlap. - * - * @note This is implemented as a macro, so it's address should not be taken - * and care is needed as parameter expressions may be evaluated multiple times. - * - * @param dst - * Pointer to the destination of the data. - * @param src - * Pointer to the source data. - * @param n - * Number of bytes to copy. - * @return - * Pointer to the destination data. - */ -static void * -rte_memcpy(void *dst, const void *src, size_t n); - -#endif /* __DOXYGEN__ */ - -#endif /* _RTE_MEMCPY_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_pause.h b/lib/librte_eal/common/include/generic/rte_pause.h deleted file mode 100644 index 7422785f1a..0000000000 --- a/lib/librte_eal/common/include/generic/rte_pause.h +++ /dev/null @@ -1,128 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Cavium, Inc - * Copyright(c) 2019 Arm Limited - */ - -#ifndef _RTE_PAUSE_H_ -#define _RTE_PAUSE_H_ - -/** - * @file - * - * CPU pause operation. - * - */ - -#include -#include -#include -#include -#include - -/** - * Pause CPU execution for a short while - * - * This call is intended for tight loops which poll a shared resource or wait - * for an event. A short pause within the loop may reduce the power consumption. - */ -static inline void rte_pause(void); - -/** - * @warning - * @b EXPERIMENTAL: this API may change, or be removed, without prior notice - * - * Wait for *addr to be updated with a 16-bit expected value, with a relaxed - * memory ordering model meaning the loads around this API can be reordered. - * - * @param addr - * A pointer to the memory location. - * @param expected - * A 16-bit expected value to be in the memory location. - * @param memorder - * Two different memory orders that can be specified: - * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to - * C++11 memory orders with the same names, see the C++11 standard or - * the GCC wiki on atomic synchronization for detailed definition. - */ -__rte_experimental -static __rte_always_inline void -rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected, - int memorder); - -/** - * @warning - * @b EXPERIMENTAL: this API may change, or be removed, without prior notice - * - * Wait for *addr to be updated with a 32-bit expected value, with a relaxed - * memory ordering model meaning the loads around this API can be reordered. - * - * @param addr - * A pointer to the memory location. 
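A trivial sketch of the rte_memcpy() usage documented above; the copy_header() wrapper and its buffers are hypothetical, and the source and destination must not overlap.

#include <stddef.h>
#include <stdint.h>
#include <rte_memcpy.h>

static void
copy_header(uint8_t *dst, const uint8_t *src, size_t len)
{
	/* Same semantics as memcpy(), but with a vectorised per-architecture
	 * implementation; fixed-size helpers such as rte_mov64() exist for
	 * known lengths.
	 */
	rte_memcpy(dst, src, len);
}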
- * @param expected - * A 32-bit expected value to be in the memory location. - * @param memorder - * Two different memory orders that can be specified: - * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to - * C++11 memory orders with the same names, see the C++11 standard or - * the GCC wiki on atomic synchronization for detailed definition. - */ -__rte_experimental -static __rte_always_inline void -rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, - int memorder); - -/** - * @warning - * @b EXPERIMENTAL: this API may change, or be removed, without prior notice - * - * Wait for *addr to be updated with a 64-bit expected value, with a relaxed - * memory ordering model meaning the loads around this API can be reordered. - * - * @param addr - * A pointer to the memory location. - * @param expected - * A 64-bit expected value to be in the memory location. - * @param memorder - * Two different memory orders that can be specified: - * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to - * C++11 memory orders with the same names, see the C++11 standard or - * the GCC wiki on atomic synchronization for detailed definition. - */ -__rte_experimental -static __rte_always_inline void -rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected, - int memorder); - -#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED -static __rte_always_inline void -rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected, - int memorder) -{ - assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED); - - while (__atomic_load_n(addr, memorder) != expected) - rte_pause(); -} - -static __rte_always_inline void -rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, - int memorder) -{ - assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED); - - while (__atomic_load_n(addr, memorder) != expected) - rte_pause(); -} - -static __rte_always_inline void -rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected, - int memorder) -{ - assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED); - - while (__atomic_load_n(addr, memorder) != expected) - rte_pause(); -} -#endif - -#endif /* _RTE_PAUSE_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_prefetch.h b/lib/librte_eal/common/include/generic/rte_prefetch.h deleted file mode 100644 index 6e47bdfbad..0000000000 --- a/lib/librte_eal/common/include/generic/rte_prefetch.h +++ /dev/null @@ -1,54 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2015 Intel Corporation - */ - -#ifndef _RTE_PREFETCH_H_ -#define _RTE_PREFETCH_H_ - -/** - * @file - * - * Prefetch operations. - * - * This file defines an API for prefetch macros / inline-functions, - * which are architecture-dependent. Prefetching occurs when a - * processor requests an instruction or data from memory to cache - * before it is actually needed, potentially speeding up the execution of the - * program. - */ - -/** - * Prefetch a cache line into all cache levels. - * @param p - * Address to prefetch - */ -static inline void rte_prefetch0(const volatile void *p); - -/** - * Prefetch a cache line into all cache levels except the 0th cache level. - * @param p - * Address to prefetch - */ -static inline void rte_prefetch1(const volatile void *p); - -/** - * Prefetch a cache line into all cache levels except the 0th and 1th cache - * levels. 
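A short sketch of the wait-until-equal helpers above, used here to park a worker until another lcore publishes a flag. The ready flag and the two wrapper functions are hypothetical; the API is experimental (ALLOW_EXPERIMENTAL_API).

#include <stdint.h>
#include <rte_pause.h>

static volatile uint32_t ready;       /* hypothetical flag set by another lcore */

static void
wait_for_go(void)
{
	/* Spins (or waits more cheaply where the arch allows) until *addr == expected. */
	rte_wait_until_equal_32(&ready, 1, __ATOMIC_ACQUIRE);
}

static void
publish_go(void)
{
	__atomic_store_n(&ready, 1, __ATOMIC_RELEASE);
}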
- * @param p - * Address to prefetch - */ -static inline void rte_prefetch2(const volatile void *p); - -/** - * Prefetch a cache line into all cache levels (non-temporal/transient version) - * - * The non-temporal prefetch is intended as a prefetch hint that processor will - * use the prefetched data only once or short period, unlike the - * rte_prefetch0() function which imply that prefetched data to use repeatedly. - * - * @param p - * Address to prefetch - */ -static inline void rte_prefetch_non_temporal(const volatile void *p); - -#endif /* _RTE_PREFETCH_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h deleted file mode 100644 index da9bc3e9c0..0000000000 --- a/lib/librte_eal/common/include/generic/rte_rwlock.h +++ /dev/null @@ -1,239 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_RWLOCK_H_ -#define _RTE_RWLOCK_H_ - -/** - * @file - * - * RTE Read-Write Locks - * - * This file defines an API for read-write locks. The lock is used to - * protect data that allows multiple readers in parallel, but only - * one writer. All readers are blocked until the writer is finished - * writing. - * - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -/** - * The rte_rwlock_t type. - * - * cnt is -1 when write lock is held, and > 0 when read locks are held. - */ -typedef struct { - volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. */ -} rte_rwlock_t; - -/** - * A static rwlock initializer. - */ -#define RTE_RWLOCK_INITIALIZER { 0 } - -/** - * Initialize the rwlock to an unlocked state. - * - * @param rwl - * A pointer to the rwlock structure. - */ -static inline void -rte_rwlock_init(rte_rwlock_t *rwl) -{ - rwl->cnt = 0; -} - -/** - * Take a read lock. Loop until the lock is held. - * - * @param rwl - * A pointer to a rwlock structure. - */ -static inline void -rte_rwlock_read_lock(rte_rwlock_t *rwl) -{ - int32_t x; - int success = 0; - - while (success == 0) { - x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); - /* write lock is held */ - if (x < 0) { - rte_pause(); - continue; - } - success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); - } -} - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * - * try to take a read lock. - * - * @param rwl - * A pointer to a rwlock structure. - * @return - * - zero if the lock is successfully taken - * - -EBUSY if lock could not be acquired for reading because a - * writer holds the lock - */ -__rte_experimental -static inline int -rte_rwlock_read_trylock(rte_rwlock_t *rwl) -{ - int32_t x; - int success = 0; - - while (success == 0) { - x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); - /* write lock is held */ - if (x < 0) - return -EBUSY; - success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); - } - - return 0; -} - -/** - * Release a read lock. - * - * @param rwl - * A pointer to the rwlock structure. - */ -static inline void -rte_rwlock_read_unlock(rte_rwlock_t *rwl) -{ - __atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE); -} - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * - * try to take a write lock. - * - * @param rwl - * A pointer to a rwlock structure. 
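A small sketch of the prefetch hints above in a typical burst-processing loop; process() and the object array are hypothetical placeholders.

#include <rte_prefetch.h>

static void
process(void *obj) { (void)obj; }            /* hypothetical consumer */

static void
handle_burst(void **objs, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (i + 1 < n)
			rte_prefetch0(objs[i + 1]);  /* warm the next object's cache line */
		process(objs[i]);
	}
}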
- * @return - * - zero if the lock is successfully taken - * - -EBUSY if lock could not be acquired for writing because - * it was already locked for reading or writing - */ -__rte_experimental -static inline int -rte_rwlock_write_trylock(rte_rwlock_t *rwl) -{ - int32_t x; - - x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); - if (x != 0 || __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) == 0) - return -EBUSY; - - return 0; -} - -/** - * Take a write lock. Loop until the lock is held. - * - * @param rwl - * A pointer to a rwlock structure. - */ -static inline void -rte_rwlock_write_lock(rte_rwlock_t *rwl) -{ - int32_t x; - int success = 0; - - while (success == 0) { - x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); - /* a lock is held */ - if (x != 0) { - rte_pause(); - continue; - } - success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); - } -} - -/** - * Release a write lock. - * - * @param rwl - * A pointer to a rwlock structure. - */ -static inline void -rte_rwlock_write_unlock(rte_rwlock_t *rwl) -{ - __atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE); -} - -/** - * Try to execute critical section in a hardware memory transaction, if it - * fails or not available take a read lock - * - * NOTE: An attempt to perform a HW I/O operation inside a hardware memory - * transaction always aborts the transaction since the CPU is not able to - * roll-back should the transaction fail. Therefore, hardware transactional - * locks are not advised to be used around rte_eth_rx_burst() and - * rte_eth_tx_burst() calls. - * - * @param rwl - * A pointer to a rwlock structure. - */ -static inline void -rte_rwlock_read_lock_tm(rte_rwlock_t *rwl); - -/** - * Commit hardware memory transaction or release the read lock if the lock is used as a fall-back - * - * @param rwl - * A pointer to the rwlock structure. - */ -static inline void -rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl); - -/** - * Try to execute critical section in a hardware memory transaction, if it - * fails or not available take a write lock - * - * NOTE: An attempt to perform a HW I/O operation inside a hardware memory - * transaction always aborts the transaction since the CPU is not able to - * roll-back should the transaction fail. Therefore, hardware transactional - * locks are not advised to be used around rte_eth_rx_burst() and - * rte_eth_tx_burst() calls. - * - * @param rwl - * A pointer to a rwlock structure. - */ -static inline void -rte_rwlock_write_lock_tm(rte_rwlock_t *rwl); - -/** - * Commit hardware memory transaction or release the write lock if the lock is used as a fall-back - * - * @param rwl - * A pointer to a rwlock structure. - */ -static inline void -rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl); - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_RWLOCK_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_spinlock.h b/lib/librte_eal/common/include/generic/rte_spinlock.h deleted file mode 100644 index 87ae7a4f18..0000000000 --- a/lib/librte_eal/common/include/generic/rte_spinlock.h +++ /dev/null @@ -1,305 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_SPINLOCK_H_ -#define _RTE_SPINLOCK_H_ - -/** - * @file - * - * RTE Spinlocks - * - * This file defines an API for read-write locks, which are implemented - * in an architecture-specific way. This kind of lock simply waits in - * a loop repeatedly checking until the lock becomes available. 
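A minimal reader/writer sketch with the rwlock API above; the protected table is hypothetical. Multiple readers may hold the lock concurrently, while a writer waits for exclusivity.

#include <rte_rwlock.h>

static rte_rwlock_t tbl_lock = RTE_RWLOCK_INITIALIZER;
static int tbl[64];

static int
lookup(unsigned int idx)
{
	int v;

	rte_rwlock_read_lock(&tbl_lock);    /* shared: many readers in parallel */
	v = tbl[idx & 63];
	rte_rwlock_read_unlock(&tbl_lock);
	return v;
}

static void
update(unsigned int idx, int v)
{
	rte_rwlock_write_lock(&tbl_lock);   /* exclusive: waits for all readers */
	tbl[idx & 63] = v;
	rte_rwlock_write_unlock(&tbl_lock);
}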
- * - * All locks must be initialised before use, and only initialised once. - * - */ - -#include -#ifdef RTE_FORCE_INTRINSICS -#include -#endif -#include - -/** - * The rte_spinlock_t type. - */ -typedef struct { - volatile int locked; /**< lock status 0 = unlocked, 1 = locked */ -} rte_spinlock_t; - -/** - * A static spinlock initializer. - */ -#define RTE_SPINLOCK_INITIALIZER { 0 } - -/** - * Initialize the spinlock to an unlocked state. - * - * @param sl - * A pointer to the spinlock. - */ -static inline void -rte_spinlock_init(rte_spinlock_t *sl) -{ - sl->locked = 0; -} - -/** - * Take the spinlock. - * - * @param sl - * A pointer to the spinlock. - */ -static inline void -rte_spinlock_lock(rte_spinlock_t *sl); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_spinlock_lock(rte_spinlock_t *sl) -{ - int exp = 0; - - while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) { - while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED)) - rte_pause(); - exp = 0; - } -} -#endif - -/** - * Release the spinlock. - * - * @param sl - * A pointer to the spinlock. - */ -static inline void -rte_spinlock_unlock (rte_spinlock_t *sl); - -#ifdef RTE_FORCE_INTRINSICS -static inline void -rte_spinlock_unlock (rte_spinlock_t *sl) -{ - __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE); -} -#endif - -/** - * Try to take the lock. - * - * @param sl - * A pointer to the spinlock. - * @return - * 1 if the lock is successfully taken; 0 otherwise. - */ -static inline int -rte_spinlock_trylock (rte_spinlock_t *sl); - -#ifdef RTE_FORCE_INTRINSICS -static inline int -rte_spinlock_trylock (rte_spinlock_t *sl) -{ - int exp = 0; - return __atomic_compare_exchange_n(&sl->locked, &exp, 1, - 0, /* disallow spurious failure */ - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); -} -#endif - -/** - * Test if the lock is taken. - * - * @param sl - * A pointer to the spinlock. - * @return - * 1 if the lock is currently taken; 0 otherwise. - */ -static inline int rte_spinlock_is_locked (rte_spinlock_t *sl) -{ - return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE); -} - -/** - * Test if hardware transactional memory (lock elision) is supported - * - * @return - * 1 if the hardware transactional memory is supported; 0 otherwise. - */ -static inline int rte_tm_supported(void); - -/** - * Try to execute critical section in a hardware memory transaction, - * if it fails or not available take the spinlock. - * - * NOTE: An attempt to perform a HW I/O operation inside a hardware memory - * transaction always aborts the transaction since the CPU is not able to - * roll-back should the transaction fail. Therefore, hardware transactional - * locks are not advised to be used around rte_eth_rx_burst() and - * rte_eth_tx_burst() calls. - * - * @param sl - * A pointer to the spinlock. - */ -static inline void -rte_spinlock_lock_tm(rte_spinlock_t *sl); - -/** - * Commit hardware memory transaction or release the spinlock if - * the spinlock is used as a fall-back - * - * @param sl - * A pointer to the spinlock. - */ -static inline void -rte_spinlock_unlock_tm(rte_spinlock_t *sl); - -/** - * Try to execute critical section in a hardware memory transaction, - * if it fails or not available try to take the lock. - * - * NOTE: An attempt to perform a HW I/O operation inside a hardware memory - * transaction always aborts the transaction since the CPU is not able to - * roll-back should the transaction fail. 
Therefore, hardware transactional - * locks are not advised to be used around rte_eth_rx_burst() and - * rte_eth_tx_burst() calls. - * - * @param sl - * A pointer to the spinlock. - * @return - * 1 if the hardware memory transaction is successfully started - * or lock is successfully taken; 0 otherwise. - */ -static inline int -rte_spinlock_trylock_tm(rte_spinlock_t *sl); - -/** - * The rte_spinlock_recursive_t type. - */ -typedef struct { - rte_spinlock_t sl; /**< the actual spinlock */ - volatile int user; /**< core id using lock, -1 for unused */ - volatile int count; /**< count of time this lock has been called */ -} rte_spinlock_recursive_t; - -/** - * A static recursive spinlock initializer. - */ -#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0} - -/** - * Initialize the recursive spinlock to an unlocked state. - * - * @param slr - * A pointer to the recursive spinlock. - */ -static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr) -{ - rte_spinlock_init(&slr->sl); - slr->user = -1; - slr->count = 0; -} - -/** - * Take the recursive spinlock. - * - * @param slr - * A pointer to the recursive spinlock. - */ -static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr) -{ - int id = rte_gettid(); - - if (slr->user != id) { - rte_spinlock_lock(&slr->sl); - slr->user = id; - } - slr->count++; -} -/** - * Release the recursive spinlock. - * - * @param slr - * A pointer to the recursive spinlock. - */ -static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr) -{ - if (--(slr->count) == 0) { - slr->user = -1; - rte_spinlock_unlock(&slr->sl); - } - -} - -/** - * Try to take the recursive lock. - * - * @param slr - * A pointer to the recursive spinlock. - * @return - * 1 if the lock is successfully taken; 0 otherwise. - */ -static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr) -{ - int id = rte_gettid(); - - if (slr->user != id) { - if (rte_spinlock_trylock(&slr->sl) == 0) - return 0; - slr->user = id; - } - slr->count++; - return 1; -} - - -/** - * Try to execute critical section in a hardware memory transaction, - * if it fails or not available take the recursive spinlocks - * - * NOTE: An attempt to perform a HW I/O operation inside a hardware memory - * transaction always aborts the transaction since the CPU is not able to - * roll-back should the transaction fail. Therefore, hardware transactional - * locks are not advised to be used around rte_eth_rx_burst() and - * rte_eth_tx_burst() calls. - * - * @param slr - * A pointer to the recursive spinlock. - */ -static inline void rte_spinlock_recursive_lock_tm( - rte_spinlock_recursive_t *slr); - -/** - * Commit hardware memory transaction or release the recursive spinlock - * if the recursive spinlock is used as a fall-back - * - * @param slr - * A pointer to the recursive spinlock. - */ -static inline void rte_spinlock_recursive_unlock_tm( - rte_spinlock_recursive_t *slr); - -/** - * Try to execute critical section in a hardware memory transaction, - * if it fails or not available try to take the recursive lock - * - * NOTE: An attempt to perform a HW I/O operation inside a hardware memory - * transaction always aborts the transaction since the CPU is not able to - * roll-back should the transaction fail. Therefore, hardware transactional - * locks are not advised to be used around rte_eth_rx_burst() and - * rte_eth_tx_burst() calls. - * - * @param slr - * A pointer to the recursive spinlock. 
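A minimal sketch of the plain and recursive spinlock APIs above; the guarded statistics counter and configuration section are hypothetical.

#include <stdint.h>
#include <rte_spinlock.h>

static rte_spinlock_t stats_lock = RTE_SPINLOCK_INITIALIZER;
static rte_spinlock_recursive_t cfg_lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
static uint64_t pkts;

static void
count_pkt(void)
{
	rte_spinlock_lock(&stats_lock);
	pkts++;
	rte_spinlock_unlock(&stats_lock);
}

static void
reconfigure(void)
{
	/* The recursive variant may be re-taken by the same thread. */
	rte_spinlock_recursive_lock(&cfg_lock);
	rte_spinlock_recursive_lock(&cfg_lock);   /* nested take is allowed */
	/* ... modify configuration ... */
	rte_spinlock_recursive_unlock(&cfg_lock);
	rte_spinlock_recursive_unlock(&cfg_lock);
}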
- * @return - * 1 if the hardware memory transaction is successfully started - * or lock is successfully taken; 0 otherwise. - */ -static inline int rte_spinlock_recursive_trylock_tm( - rte_spinlock_recursive_t *slr); - -#endif /* _RTE_SPINLOCK_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_ticketlock.h b/lib/librte_eal/common/include/generic/rte_ticketlock.h deleted file mode 100644 index c295ae7f7e..0000000000 --- a/lib/librte_eal/common/include/generic/rte_ticketlock.h +++ /dev/null @@ -1,223 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Arm Limited - */ - -#ifndef _RTE_TICKETLOCK_H_ -#define _RTE_TICKETLOCK_H_ - -/** - * @file - * - * RTE ticket locks - * - * This file defines an API for ticket locks, which give each waiting - * thread a ticket and take the lock one by one, first come, first - * serviced. - * - * All locks must be initialised before use, and only initialised once. - * - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -/** - * The rte_ticketlock_t type. - */ -typedef union { - uint32_t tickets; - struct { - uint16_t current; - uint16_t next; - } s; -} rte_ticketlock_t; - -/** - * A static ticketlock initializer. - */ -#define RTE_TICKETLOCK_INITIALIZER { 0 } - -/** - * Initialize the ticketlock to an unlocked state. - * - * @param tl - * A pointer to the ticketlock. - */ -__rte_experimental -static inline void -rte_ticketlock_init(rte_ticketlock_t *tl) -{ - __atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED); -} - -/** - * Take the ticketlock. - * - * @param tl - * A pointer to the ticketlock. - */ -__rte_experimental -static inline void -rte_ticketlock_lock(rte_ticketlock_t *tl) -{ - uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED); - rte_wait_until_equal_16(&tl->s.current, me, __ATOMIC_ACQUIRE); -} - -/** - * Release the ticketlock. - * - * @param tl - * A pointer to the ticketlock. - */ -__rte_experimental -static inline void -rte_ticketlock_unlock(rte_ticketlock_t *tl) -{ - uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED); - __atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE); -} - -/** - * Try to take the lock. - * - * @param tl - * A pointer to the ticketlock. - * @return - * 1 if the lock is successfully taken; 0 otherwise. - */ -__rte_experimental -static inline int -rte_ticketlock_trylock(rte_ticketlock_t *tl) -{ - rte_ticketlock_t old, new; - old.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED); - new.tickets = old.tickets; - new.s.next++; - if (old.s.next == old.s.current) { - if (__atomic_compare_exchange_n(&tl->tickets, &old.tickets, - new.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) - return 1; - } - - return 0; -} - -/** - * Test if the lock is taken. - * - * @param tl - * A pointer to the ticketlock. - * @return - * 1 if the lock is currently taken; 0 otherwise. - */ -__rte_experimental -static inline int -rte_ticketlock_is_locked(rte_ticketlock_t *tl) -{ - rte_ticketlock_t tic; - tic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE); - return (tic.s.current != tic.s.next); -} - -/** - * The rte_ticketlock_recursive_t type. - */ -#define TICKET_LOCK_INVALID_ID -1 - -typedef struct { - rte_ticketlock_t tl; /**< the actual ticketlock */ - int user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */ - unsigned int count; /**< count of time this lock has been called */ -} rte_ticketlock_recursive_t; - -/** - * A static recursive ticketlock initializer. 
- */ -#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, \ - TICKET_LOCK_INVALID_ID, 0} - -/** - * Initialize the recursive ticketlock to an unlocked state. - * - * @param tlr - * A pointer to the recursive ticketlock. - */ -__rte_experimental -static inline void -rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr) -{ - rte_ticketlock_init(&tlr->tl); - __atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED); - tlr->count = 0; -} - -/** - * Take the recursive ticketlock. - * - * @param tlr - * A pointer to the recursive ticketlock. - */ -__rte_experimental -static inline void -rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr) -{ - int id = rte_gettid(); - - if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) { - rte_ticketlock_lock(&tlr->tl); - __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED); - } - tlr->count++; -} - -/** - * Release the recursive ticketlock. - * - * @param tlr - * A pointer to the recursive ticketlock. - */ -__rte_experimental -static inline void -rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr) -{ - if (--(tlr->count) == 0) { - __atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, - __ATOMIC_RELAXED); - rte_ticketlock_unlock(&tlr->tl); - } -} - -/** - * Try to take the recursive lock. - * - * @param tlr - * A pointer to the recursive ticketlock. - * @return - * 1 if the lock is successfully taken; 0 otherwise. - */ -__rte_experimental -static inline int -rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr) -{ - int id = rte_gettid(); - - if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) { - if (rte_ticketlock_trylock(&tlr->tl) == 0) - return 0; - __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED); - } - tlr->count++; - return 1; -} - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_TICKETLOCK_H_ */ diff --git a/lib/librte_eal/common/include/generic/rte_vect.h b/lib/librte_eal/common/include/generic/rte_vect.h deleted file mode 100644 index 3fc47979f8..0000000000 --- a/lib/librte_eal/common/include/generic/rte_vect.h +++ /dev/null @@ -1,186 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2016 6WIND S.A. - */ - -#ifndef _RTE_VECT_H_ -#define _RTE_VECT_H_ - -/** - * @file - * SIMD vector types - * - * This file defines types to use vector instructions with generic C code. - */ - -#include - -/* Unsigned vector types */ - -/** - * 64 bits vector size to use with unsigned 8 bits elements. - * - * a = (rte_v64u8_t){ a0, a1, a2, a3, a4, a5, a6, a7 } - */ -typedef uint8_t rte_v64u8_t __attribute__((vector_size(8), aligned(8))); - -/** - * 64 bits vector size to use with unsigned 16 bits elements. - * - * a = (rte_v64u16_t){ a0, a1, a2, a3 } - */ -typedef uint16_t rte_v64u16_t __attribute__((vector_size(8), aligned(8))); - -/** - * 64 bits vector size to use with unsigned 32 bits elements. - * - * a = (rte_v64u32_t){ a0, a1 } - */ -typedef uint32_t rte_v64u32_t __attribute__((vector_size(8), aligned(8))); - -/** - * 128 bits vector size to use with unsigned 8 bits elements. - * - * a = (rte_v128u8_t){ a00, a01, a02, a03, a04, a05, a06, a07, - * a08, a09, a10, a11, a12, a13, a14, a15 } - */ -typedef uint8_t rte_v128u8_t __attribute__((vector_size(16), aligned(16))); - -/** - * 128 bits vector size to use with unsigned 16 bits elements. - * - * a = (rte_v128u16_t){ a0, a1, a2, a3, a4, a5, a6, a7 } - */ -typedef uint16_t rte_v128u16_t __attribute__((vector_size(16), aligned(16))); - -/** - * 128 bits vector size to use with unsigned 32 bits elements. 
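A minimal sketch of the ticketlock API above, which grants the lock in first-come, first-served order. The protected queue head is hypothetical; the API is experimental (ALLOW_EXPERIMENTAL_API).

#include <rte_ticketlock.h>

static rte_ticketlock_t q_lock = RTE_TICKETLOCK_INITIALIZER;
static unsigned int q_head;

static unsigned int
take_slot(void)
{
	unsigned int slot;

	rte_ticketlock_lock(&q_lock);    /* waiters are served in ticket order */
	slot = q_head++;
	rte_ticketlock_unlock(&q_lock);
	return slot;
}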
- * - * a = (rte_v128u32_t){ a0, a1, a2, a3 } - */ -typedef uint32_t rte_v128u32_t __attribute__((vector_size(16), aligned(16))); - -/** - * 128 bits vector size to use with unsigned 64 bits elements. - * - * a = (rte_v128u64_t){ a0, a1 } - */ -typedef uint64_t rte_v128u64_t __attribute__((vector_size(16), aligned(16))); - -/** - * 256 bits vector size to use with unsigned 8 bits elements. - * - * a = (rte_v256u8_t){ a00, a01, a02, a03, a04, a05, a06, a07, - * a08, a09, a10, a11, a12, a13, a14, a15, - * a16, a17, a18, a19, a20, a21, a22, a23, - * a24, a25, a26, a27, a28, a29, a30, a31 } - */ -typedef uint8_t rte_v256u8_t __attribute__((vector_size(32), aligned(32))); - -/** - * 256 bits vector size to use with unsigned 16 bits elements. - * - * a = (rte_v256u16_t){ a00, a01, a02, a03, a04, a05, a06, a07, - * a08, a09, a10, a11, a12, a13, a14, a15 } - */ -typedef uint16_t rte_v256u16_t __attribute__((vector_size(32), aligned(32))); - -/** - * 256 bits vector size to use with unsigned 32 bits elements. - * - * a = (rte_v256u32_t){ a0, a1, a2, a3, a4, a5, a6, a7 } - */ -typedef uint32_t rte_v256u32_t __attribute__((vector_size(32), aligned(32))); - -/** - * 256 bits vector size to use with unsigned 64 bits elements. - * - * a = (rte_v256u64_t){ a0, a1, a2, a3 } - */ -typedef uint64_t rte_v256u64_t __attribute__((vector_size(32), aligned(32))); - - -/* Signed vector types */ - -/** - * 64 bits vector size to use with 8 bits elements. - * - * a = (rte_v64s8_t){ a0, a1, a2, a3, a4, a5, a6, a7 } - */ -typedef int8_t rte_v64s8_t __attribute__((vector_size(8), aligned(8))); - -/** - * 64 bits vector size to use with 16 bits elements. - * - * a = (rte_v64s16_t){ a0, a1, a2, a3 } - */ -typedef int16_t rte_v64s16_t __attribute__((vector_size(8), aligned(8))); - -/** - * 64 bits vector size to use with 32 bits elements. - * - * a = (rte_v64s32_t){ a0, a1 } - */ -typedef int32_t rte_v64s32_t __attribute__((vector_size(8), aligned(8))); - -/** - * 128 bits vector size to use with 8 bits elements. - * - * a = (rte_v128s8_t){ a00, a01, a02, a03, a04, a05, a06, a07, - * a08, a09, a10, a11, a12, a13, a14, a15 } - */ -typedef int8_t rte_v128s8_t __attribute__((vector_size(16), aligned(16))); - -/** - * 128 bits vector size to use with 16 bits elements. - * - * a = (rte_v128s16_t){ a0, a1, a2, a3, a4, a5, a6, a7 } - */ -typedef int16_t rte_v128s16_t __attribute__((vector_size(16), aligned(16))); - -/** - * 128 bits vector size to use with 32 bits elements. - * - * a = (rte_v128s32_t){ a0, a1, a2, a3 } - */ -typedef int32_t rte_v128s32_t __attribute__((vector_size(16), aligned(16))); - -/** - * 128 bits vector size to use with 64 bits elements. - * - * a = (rte_v128s64_t){ a1, a2 } - */ -typedef int64_t rte_v128s64_t __attribute__((vector_size(16), aligned(16))); - -/** - * 256 bits vector size to use with 8 bits elements. - * - * a = (rte_v256s8_t){ a00, a01, a02, a03, a04, a05, a06, a07, - * a08, a09, a10, a11, a12, a13, a14, a15, - * a16, a17, a18, a19, a20, a21, a22, a23, - * a24, a25, a26, a27, a28, a29, a30, a31 } - */ -typedef int8_t rte_v256s8_t __attribute__((vector_size(32), aligned(32))); - -/** - * 256 bits vector size to use with 16 bits elements. - * - * a = (rte_v256s16_t){ a00, a01, a02, a03, a04, a05, a06, a07, - * a08, a09, a10, a11, a12, a13, a14, a15 } - */ -typedef int16_t rte_v256s16_t __attribute__((vector_size(32), aligned(32))); - -/** - * 256 bits vector size to use with 32 bits elements. 
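A small sketch of how the generic vector typedefs above can be used: element-wise operators come from the GCC/clang vector extensions, not from this header. The add4() helper is hypothetical.

#include <rte_vect.h>

static rte_v128u32_t
add4(rte_v128u32_t a, rte_v128u32_t b)
{
	/* Element-wise addition across four 32-bit lanes. */
	return a + b;
}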
- * - * a = (rte_v256s32_t){ a0, a1, a2, a3, a4, a5, a6, a7 } - */ -typedef int32_t rte_v256s32_t __attribute__((vector_size(32), aligned(32))); - -/** - * 256 bits vector size to use with 64 bits elements. - * - * a = (rte_v256s64_t){ a0, a1, a2, a3 } - */ -typedef int64_t rte_v256s64_t __attribute__((vector_size(32), aligned(32))); - -#endif /* _RTE_VECT_H_ */ diff --git a/lib/librte_eal/common/include/rte_alarm.h b/lib/librte_eal/common/include/rte_alarm.h deleted file mode 100644 index 7e4d0b2407..0000000000 --- a/lib/librte_eal/common/include/rte_alarm.h +++ /dev/null @@ -1,77 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_ALARM_H_ -#define _RTE_ALARM_H_ - -/** - * @file - * - * Alarm functions - * - * Simple alarm-clock functionality supplied by eal. - * Does not require hpet support. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/** - * Signature of callback back function called when an alarm goes off. - */ -typedef void (*rte_eal_alarm_callback)(void *arg); - -/** - * Function to set a callback to be triggered when us microseconds - * have expired. Accuracy of timing to the microsecond is not guaranteed. The - * alarm function will not be called *before* the requested time, but may - * be called a short period of time afterwards. - * The alarm handler will be called only once. There is no need to call - * "rte_eal_alarm_cancel" from within the callback function. - * - * @param us - * The time in microseconds before the callback is called - * @param cb - * The function to be called when the alarm expires - * @param cb_arg - * Pointer parameter to be passed to the callback function - * - * @return - * On success, zero. - * On failure, a negative error number - */ -int rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb, void *cb_arg); - -/** - * Function to cancel an alarm callback which has been registered before. If - * used outside alarm callback it wait for all callbacks to finish execution. - * - * @param cb_fn - * alarm callback - * @param cb_arg - * Pointer parameter to be passed to the callback function. To remove all - * copies of a given callback function, irrespective of parameter, (void *)-1 - * can be used here. - * - * @return - * - value greater than 0 and rte_errno not changed - returned value is - * the number of canceled alarm callback functions - * - value greater or equal 0 and rte_errno set to EINPROGRESS, at least one - * alarm could not be canceled because cancellation was requested from alarm - * callback context. Returned value is the number of successfully canceled - * alarm callbacks - * - 0 and rte_errno set to ENOENT - no alarm found - * - -1 and rte_errno set to EINVAL - invalid parameter (NULL callback) - */ -int rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg); - -#ifdef __cplusplus -} -#endif - - -#endif /* _RTE_ALARM_H_ */ diff --git a/lib/librte_eal/common/include/rte_bitmap.h b/lib/librte_eal/common/include/rte_bitmap.h deleted file mode 100644 index 6b846f251b..0000000000 --- a/lib/librte_eal/common/include/rte_bitmap.h +++ /dev/null @@ -1,490 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef __INCLUDE_RTE_BITMAP_H__ -#define __INCLUDE_RTE_BITMAP_H__ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * @file - * RTE Bitmap - * - * The bitmap component provides a mechanism to manage large arrays of bits - * through bit get/set/clear and bit array scan operations. 
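A short sketch of the alarm API above: arm a one-shot callback 100 ms in the future and cancel any pending copies on shutdown. The callback, its argument, and the wrapper functions are hypothetical.

#include <stdio.h>
#include <rte_alarm.h>

static void
stats_cb(void *arg)                   /* runs once, shortly after 100 ms */
{
	printf("stats tick, arg=%p\n", arg);
	/* Re-arm here with rte_eal_alarm_set() if periodic behaviour is wanted. */
}

static void
start_stats_timer(void *ctx)
{
	if (rte_eal_alarm_set(100 * 1000 /* us */, stats_cb, ctx) < 0)
		printf("failed to arm alarm\n");
}

static void
stop_stats_timer(void)
{
	/* (void *)-1 removes all copies of the callback regardless of argument. */
	rte_eal_alarm_cancel(stats_cb, (void *)-1);
}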
- * - * The bitmap scan operation is optimized for 64-bit CPUs using 64/128 byte cache - * lines. The bitmap is hierarchically organized using two arrays (array1 and - * array2), with each bit in array1 being associated with a full cache line - * (512/1024 bits) of bitmap bits, which are stored in array2: the bit in array1 - * is set only when there is at least one bit set within its associated array2 - * bits, otherwise the bit in array1 is cleared. The read and write operations - * for array1 and array2 are always done in slabs of 64 bits. - * - * This bitmap is not thread safe. For lock free operation on a specific bitmap - * instance, a single writer thread performing bit set/clear operations is - * allowed, only the writer thread can do bitmap scan operations, while there - * can be several reader threads performing bit get operations in parallel with - * the writer thread. When the use of locking primitives is acceptable, the - * serialization of the bit set/clear and bitmap scan operations needs to be - * enforced by the caller, while the bit get operation does not require locking - * the bitmap. - * - ***/ - -#include -#include -#include -#include -#include -#include -#include - -/* Slab */ -#define RTE_BITMAP_SLAB_BIT_SIZE 64 -#define RTE_BITMAP_SLAB_BIT_SIZE_LOG2 6 -#define RTE_BITMAP_SLAB_BIT_MASK (RTE_BITMAP_SLAB_BIT_SIZE - 1) - -/* Cache line (CL) */ -#define RTE_BITMAP_CL_BIT_SIZE (RTE_CACHE_LINE_SIZE * 8) -#define RTE_BITMAP_CL_BIT_SIZE_LOG2 (RTE_CACHE_LINE_SIZE_LOG2 + 3) -#define RTE_BITMAP_CL_BIT_MASK (RTE_BITMAP_CL_BIT_SIZE - 1) - -#define RTE_BITMAP_CL_SLAB_SIZE (RTE_BITMAP_CL_BIT_SIZE / RTE_BITMAP_SLAB_BIT_SIZE) -#define RTE_BITMAP_CL_SLAB_SIZE_LOG2 (RTE_BITMAP_CL_BIT_SIZE_LOG2 - RTE_BITMAP_SLAB_BIT_SIZE_LOG2) -#define RTE_BITMAP_CL_SLAB_MASK (RTE_BITMAP_CL_SLAB_SIZE - 1) - -/** Bitmap data structure */ -struct rte_bitmap { - /* Context for array1 and array2 */ - uint64_t *array1; /**< Bitmap array1 */ - uint64_t *array2; /**< Bitmap array2 */ - uint32_t array1_size; /**< Number of 64-bit slabs in array1 that are actually used */ - uint32_t array2_size; /**< Number of 64-bit slabs in array2 */ - - /* Context for the "scan next" operation */ - uint32_t index1; /**< Bitmap scan: Index of current array1 slab */ - uint32_t offset1; /**< Bitmap scan: Offset of current bit within current array1 slab */ - uint32_t index2; /**< Bitmap scan: Index of current array2 slab */ - uint32_t go2; /**< Bitmap scan: Go/stop condition for current array2 cache line */ - - /* Storage space for array1 and array2 */ - uint8_t memory[]; -}; - -static inline void -__rte_bitmap_index1_inc(struct rte_bitmap *bmp) -{ - bmp->index1 = (bmp->index1 + 1) & (bmp->array1_size - 1); -} - -static inline uint64_t -__rte_bitmap_mask1_get(struct rte_bitmap *bmp) -{ - return (~1llu) << bmp->offset1; -} - -static inline void -__rte_bitmap_index2_set(struct rte_bitmap *bmp) -{ - bmp->index2 = (((bmp->index1 << RTE_BITMAP_SLAB_BIT_SIZE_LOG2) + bmp->offset1) << RTE_BITMAP_CL_SLAB_SIZE_LOG2); -} - -static inline uint32_t -__rte_bitmap_get_memory_footprint(uint32_t n_bits, - uint32_t *array1_byte_offset, uint32_t *array1_slabs, - uint32_t *array2_byte_offset, uint32_t *array2_slabs) -{ - uint32_t n_slabs_context, n_slabs_array1, n_cache_lines_context_and_array1; - uint32_t n_cache_lines_array2; - uint32_t n_bytes_total; - - n_cache_lines_array2 = (n_bits + RTE_BITMAP_CL_BIT_SIZE - 1) / RTE_BITMAP_CL_BIT_SIZE; - n_slabs_array1 = (n_cache_lines_array2 + RTE_BITMAP_SLAB_BIT_SIZE - 1) / RTE_BITMAP_SLAB_BIT_SIZE; - 
n_slabs_array1 = rte_align32pow2(n_slabs_array1); - n_slabs_context = (sizeof(struct rte_bitmap) + (RTE_BITMAP_SLAB_BIT_SIZE / 8) - 1) / (RTE_BITMAP_SLAB_BIT_SIZE / 8); - n_cache_lines_context_and_array1 = (n_slabs_context + n_slabs_array1 + RTE_BITMAP_CL_SLAB_SIZE - 1) / RTE_BITMAP_CL_SLAB_SIZE; - n_bytes_total = (n_cache_lines_context_and_array1 + n_cache_lines_array2) * RTE_CACHE_LINE_SIZE; - - if (array1_byte_offset) { - *array1_byte_offset = n_slabs_context * (RTE_BITMAP_SLAB_BIT_SIZE / 8); - } - if (array1_slabs) { - *array1_slabs = n_slabs_array1; - } - if (array2_byte_offset) { - *array2_byte_offset = n_cache_lines_context_and_array1 * RTE_CACHE_LINE_SIZE; - } - if (array2_slabs) { - *array2_slabs = n_cache_lines_array2 * RTE_BITMAP_CL_SLAB_SIZE; - } - - return n_bytes_total; -} - -static inline void -__rte_bitmap_scan_init(struct rte_bitmap *bmp) -{ - bmp->index1 = bmp->array1_size - 1; - bmp->offset1 = RTE_BITMAP_SLAB_BIT_SIZE - 1; - __rte_bitmap_index2_set(bmp); - bmp->index2 += RTE_BITMAP_CL_SLAB_SIZE; - - bmp->go2 = 0; -} - -/** - * Bitmap memory footprint calculation - * - * @param n_bits - * Number of bits in the bitmap - * @return - * Bitmap memory footprint measured in bytes on success, 0 on error - */ -static inline uint32_t -rte_bitmap_get_memory_footprint(uint32_t n_bits) { - /* Check input arguments */ - if (n_bits == 0) { - return 0; - } - - return __rte_bitmap_get_memory_footprint(n_bits, NULL, NULL, NULL, NULL); -} - -/** - * Bitmap initialization - * - * @param n_bits - * Number of pre-allocated bits in array2. - * @param mem - * Base address of array1 and array2. - * @param mem_size - * Minimum expected size of bitmap. - * @return - * Handle to bitmap instance. - */ -static inline struct rte_bitmap * -rte_bitmap_init(uint32_t n_bits, uint8_t *mem, uint32_t mem_size) -{ - struct rte_bitmap *bmp; - uint32_t array1_byte_offset, array1_slabs, array2_byte_offset, array2_slabs; - uint32_t size; - - /* Check input arguments */ - if (n_bits == 0) { - return NULL; - } - - if ((mem == NULL) || (((uintptr_t) mem) & RTE_CACHE_LINE_MASK)) { - return NULL; - } - - size = __rte_bitmap_get_memory_footprint(n_bits, - &array1_byte_offset, &array1_slabs, - &array2_byte_offset, &array2_slabs); - if (size < mem_size) { - return NULL; - } - - /* Setup bitmap */ - memset(mem, 0, size); - bmp = (struct rte_bitmap *) mem; - - bmp->array1 = (uint64_t *) &mem[array1_byte_offset]; - bmp->array1_size = array1_slabs; - bmp->array2 = (uint64_t *) &mem[array2_byte_offset]; - bmp->array2_size = array2_slabs; - - __rte_bitmap_scan_init(bmp); - - return bmp; -} - -/** - * Bitmap free - * - * @param bmp - * Handle to bitmap instance - * @return - * 0 upon success, error code otherwise - */ -static inline int -rte_bitmap_free(struct rte_bitmap *bmp) -{ - /* Check input arguments */ - if (bmp == NULL) { - return -1; - } - - return 0; -} - -/** - * Bitmap reset - * - * @param bmp - * Handle to bitmap instance - */ -static inline void -rte_bitmap_reset(struct rte_bitmap *bmp) -{ - memset(bmp->array1, 0, bmp->array1_size * sizeof(uint64_t)); - memset(bmp->array2, 0, bmp->array2_size * sizeof(uint64_t)); - __rte_bitmap_scan_init(bmp); -} - -/** - * Bitmap location prefetch into CPU L1 cache - * - * @param bmp - * Handle to bitmap instance - * @param pos - * Bit position - * @return - * 0 upon success, error code otherwise - */ -static inline void -rte_bitmap_prefetch0(struct rte_bitmap *bmp, uint32_t pos) -{ - uint64_t *slab2; - uint32_t index2; - - index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; - slab2 = 
bmp->array2 + index2; - rte_prefetch0((void *) slab2); -} - -/** - * Bitmap bit get - * - * @param bmp - * Handle to bitmap instance - * @param pos - * Bit position - * @return - * 0 when bit is cleared, non-zero when bit is set - */ -static inline uint64_t -rte_bitmap_get(struct rte_bitmap *bmp, uint32_t pos) -{ - uint64_t *slab2; - uint32_t index2, offset2; - - index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; - offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK; - slab2 = bmp->array2 + index2; - return (*slab2) & (1llu << offset2); -} - -/** - * Bitmap bit set - * - * @param bmp - * Handle to bitmap instance - * @param pos - * Bit position - */ -static inline void -rte_bitmap_set(struct rte_bitmap *bmp, uint32_t pos) -{ - uint64_t *slab1, *slab2; - uint32_t index1, index2, offset1, offset2; - - /* Set bit in array2 slab and set bit in array1 slab */ - index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; - offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK; - index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2); - offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK; - slab2 = bmp->array2 + index2; - slab1 = bmp->array1 + index1; - - *slab2 |= 1llu << offset2; - *slab1 |= 1llu << offset1; -} - -/** - * Bitmap slab set - * - * @param bmp - * Handle to bitmap instance - * @param pos - * Bit position identifying the array2 slab - * @param slab - * Value to be assigned to the 64-bit slab in array2 - */ -static inline void -rte_bitmap_set_slab(struct rte_bitmap *bmp, uint32_t pos, uint64_t slab) -{ - uint64_t *slab1, *slab2; - uint32_t index1, index2, offset1; - - /* Set bits in array2 slab and set bit in array1 slab */ - index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; - index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2); - offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK; - slab2 = bmp->array2 + index2; - slab1 = bmp->array1 + index1; - - *slab2 |= slab; - *slab1 |= 1llu << offset1; -} - -static inline uint64_t -__rte_bitmap_line_not_empty(uint64_t *slab2) -{ - uint64_t v1, v2, v3, v4; - - v1 = slab2[0] | slab2[1]; - v2 = slab2[2] | slab2[3]; - v3 = slab2[4] | slab2[5]; - v4 = slab2[6] | slab2[7]; - v1 |= v2; - v3 |= v4; - - return v1 | v3; -} - -/** - * Bitmap bit clear - * - * @param bmp - * Handle to bitmap instance - * @param pos - * Bit position - */ -static inline void -rte_bitmap_clear(struct rte_bitmap *bmp, uint32_t pos) -{ - uint64_t *slab1, *slab2; - uint32_t index1, index2, offset1, offset2; - - /* Clear bit in array2 slab */ - index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; - offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK; - slab2 = bmp->array2 + index2; - - /* Return if array2 slab is not all-zeros */ - *slab2 &= ~(1llu << offset2); - if (*slab2){ - return; - } - - /* Check the entire cache line of array2 for all-zeros */ - index2 &= ~ RTE_BITMAP_CL_SLAB_MASK; - slab2 = bmp->array2 + index2; - if (__rte_bitmap_line_not_empty(slab2)) { - return; - } - - /* The array2 cache line is all-zeros, so clear bit in array1 slab */ - index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2); - offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK; - slab1 = bmp->array1 + index1; - *slab1 &= ~(1llu << offset1); - - return; -} - -static inline int -__rte_bitmap_scan_search(struct rte_bitmap *bmp) -{ - uint64_t value1; - uint32_t i; - - /* Check current array1 slab */ - value1 = bmp->array1[bmp->index1]; - value1 &= __rte_bitmap_mask1_get(bmp); - - if (rte_bsf64_safe(value1, &bmp->offset1)) - 
return 1; - - __rte_bitmap_index1_inc(bmp); - bmp->offset1 = 0; - - /* Look for another array1 slab */ - for (i = 0; i < bmp->array1_size; i ++, __rte_bitmap_index1_inc(bmp)) { - value1 = bmp->array1[bmp->index1]; - - if (rte_bsf64_safe(value1, &bmp->offset1)) - return 1; - } - - return 0; -} - -static inline void -__rte_bitmap_scan_read_init(struct rte_bitmap *bmp) -{ - __rte_bitmap_index2_set(bmp); - bmp->go2 = 1; - rte_prefetch1((void *)(bmp->array2 + bmp->index2 + 8)); -} - -static inline int -__rte_bitmap_scan_read(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) -{ - uint64_t *slab2; - - slab2 = bmp->array2 + bmp->index2; - for ( ; bmp->go2 ; bmp->index2 ++, slab2 ++, bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK) { - if (*slab2) { - *pos = bmp->index2 << RTE_BITMAP_SLAB_BIT_SIZE_LOG2; - *slab = *slab2; - - bmp->index2 ++; - slab2 ++; - bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK; - return 1; - } - } - - return 0; -} - -/** - * Bitmap scan (with automatic wrap-around) - * - * @param bmp - * Handle to bitmap instance - * @param pos - * When function call returns 1, pos contains the position of the next set - * bit, otherwise not modified - * @param slab - * When function call returns 1, slab contains the value of the entire 64-bit - * slab where the bit indicated by pos is located. Slabs are always 64-bit - * aligned, so the position of the first bit of the slab (this bit is not - * necessarily set) is pos / 64. Once a slab has been returned by the bitmap - * scan operation, the internal pointers of the bitmap are updated to point - * after this slab, so the same slab will not be returned again if it - * contains more than one bit which is set. When function call returns 0, - * slab is not modified. - * @return - * 0 if there is no bit set in the bitmap, 1 otherwise - */ -static inline int -rte_bitmap_scan(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) -{ - /* Return data from current array2 line if available */ - if (__rte_bitmap_scan_read(bmp, pos, slab)) { - return 1; - } - - /* Look for non-empty array2 line */ - if (__rte_bitmap_scan_search(bmp)) { - __rte_bitmap_scan_read_init(bmp); - __rte_bitmap_scan_read(bmp, pos, slab); - return 1; - } - - /* Empty bitmap */ - return 0; -} - -#ifdef __cplusplus -} -#endif - -#endif /* __INCLUDE_RTE_BITMAP_H__ */ diff --git a/lib/librte_eal/common/include/rte_branch_prediction.h b/lib/librte_eal/common/include/rte_branch_prediction.h deleted file mode 100644 index 854ef9e5dd..0000000000 --- a/lib/librte_eal/common/include/rte_branch_prediction.h +++ /dev/null @@ -1,41 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -/** - * @file - * Branch Prediction Helpers in RTE - */ - -#ifndef _RTE_BRANCH_PREDICTION_H_ -#define _RTE_BRANCH_PREDICTION_H_ - -/** - * Check if a branch is likely to be taken. - * - * This compiler builtin allows the developer to indicate if a branch is - * likely to be taken. Example: - * - * if (likely(x > 1)) - * do_stuff(); - * - */ -#ifndef likely -#define likely(x) __builtin_expect(!!(x), 1) -#endif /* likely */ - -/** - * Check if a branch is unlikely to be taken. - * - * This compiler builtin allows the developer to indicate if a branch is - * unlikely to be taken. 
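A compact sketch of the bitmap life cycle defined above: size the backing memory, initialise, set a few bits, then scan them back. The use of rte_zmalloc() for the cache-line-aligned buffer and the bitmap_demo() wrapper are assumptions for illustration; any suitably aligned allocator would do.

#include <stdint.h>
#include <rte_bitmap.h>
#include <rte_malloc.h>

static void
bitmap_demo(void)
{
	uint32_t n_bits = 4096;
	uint32_t size = rte_bitmap_get_memory_footprint(n_bits);
	uint8_t *mem = rte_zmalloc("bmp", size, RTE_CACHE_LINE_SIZE);
	struct rte_bitmap *bmp = rte_bitmap_init(n_bits, mem, size);
	uint32_t pos;
	uint64_t slab;

	if (bmp == NULL) {
		rte_free(mem);
		return;
	}

	rte_bitmap_set(bmp, 7);
	rte_bitmap_set(bmp, 1000);

	/* Each scan returns one non-empty 64-bit slab; per the implementation
	 * above, pos is the base bit position of that slab, so the lowest set
	 * bit is at pos + ctz(slab).
	 */
	while (rte_bitmap_scan(bmp, &pos, &slab))
		rte_bitmap_clear(bmp, pos + __builtin_ctzll(slab));

	rte_bitmap_free(bmp);
	rte_free(mem);
}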
Example: - * - * if (unlikely(x < 1)) - * do_stuff(); - * - */ -#ifndef unlikely -#define unlikely(x) __builtin_expect(!!(x), 0) -#endif /* unlikely */ - -#endif /* _RTE_BRANCH_PREDICTION_H_ */ diff --git a/lib/librte_eal/common/include/rte_bus.h b/lib/librte_eal/common/include/rte_bus.h deleted file mode 100644 index d3034d0edf..0000000000 --- a/lib/librte_eal/common/include/rte_bus.h +++ /dev/null @@ -1,389 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2016 NXP - */ - -#ifndef _RTE_BUS_H_ -#define _RTE_BUS_H_ - -/** - * @file - * - * DPDK device bus interface - * - * This file exposes API and interfaces for bus abstraction - * over the devices and drivers in EAL. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#include -#include - -/** Double linked list of buses */ -TAILQ_HEAD(rte_bus_list, rte_bus); - - -/** - * IOVA mapping mode. - * - * IOVA mapping mode is iommu programming mode of a device. - * That device (for example: IOMMU backed DMA device) based - * on rte_iova_mode will generate physical or virtual address. - * - */ -enum rte_iova_mode { - RTE_IOVA_DC = 0, /* Don't care mode */ - RTE_IOVA_PA = (1 << 0), /* DMA using physical address */ - RTE_IOVA_VA = (1 << 1) /* DMA using virtual address */ -}; - -/** - * Bus specific scan for devices attached on the bus. - * For each bus object, the scan would be responsible for finding devices and - * adding them to its private device list. - * - * A bus should mandatorily implement this method. - * - * @return - * 0 for successful scan - * <0 for unsuccessful scan with error value - */ -typedef int (*rte_bus_scan_t)(void); - -/** - * Implementation specific probe function which is responsible for linking - * devices on that bus with applicable drivers. - * - * This is called while iterating over each registered bus. - * - * @return - * 0 for successful probe - * !0 for any error while probing - */ -typedef int (*rte_bus_probe_t)(void); - -/** - * Device iterator to find a device on a bus. - * - * This function returns an rte_device if one of those held by the bus - * matches the data passed as parameter. - * - * If the comparison function returns zero this function should stop iterating - * over any more devices. To continue a search the device of a previous search - * can be passed via the start parameter. - * - * @param cmp - * Comparison function. - * - * @param data - * Data to compare each device against. - * - * @param start - * starting point for the iteration - * - * @return - * The first device matching the data, NULL if none exists. - */ -typedef struct rte_device * -(*rte_bus_find_device_t)(const struct rte_device *start, rte_dev_cmp_t cmp, - const void *data); - -/** - * Implementation specific probe function which is responsible for linking - * devices on that bus with applicable drivers. - * - * @param dev - * Device pointer that was returned by a previous call to find_device. - * - * @return - * 0 on success. - * !0 on error. - */ -typedef int (*rte_bus_plug_t)(struct rte_device *dev); - -/** - * Implementation specific remove function which is responsible for unlinking - * devices on that bus from assigned driver. - * - * @param dev - * Device pointer that was returned by a previous call to find_device. - * - * @return - * 0 on success. - * !0 on error. - */ -typedef int (*rte_bus_unplug_t)(struct rte_device *dev); - -/** - * Bus specific parsing function. 
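A short illustration of how the likely()/unlikely() hints above are typically placed on a fast path; the function and its parameters are invented for the example:

#include <rte_branch_prediction.h>

static int
handle_rx(int nb_rx)
{
    /* keep the error path out of the predicted hot path */
    if (unlikely(nb_rx < 0))
        return -1;

    if (likely(nb_rx > 0)) {
        /* ... process the received packets ... */
    }
    return nb_rx;
}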
- * Validates the syntax used in the textual representation of a device, - * If the syntax is valid and ``addr`` is not NULL, writes the bus-specific - * device representation to ``addr``. - * - * @param[in] name - * device textual description - * - * @param[out] addr - * device information location address, into which parsed info - * should be written. If NULL, nothing should be written, which - * is not an error. - * - * @return - * 0 if parsing was successful. - * !0 for any error. - */ -typedef int (*rte_bus_parse_t)(const char *name, void *addr); - -/** - * Device level DMA map function. - * After a successful call, the memory segment will be mapped to the - * given device. - * - * @param dev - * Device pointer. - * @param addr - * Virtual address to map. - * @param iova - * IOVA address to map. - * @param len - * Length of the memory segment being mapped. - * - * @return - * 0 if mapping was successful. - * Negative value and rte_errno is set otherwise. - */ -typedef int (*rte_dev_dma_map_t)(struct rte_device *dev, void *addr, - uint64_t iova, size_t len); - -/** - * Device level DMA unmap function. - * After a successful call, the memory segment will no longer be - * accessible by the given device. - * - * @param dev - * Device pointer. - * @param addr - * Virtual address to unmap. - * @param iova - * IOVA address to unmap. - * @param len - * Length of the memory segment being mapped. - * - * @return - * 0 if un-mapping was successful. - * Negative value and rte_errno is set otherwise. - */ -typedef int (*rte_dev_dma_unmap_t)(struct rte_device *dev, void *addr, - uint64_t iova, size_t len); - -/** - * Implement a specific hot-unplug handler, which is responsible for - * handle the failure when device be hot-unplugged. When the event of - * hot-unplug be detected, it could call this function to handle - * the hot-unplug failure and avoid app crash. - * @param dev - * Pointer of the device structure. - * - * @return - * 0 on success. - * !0 on error. - */ -typedef int (*rte_bus_hot_unplug_handler_t)(struct rte_device *dev); - -/** - * Implement a specific sigbus handler, which is responsible for handling - * the sigbus error which is either original memory error, or specific memory - * error that caused of device be hot-unplugged. When sigbus error be captured, - * it could call this function to handle sigbus error. - * @param failure_addr - * Pointer of the fault address of the sigbus error. - * - * @return - * 0 for success handle the sigbus for hot-unplug. - * 1 for not process it, because it is a generic sigbus error. - * -1 for failed to handle the sigbus for hot-unplug. - */ -typedef int (*rte_bus_sigbus_handler_t)(const void *failure_addr); - -/** - * Bus scan policies - */ -enum rte_bus_scan_mode { - RTE_BUS_SCAN_UNDEFINED, - RTE_BUS_SCAN_WHITELIST, - RTE_BUS_SCAN_BLACKLIST, -}; - -/** - * A structure used to configure bus operations. - */ -struct rte_bus_conf { - enum rte_bus_scan_mode scan_mode; /**< Scan policy. */ -}; - - -/** - * Get common iommu class of the all the devices on the bus. The bus may - * check that those devices are attached to iommu driver. - * If no devices are attached to the bus. The bus may return with don't care - * (_DC) value. - * Otherwise, The bus will return appropriate _pa or _va iova mode. - * - * @return - * enum rte_iova_mode value. - */ -typedef enum rte_iova_mode (*rte_bus_get_iommu_class_t)(void); - - -/** - * A structure describing a generic bus. 
- */ -struct rte_bus { - TAILQ_ENTRY(rte_bus) next; /**< Next bus object in linked list */ - const char *name; /**< Name of the bus */ - rte_bus_scan_t scan; /**< Scan for devices attached to bus */ - rte_bus_probe_t probe; /**< Probe devices on bus */ - rte_bus_find_device_t find_device; /**< Find a device on the bus */ - rte_bus_plug_t plug; /**< Probe single device for drivers */ - rte_bus_unplug_t unplug; /**< Remove single device from driver */ - rte_bus_parse_t parse; /**< Parse a device name */ - rte_dev_dma_map_t dma_map; /**< DMA map for device in the bus */ - rte_dev_dma_unmap_t dma_unmap; /**< DMA unmap for device in the bus */ - struct rte_bus_conf conf; /**< Bus configuration */ - rte_bus_get_iommu_class_t get_iommu_class; /**< Get iommu class */ - rte_dev_iterate_t dev_iterate; /**< Device iterator. */ - rte_bus_hot_unplug_handler_t hot_unplug_handler; - /**< handle hot-unplug failure on the bus */ - rte_bus_sigbus_handler_t sigbus_handler; - /**< handle sigbus error on the bus */ - -}; - -/** - * Register a Bus handler. - * - * @param bus - * A pointer to a rte_bus structure describing the bus - * to be registered. - */ -void rte_bus_register(struct rte_bus *bus); - -/** - * Unregister a Bus handler. - * - * @param bus - * A pointer to a rte_bus structure describing the bus - * to be unregistered. - */ -void rte_bus_unregister(struct rte_bus *bus); - -/** - * Scan all the buses. - * - * @return - * 0 in case of success in scanning all buses - * !0 in case of failure to scan - */ -int rte_bus_scan(void); - -/** - * For each device on the buses, perform a driver 'match' and call the - * driver-specific probe for device initialization. - * - * @return - * 0 for successful match/probe - * !0 otherwise - */ -int rte_bus_probe(void); - -/** - * Dump information of all the buses registered with EAL. - * - * @param f - * A valid and open output stream handle - */ -void rte_bus_dump(FILE *f); - -/** - * Bus comparison function. - * - * @param bus - * Bus under test. - * - * @param data - * Data to compare against. - * - * @return - * 0 if the bus matches the data. - * !0 if the bus does not match. - * <0 if ordering is possible and the bus is lower than the data. - * >0 if ordering is possible and the bus is greater than the data. - */ -typedef int (*rte_bus_cmp_t)(const struct rte_bus *bus, const void *data); - -/** - * Bus iterator to find a particular bus. - * - * This function compares each registered bus to find one that matches - * the data passed as parameter. - * - * If the comparison function returns zero this function will stop iterating - * over any more buses. To continue a search the bus of a previous search can - * be passed via the start parameter. - * - * @param start - * Starting point for the iteration. - * - * @param cmp - * Comparison function. - * - * @param data - * Data to pass to comparison function. - * - * @return - * A pointer to a rte_bus structure or NULL in case no bus matches - */ -struct rte_bus *rte_bus_find(const struct rte_bus *start, rte_bus_cmp_t cmp, - const void *data); - -/** - * Find the registered bus for a particular device. - */ -struct rte_bus *rte_bus_find_by_device(const struct rte_device *dev); - -/** - * Find the registered bus for a given name. - */ -struct rte_bus *rte_bus_find_by_name(const char *busname); - - -/** - * Get the common iommu class of devices bound on to buses available in the - * system. RTE_IOVA_DC means that no preference has been expressed. - * - * @return - * enum rte_iova_mode value. 
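To make the hook types and the rte_bus structure above concrete, a hedged skeleton of a bus driver follows; the bus name "mybus" and the empty callbacks are placeholders, only the mandatory scan/probe hooks plus find_device are filled in, and RTE_REGISTER_BUS() is the registration helper defined a few lines further down in this header:

#include <rte_bus.h>

static int
mybus_scan(void)
{
    /* enumerate devices and add them to the bus-private device list */
    return 0;
}

static int
mybus_probe(void)
{
    /* match the scanned devices against the drivers registered on the bus */
    return 0;
}

static struct rte_device *
mybus_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
                  const void *data)
{
    (void)start;
    (void)cmp;
    (void)data;
    return NULL;    /* no devices in this skeleton */
}

static struct rte_bus mybus = {
    .scan = mybus_scan,
    .probe = mybus_probe,
    .find_device = mybus_find_device,
};

RTE_REGISTER_BUS(mybus, mybus);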
- */ -enum rte_iova_mode rte_bus_get_iommu_class(void); - -/** - * Helper for Bus registration. - * The constructor has higher priority than PMD constructors. - */ -#define RTE_REGISTER_BUS(nm, bus) \ -RTE_INIT_PRIO(businitfn_ ##nm, BUS) \ -{\ - (bus).name = RTE_STR(nm);\ - rte_bus_register(&bus); \ -} - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_BUS_H */ diff --git a/lib/librte_eal/common/include/rte_class.h b/lib/librte_eal/common/include/rte_class.h deleted file mode 100644 index 856d09b22d..0000000000 --- a/lib/librte_eal/common/include/rte_class.h +++ /dev/null @@ -1,134 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018 Gaëtan Rivet - */ - -#ifndef _RTE_CLASS_H_ -#define _RTE_CLASS_H_ - -/** - * @file - * - * DPDK device class interface. - * - * This file describes the interface of the device class - * abstraction layer. - * - * A device class defines the type of function a device - * will be used for e.g.: Ethernet adapter (eth), - * cryptographic co-processor (crypto), etc. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -#include - -/** Double linked list of classes */ -TAILQ_HEAD(rte_class_list, rte_class); - -/** - * A structure describing a generic device class. - */ -struct rte_class { - TAILQ_ENTRY(rte_class) next; /**< Next device class in linked list */ - const char *name; /**< Name of the class */ - rte_dev_iterate_t dev_iterate; /**< Device iterator. */ -}; - -/** - * Class comparison function. - * - * @param cls - * Class under test. - * - * @param data - * Data to compare against. - * - * @return - * 0 if the class matches the data. - * !0 if the class does not match. - * <0 if ordering is possible and the class is lower than the data. - * >0 if ordering is possible and the class is greater than the data. - */ -typedef int (*rte_class_cmp_t)(const struct rte_class *cls, const void *data); - -/** - * Class iterator to find a particular class. - * - * This function compares each registered class to find one that matches - * the data passed as parameter. - * - * If the comparison function returns zero this function will stop iterating - * over any more classes. To continue a search the class of a previous search - * can be passed via the start parameter. - * - * @param start - * Starting point for the iteration. - * - * @param cmp - * Comparison function. - * - * @param data - * Data to pass to comparison function. - * - * @return - * A pointer to a rte_class structure or NULL in case no class matches - */ -__rte_experimental -struct rte_class * -rte_class_find(const struct rte_class *start, rte_class_cmp_t cmp, - const void *data); - -/** - * Find the registered class for a given name. - */ -__rte_experimental -struct rte_class * -rte_class_find_by_name(const char *name); - -/** - * Register a Class handle. - * - * @param cls - * A pointer to a rte_class structure describing the class - * to be registered. - */ -__rte_experimental -void rte_class_register(struct rte_class *cls); - -/** - * Unregister a Class handle. - * - * @param cls - * A pointer to a rte_class structure describing the class - * to be unregistered. - */ -__rte_experimental -void rte_class_unregister(struct rte_class *cls); - -/** - * Helper for Class registration. - * The constructor has lower priority than Bus constructors. - * The constructor has higher priority than PMD constructors. 
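Similarly for the class API above, a hedged sketch (experimental API, so ALLOW_EXPERIMENTAL_API must be defined; the class name "myclass" is a placeholder and the dev_iterate hook is omitted for brevity):

#include <stddef.h>
#include <rte_class.h>

static struct rte_class my_class = {
    .name = "myclass",
};

static void
register_my_class(void)
{
    /* usually wrapped in the RTE_REGISTER_CLASS() constructor helper
     * defined just below */
    rte_class_register(&my_class);
}

static int
my_class_is_registered(void)
{
    return rte_class_find_by_name("myclass") != NULL;
}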
- */ -#define RTE_REGISTER_CLASS(nm, cls) \ -RTE_INIT_PRIO(classinitfn_ ##nm, CLASS) \ -{\ - (cls).name = RTE_STR(nm); \ - rte_class_register(&cls); \ -} - -#define RTE_UNREGISTER_CLASS(nm, cls) \ -RTE_FINI_PRIO(classfinifn_ ##nm, CLASS) \ -{ \ - rte_class_unregister(&cls); \ -} - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_CLASS_H_ */ diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h deleted file mode 100644 index f820c2eae2..0000000000 --- a/lib/librte_eal/common/include/rte_common.h +++ /dev/null @@ -1,823 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2019 Intel Corporation - */ - -#ifndef _RTE_COMMON_H_ -#define _RTE_COMMON_H_ - -/** - * @file - * - * Generic, commonly-used macro and inline function definitions - * for DPDK. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include -#include -#include - -#include - -/* OS specific include */ -#include - -#ifndef typeof -#define typeof __typeof__ -#endif - -#ifndef asm -#define asm __asm__ -#endif - -/** C extension macro for environments lacking C11 features. */ -#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L -#define RTE_STD_C11 __extension__ -#else -#define RTE_STD_C11 -#endif - -/* - * RTE_TOOLCHAIN_GCC is defined if the target is built with GCC, - * while a host application (like pmdinfogen) may have another compiler. - * RTE_CC_IS_GNU is true if the file is compiled with GCC, - * no matter it is a target or host application. - */ -#define RTE_CC_IS_GNU 0 -#if defined __clang__ -#define RTE_CC_CLANG -#elif defined __INTEL_COMPILER -#define RTE_CC_ICC -#elif defined __GNUC__ -#define RTE_CC_GCC -#undef RTE_CC_IS_GNU -#define RTE_CC_IS_GNU 1 -#endif -#if RTE_CC_IS_GNU -#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + \ - __GNUC_PATCHLEVEL__) -#endif - -#ifdef RTE_ARCH_STRICT_ALIGN -typedef uint64_t unaligned_uint64_t __attribute__ ((aligned(1))); -typedef uint32_t unaligned_uint32_t __attribute__ ((aligned(1))); -typedef uint16_t unaligned_uint16_t __attribute__ ((aligned(1))); -#else -typedef uint64_t unaligned_uint64_t; -typedef uint32_t unaligned_uint32_t; -typedef uint16_t unaligned_uint16_t; -#endif - -/** - * Force alignment - */ -#define __rte_aligned(a) __attribute__((__aligned__(a))) - -/** - * Force a structure to be packed - */ -#define __rte_packed __attribute__((__packed__)) - -/******* Macro to mark functions and fields scheduled for removal *****/ -#define __rte_deprecated __attribute__((__deprecated__)) - -/** - * Mark a function or variable to a weak reference. - */ -#define __rte_weak __attribute__((__weak__)) - -/*********** Macros to eliminate unused variable warnings ********/ - -/** - * short definition to mark a function parameter unused - */ -#define __rte_unused __attribute__((__unused__)) - -/** - * definition to mark a variable or function parameter as used so - * as to avoid a compiler warning - */ -#define RTE_SET_USED(x) (void)(x) - -/** - * Check format string and its arguments at compile-time. - * - * GCC on Windows assumes MS-specific format string by default, - * even if the underlying stdio implementation is ANSI-compliant, - * so this must be overridden. 
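A small sketch of how the attribute wrappers above are applied in practice; the structures and the function are invented for the example:

#include <stdint.h>
#include <rte_common.h>

/* keep the on-wire header packed, and the per-core counters 64-byte aligned */
struct wire_hdr {
    uint16_t type;
    uint32_t len;
} __rte_packed;

struct core_counters {
    uint64_t rx;
    uint64_t tx;
} __rte_aligned(64);

static void
poll_once(struct core_counters *cnt, __rte_unused void *ctx)
{
    RTE_SET_USED(cnt);  /* silence the unused warning until implemented */
}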
- */ -#if RTE_CC_IS_GNU -#define __rte_format_printf(format_index, first_arg) \ - __attribute__((format(gnu_printf, format_index, first_arg))) -#else -#define __rte_format_printf(format_index, first_arg) \ - __attribute__((format(printf, format_index, first_arg))) -#endif - -#define RTE_PRIORITY_LOG 101 -#define RTE_PRIORITY_BUS 110 -#define RTE_PRIORITY_CLASS 120 -#define RTE_PRIORITY_LAST 65535 - -#define RTE_PRIO(prio) \ - RTE_PRIORITY_ ## prio - -/** - * Run function before main() with high priority. - * - * @param func - * Constructor function. - * @param prio - * Priority number must be above 100. - * Lowest number is the first to run. - */ -#ifndef RTE_INIT_PRIO /* Allow to override from EAL */ -#define RTE_INIT_PRIO(func, prio) \ -static void __attribute__((constructor(RTE_PRIO(prio)), used)) func(void) -#endif - -/** - * Run function before main() with low priority. - * - * The constructor will be run after prioritized constructors. - * - * @param func - * Constructor function. - */ -#define RTE_INIT(func) \ - RTE_INIT_PRIO(func, LAST) - -/** - * Run after main() with low priority. - * - * @param func - * Destructor function name. - * @param prio - * Priority number must be above 100. - * Lowest number is the last to run. - */ -#ifndef RTE_FINI_PRIO /* Allow to override from EAL */ -#define RTE_FINI_PRIO(func, prio) \ -static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) -#endif - -/** - * Run after main() with high priority. - * - * The destructor will be run *before* prioritized destructors. - * - * @param func - * Destructor function name. - */ -#define RTE_FINI(func) \ - RTE_FINI_PRIO(func, LAST) - -/** - * Force a function to be inlined - */ -#define __rte_always_inline inline __attribute__((always_inline)) - -/** - * Force a function to be noinlined - */ -#define __rte_noinline __attribute__((noinline)) - -/*********** Macros for pointer arithmetic ********/ - -/** - * add a byte-value offset to a pointer - */ -#define RTE_PTR_ADD(ptr, x) ((void*)((uintptr_t)(ptr) + (x))) - -/** - * subtract a byte-value offset from a pointer - */ -#define RTE_PTR_SUB(ptr, x) ((void*)((uintptr_t)ptr - (x))) - -/** - * get the difference between two pointer values, i.e. how far apart - * in bytes are the locations they point two. It is assumed that - * ptr1 is greater than ptr2. - */ -#define RTE_PTR_DIFF(ptr1, ptr2) ((uintptr_t)(ptr1) - (uintptr_t)(ptr2)) - -/** - * Workaround to cast a const field of a structure to non-const type. - */ -#define RTE_CAST_FIELD(var, field, type) \ - (*(type *)((uintptr_t)(var) + offsetof(typeof(*(var)), field))) - -/*********** Macros/static functions for doing alignment ********/ - - -/** - * Macro to align a pointer to a given power-of-two. The resultant - * pointer will be a pointer of the same type as the first parameter, and - * point to an address no higher than the first parameter. Second parameter - * must be a power-of-two value. - */ -#define RTE_PTR_ALIGN_FLOOR(ptr, align) \ - ((typeof(ptr))RTE_ALIGN_FLOOR((uintptr_t)ptr, align)) - -/** - * Macro to align a value to a given power-of-two. The resultant value - * will be of the same type as the first parameter, and will be no - * bigger than the first parameter. Second parameter must be a - * power-of-two value. - */ -#define RTE_ALIGN_FLOOR(val, align) \ - (typeof(val))((val) & (~((typeof(val))((align) - 1)))) - -/** - * Macro to align a pointer to a given power-of-two. 
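The constructor and pointer-arithmetic helpers above compose as in the following hedged sketch; the buffer layout (a 64-byte header followed by payload, with len assumed to be at least 64) is made up for the example:

#include <stdio.h>
#include <stdint.h>
#include <rte_common.h>

RTE_INIT(announce_module)
{
    /* runs before main(), after the prioritized constructors */
    printf("example module loaded\n");
}

static size_t
payload_room(void *base, size_t len)
{
    /* skip a 64-byte header, then round the remaining room down to 8B */
    void *payload = RTE_PTR_ADD(base, 64);
    size_t used = RTE_PTR_DIFF(payload, base);

    return RTE_ALIGN_FLOOR(len - used, 8);
}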
The resultant - * pointer will be a pointer of the same type as the first parameter, and - * point to an address no lower than the first parameter. Second parameter - * must be a power-of-two value. - */ -#define RTE_PTR_ALIGN_CEIL(ptr, align) \ - RTE_PTR_ALIGN_FLOOR((typeof(ptr))RTE_PTR_ADD(ptr, (align) - 1), align) - -/** - * Macro to align a value to a given power-of-two. The resultant value - * will be of the same type as the first parameter, and will be no lower - * than the first parameter. Second parameter must be a power-of-two - * value. - */ -#define RTE_ALIGN_CEIL(val, align) \ - RTE_ALIGN_FLOOR(((val) + ((typeof(val)) (align) - 1)), align) - -/** - * Macro to align a pointer to a given power-of-two. The resultant - * pointer will be a pointer of the same type as the first parameter, and - * point to an address no lower than the first parameter. Second parameter - * must be a power-of-two value. - * This function is the same as RTE_PTR_ALIGN_CEIL - */ -#define RTE_PTR_ALIGN(ptr, align) RTE_PTR_ALIGN_CEIL(ptr, align) - -/** - * Macro to align a value to a given power-of-two. The resultant - * value will be of the same type as the first parameter, and - * will be no lower than the first parameter. Second parameter - * must be a power-of-two value. - * This function is the same as RTE_ALIGN_CEIL - */ -#define RTE_ALIGN(val, align) RTE_ALIGN_CEIL(val, align) - -/** - * Macro to align a value to the multiple of given value. The resultant - * value will be of the same type as the first parameter and will be no lower - * than the first parameter. - */ -#define RTE_ALIGN_MUL_CEIL(v, mul) \ - (((v + (typeof(v))(mul) - 1) / ((typeof(v))(mul))) * (typeof(v))(mul)) - -/** - * Macro to align a value to the multiple of given value. The resultant - * value will be of the same type as the first parameter and will be no higher - * than the first parameter. - */ -#define RTE_ALIGN_MUL_FLOOR(v, mul) \ - ((v / ((typeof(v))(mul))) * (typeof(v))(mul)) - -/** - * Macro to align value to the nearest multiple of the given value. - * The resultant value might be greater than or less than the first parameter - * whichever difference is the lowest. - */ -#define RTE_ALIGN_MUL_NEAR(v, mul) \ - ({ \ - typeof(v) ceil = RTE_ALIGN_MUL_CEIL(v, mul); \ - typeof(v) floor = RTE_ALIGN_MUL_FLOOR(v, mul); \ - (ceil - v) > (v - floor) ? floor : ceil; \ - }) - -/** - * Checks if a pointer is aligned to a given power-of-two value - * - * @param ptr - * The pointer whose alignment is to be checked - * @param align - * The power-of-two value to which the ptr should be aligned - * - * @return - * True(1) where the pointer is correctly aligned, false(0) otherwise - */ -static inline int -rte_is_aligned(void *ptr, unsigned align) -{ - return RTE_PTR_ALIGN(ptr, align) == ptr; -} - -/*********** Macros for compile type checks ********/ - -/** - * Triggers an error at compilation time if the condition is true. - */ -#define RTE_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) - -/*********** Cache line related macros ********/ - -/** Cache line mask. */ -#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) - -/** Return the first cache-aligned value greater or equal to size. 
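A hedged example of the value-alignment helpers and the compile-time check just defined; the descriptor-count policy is arbitrary:

#include <stdint.h>
#include <rte_common.h>

static uint32_t
pick_ring_size(uint32_t requested, void *ring_mem)
{
    /* descriptor count rounded up to a multiple of 32 */
    uint32_t count = RTE_ALIGN_MUL_CEIL(requested, 32);

    /* the backing memory is expected to be 64-byte aligned */
    if (!rte_is_aligned(ring_mem, 64))
        return 0;

    RTE_BUILD_BUG_ON(sizeof(uint64_t) != 8);    /* compile-time only */
    return count;
}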
*/ -#define RTE_CACHE_LINE_ROUNDUP(size) \ - (RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / \ - RTE_CACHE_LINE_SIZE)) - -/** Cache line size in terms of log2 */ -#if RTE_CACHE_LINE_SIZE == 64 -#define RTE_CACHE_LINE_SIZE_LOG2 6 -#elif RTE_CACHE_LINE_SIZE == 128 -#define RTE_CACHE_LINE_SIZE_LOG2 7 -#else -#error "Unsupported cache line size" -#endif - -/** Minimum Cache line size. */ -#define RTE_CACHE_LINE_MIN_SIZE 64 - -/** Force alignment to cache line. */ -#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE) - -/** Force minimum cache line alignment. */ -#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE) - -/*********** PA/IOVA type definitions ********/ - -/** Physical address */ -typedef uint64_t phys_addr_t; -#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1) - -/** - * IO virtual address type. - * When the physical addressing mode (IOVA as PA) is in use, - * the translation from an IO virtual address (IOVA) to a physical address - * is a direct mapping, i.e. the same value. - * Otherwise, in virtual mode (IOVA as VA), an IOMMU may do the translation. - */ -typedef uint64_t rte_iova_t; -#define RTE_BAD_IOVA ((rte_iova_t)-1) - -/*********** Structure alignment markers ********/ - -/** Generic marker for any place in a structure. */ -__extension__ typedef void *RTE_MARKER[0]; -/** Marker for 1B alignment in a structure. */ -__extension__ typedef uint8_t RTE_MARKER8[0]; -/** Marker for 2B alignment in a structure. */ -__extension__ typedef uint16_t RTE_MARKER16[0]; -/** Marker for 4B alignment in a structure. */ -__extension__ typedef uint32_t RTE_MARKER32[0]; -/** Marker for 8B alignment in a structure. */ -__extension__ typedef uint64_t RTE_MARKER64[0]; - -/** - * Combines 32b inputs most significant set bits into the least - * significant bits to construct a value with the same MSBs as x - * but all 1's under it. - * - * @param x - * The integer whose MSBs need to be combined with its LSBs - * @return - * The combined value. - */ -static inline uint32_t -rte_combine32ms1b(register uint32_t x) -{ - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - - return x; -} - -/** - * Combines 64b inputs most significant set bits into the least - * significant bits to construct a value with the same MSBs as x - * but all 1's under it. - * - * @param v - * The integer whose MSBs need to be combined with its LSBs - * @return - * The combined value. 
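The cache-line helpers above are commonly used to size and pad per-lcore data; a hedged sketch with an invented statistics structure:

#include <stdint.h>
#include <stddef.h>
#include <rte_common.h>

struct lcore_stats {
    uint64_t packets;
    uint64_t bytes;
} __rte_cache_aligned;

static size_t
stats_footprint(unsigned int nb_lcores)
{
    /* each element is rounded up to a whole number of cache lines */
    return (size_t)nb_lcores *
           RTE_CACHE_LINE_ROUNDUP(sizeof(struct lcore_stats));
}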
- */ -static inline uint64_t -rte_combine64ms1b(register uint64_t v) -{ - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - v |= v >> 32; - - return v; -} - -/*********** Macros to work with powers of 2 ********/ - -/** - * Macro to return 1 if n is a power of 2, 0 otherwise - */ -#define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n))) - -/** - * Returns true if n is a power of 2 - * @param n - * Number to check - * @return 1 if true, 0 otherwise - */ -static inline int -rte_is_power_of_2(uint32_t n) -{ - return n && !(n & (n - 1)); -} - -/** - * Aligns input parameter to the next power of 2 - * - * @param x - * The integer value to align - * - * @return - * Input parameter aligned to the next power of 2 - */ -static inline uint32_t -rte_align32pow2(uint32_t x) -{ - x--; - x = rte_combine32ms1b(x); - - return x + 1; -} - -/** - * Aligns input parameter to the previous power of 2 - * - * @param x - * The integer value to align - * - * @return - * Input parameter aligned to the previous power of 2 - */ -static inline uint32_t -rte_align32prevpow2(uint32_t x) -{ - x = rte_combine32ms1b(x); - - return x - (x >> 1); -} - -/** - * Aligns 64b input parameter to the next power of 2 - * - * @param v - * The 64b value to align - * - * @return - * Input parameter aligned to the next power of 2 - */ -static inline uint64_t -rte_align64pow2(uint64_t v) -{ - v--; - v = rte_combine64ms1b(v); - - return v + 1; -} - -/** - * Aligns 64b input parameter to the previous power of 2 - * - * @param v - * The 64b value to align - * - * @return - * Input parameter aligned to the previous power of 2 - */ -static inline uint64_t -rte_align64prevpow2(uint64_t v) -{ - v = rte_combine64ms1b(v); - - return v - (v >> 1); -} - -/*********** Macros for calculating min and max **********/ - -/** - * Macro to return the minimum of two numbers - */ -#define RTE_MIN(a, b) \ - __extension__ ({ \ - typeof (a) _a = (a); \ - typeof (b) _b = (b); \ - _a < _b ? _a : _b; \ - }) - -/** - * Macro to return the maximum of two numbers - */ -#define RTE_MAX(a, b) \ - __extension__ ({ \ - typeof (a) _a = (a); \ - typeof (b) _b = (b); \ - _a > _b ? _a : _b; \ - }) - -/*********** Other general functions / macros ********/ - -/** - * Searches the input parameter for the least significant set bit - * (starting from zero). - * If a least significant 1 bit is found, its bit index is returned. - * If the content of the input parameter is zero, then the content of the return - * value is undefined. - * @param v - * input parameter, should not be zero. - * @return - * least significant set bit in the input parameter. - */ -static inline uint32_t -rte_bsf32(uint32_t v) -{ - return (uint32_t)__builtin_ctz(v); -} - -/** - * Searches the input parameter for the least significant set bit - * (starting from zero). Safe version (checks for input parameter being zero). - * - * @warning ``pos`` must be a valid pointer. It is not checked! - * - * @param v - * The input parameter. - * @param pos - * If ``v`` was not 0, this value will contain position of least significant - * bit within the input parameter. - * @return - * Returns 0 if ``v`` was 0, otherwise returns 1. - */ -static inline int -rte_bsf32_safe(uint64_t v, uint32_t *pos) -{ - if (v == 0) - return 0; - - *pos = rte_bsf32(v); - return 1; -} - -/** - * Return the rounded-up log2 of a integer. - * - * @note Contrary to the logarithm mathematical operation, - * rte_log2_u32(0) == 0 and not -inf. - * - * @param v - * The input parameter. 
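Putting the power-of-two and min/max helpers above together, a hedged table-sizing sketch (the 64 and 1M bounds are arbitrary):

#include <stdint.h>
#include <rte_common.h>

static uint32_t
size_hash_table(uint32_t nb_flows)
{
    /* round up to a power of two, then clamp to sane bounds */
    uint32_t entries = rte_align32pow2(nb_flows);

    entries = RTE_MAX(entries, (uint32_t)64);
    entries = RTE_MIN(entries, (uint32_t)(1u << 20));

    return rte_is_power_of_2(entries) ? entries : 0;
}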
- * @return - * The rounded-up log2 of the input, or 0 if the input is 0. - */ -static inline uint32_t -rte_log2_u32(uint32_t v) -{ - if (v == 0) - return 0; - v = rte_align32pow2(v); - return rte_bsf32(v); -} - - -/** - * Return the last (most-significant) bit set. - * - * @note The last (most significant) bit is at position 32. - * @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32 - * - * @param x - * The input parameter. - * @return - * The last (most-significant) bit set, or 0 if the input is 0. - */ -static inline int -rte_fls_u32(uint32_t x) -{ - return (x == 0) ? 0 : 32 - __builtin_clz(x); -} - -/** - * Searches the input parameter for the least significant set bit - * (starting from zero). - * If a least significant 1 bit is found, its bit index is returned. - * If the content of the input parameter is zero, then the content of the return - * value is undefined. - * @param v - * input parameter, should not be zero. - * @return - * least significant set bit in the input parameter. - */ -static inline int -rte_bsf64(uint64_t v) -{ - return (uint32_t)__builtin_ctzll(v); -} - -/** - * Searches the input parameter for the least significant set bit - * (starting from zero). Safe version (checks for input parameter being zero). - * - * @warning ``pos`` must be a valid pointer. It is not checked! - * - * @param v - * The input parameter. - * @param pos - * If ``v`` was not 0, this value will contain position of least significant - * bit within the input parameter. - * @return - * Returns 0 if ``v`` was 0, otherwise returns 1. - */ -static inline int -rte_bsf64_safe(uint64_t v, uint32_t *pos) -{ - if (v == 0) - return 0; - - *pos = rte_bsf64(v); - return 1; -} - -/** - * Return the last (most-significant) bit set. - * - * @note The last (most significant) bit is at position 64. - * @note rte_fls_u64(0) = 0, rte_fls_u64(1) = 1, - * rte_fls_u64(0x8000000000000000) = 64 - * - * @param x - * The input parameter. - * @return - * The last (most-significant) bit set, or 0 if the input is 0. - */ -static inline int -rte_fls_u64(uint64_t x) -{ - return (x == 0) ? 0 : 64 - __builtin_clzll(x); -} - -/** - * Return the rounded-up log2 of a 64-bit integer. - * - * @note Contrary to the logarithm mathematical operation, - * rte_log2_u64(0) == 0 and not -inf. - * - * @param v - * The input parameter. - * @return - * The rounded-up log2 of the input, or 0 if the input is 0. - */ -static inline uint32_t -rte_log2_u64(uint64_t v) -{ - if (v == 0) - return 0; - v = rte_align64pow2(v); - /* we checked for v being 0 already, so no undefined behavior */ - return rte_bsf64(v); -} - -#ifndef offsetof -/** Return the offset of a field in a structure. */ -#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER) -#endif - -/** - * Return pointer to the wrapping struct instance. - * - * Example: - * - * struct wrapper { - * ... - * struct child c; - * ... - * }; - * - * struct child *x = obtain(...); - * struct wrapper *w = container_of(x, struct wrapper, c); - */ -#ifndef container_of -#define container_of(ptr, type, member) __extension__ ({ \ - const typeof(((type *)0)->member) *_ptr = (ptr); \ - __attribute__((unused)) type *_target_ptr = \ - (type *)(ptr); \ - (type *)(((uintptr_t)_ptr) - offsetof(type, member)); \ - }) -#endif - -/** - * Get the size of a field in a structure. - * - * @param type - * The type of the structure. - * @param field - * The field in the structure. - * @return - * The size of the field in the structure, in bytes. 
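The container_of() and log2 helpers above are typically used as below; the wrapping structure is invented for the example:

#include <stdint.h>
#include <rte_common.h>

struct poll_ctx {
    int fd;
};

struct my_adapter {
    uint32_t id;
    struct poll_ctx ctx;    /* embedded member */
};

static struct my_adapter *
adapter_from_ctx(struct poll_ctx *ctx)
{
    /* recover the wrapping structure from a pointer to its member */
    return container_of(ctx, struct my_adapter, ctx);
}

static uint32_t
desc_ring_log2(uint32_t nb_desc)
{
    /* rte_log2_u32(0) is defined to be 0, so no special casing needed */
    return rte_log2_u32(nb_desc);
}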
- */ -#define RTE_SIZEOF_FIELD(type, field) (sizeof(((type *)0)->field)) - -#define _RTE_STR(x) #x -/** Take a macro value and get a string version of it */ -#define RTE_STR(x) _RTE_STR(x) - -/** - * ISO C helpers to modify format strings using variadic macros. - * This is a replacement for the ", ## __VA_ARGS__" GNU extension. - * An empty %s argument is appended to avoid a dangling comma. - */ -#define RTE_FMT(fmt, ...) fmt "%.0s", __VA_ARGS__ "" -#define RTE_FMT_HEAD(fmt, ...) fmt -#define RTE_FMT_TAIL(fmt, ...) __VA_ARGS__ - -/** Mask value of type "tp" for the first "ln" bit set. */ -#define RTE_LEN2MASK(ln, tp) \ - ((tp)((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln)))) - -/** Number of elements in the array. */ -#define RTE_DIM(a) (sizeof (a) / sizeof ((a)[0])) - -/** - * Converts a numeric string to the equivalent uint64_t value. - * As well as straight number conversion, also recognises the suffixes - * k, m and g for kilobytes, megabytes and gigabytes respectively. - * - * If a negative number is passed in i.e. a string with the first non-black - * character being "-", zero is returned. Zero is also returned in the case of - * an error with the strtoull call in the function. - * - * @param str - * String containing number to convert. - * @return - * Number. - */ -static inline uint64_t -rte_str_to_size(const char *str) -{ - char *endptr; - unsigned long long size; - - while (isspace((int)*str)) - str++; - if (*str == '-') - return 0; - - errno = 0; - size = strtoull(str, &endptr, 0); - if (errno) - return 0; - - if (*endptr == ' ') - endptr++; /* allow 1 space gap */ - - switch (*endptr){ - case 'G': case 'g': size *= 1024; /* fall-through */ - case 'M': case 'm': size *= 1024; /* fall-through */ - case 'K': case 'k': size *= 1024; /* fall-through */ - default: - break; - } - return size; -} - -/** - * Function to terminate the application immediately, printing an error - * message and returning the exit_code back to the shell. - * - * This function never returns - * - * @param exit_code - * The exit code to be returned by the application - * @param format - * The format string to be used for printing the message. This can include - * printf format characters which will be expanded using any further parameters - * to the function. - */ -void -rte_exit(int exit_code, const char *format, ...) - __attribute__((noreturn)) - __rte_format_printf(2, 3); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/lib/librte_eal/common/include/rte_compat.h b/lib/librte_eal/common/include/rte_compat.h deleted file mode 100644 index 3eb33784b3..0000000000 --- a/lib/librte_eal/common/include/rte_compat.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015 Neil Horman . - * All rights reserved. 
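A hedged sketch combining RTE_DIM(), rte_str_to_size() and rte_exit() as declared above; the option table is invented:

#include <stdint.h>
#include <rte_common.h>

static const char *mem_opts[] = { "512M", "1G", "2G" };

static uint64_t
parse_mem_option(unsigned int idx)
{
    uint64_t bytes;

    if (idx >= RTE_DIM(mem_opts))
        rte_exit(1, "invalid memory option index %u\n", idx);

    /* "512M" -> 536870912, "1G" -> 1073741824, ... */
    bytes = rte_str_to_size(mem_opts[idx]);
    if (bytes == 0)
        rte_exit(1, "cannot parse size '%s'\n", mem_opts[idx]);
    return bytes;
}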
- */ - -#ifndef _RTE_COMPAT_H_ -#define _RTE_COMPAT_H_ - -#ifndef ALLOW_EXPERIMENTAL_API - -#define __rte_experimental \ -__attribute__((deprecated("Symbol is not yet part of stable ABI"), \ -section(".text.experimental"))) - -#else - -#define __rte_experimental \ -__attribute__((section(".text.experimental"))) - -#endif - -#endif /* _RTE_COMPAT_H_ */ diff --git a/lib/librte_eal/common/include/rte_debug.h b/lib/librte_eal/common/include/rte_debug.h deleted file mode 100644 index 7edd4b89cc..0000000000 --- a/lib/librte_eal/common/include/rte_debug.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_DEBUG_H_ -#define _RTE_DEBUG_H_ - -/** - * @file - * - * Debug Functions in RTE - * - * This file defines a generic API for debug operations. Part of - * the implementation is architecture-specific. - */ - -#include "rte_log.h" -#include "rte_branch_prediction.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Dump the stack of the calling core to the console. - */ -void rte_dump_stack(void); - -/** - * Dump the registers of the calling core to the console. - * - * Note: Not implemented in a userapp environment; use gdb instead. - */ -void rte_dump_registers(void); - -/** - * Provide notification of a critical non-recoverable error and terminate - * execution abnormally. - * - * Display the format string and its expanded arguments (printf-like). - * - * In a linux environment, this function dumps the stack and calls - * abort() resulting in a core dump if enabled. - * - * The function never returns. - * - * @param ... - * The format string, followed by the variable list of arguments. - */ -#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy") -#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__) - -#ifdef RTE_ENABLE_ASSERT -#define RTE_ASSERT(exp) RTE_VERIFY(exp) -#else -#define RTE_ASSERT(exp) do {} while (0) -#endif -#define RTE_VERIFY(exp) do { \ - if (unlikely(!(exp))) \ - rte_panic("line %d\tassert \"%s\" failed\n", __LINE__, #exp); \ -} while (0) - -/* - * Provide notification of a critical non-recoverable error and stop. - * - * This function should not be called directly. Refer to rte_panic() macro - * documentation. - */ -void __rte_panic(const char *funcname , const char *format, ...) -#ifdef __GNUC__ -#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2)) - __attribute__((cold)) -#endif -#endif - __attribute__((noreturn)) - __rte_format_printf(2, 3); - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_DEBUG_H_ */ diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h deleted file mode 100644 index a5c35f00c0..0000000000 --- a/lib/librte_eal/common/include/rte_dev.h +++ /dev/null @@ -1,518 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2014 6WIND S.A. - */ - -#ifndef _RTE_DEV_H_ -#define _RTE_DEV_H_ - -/** - * @file - * - * RTE PMD Driver Registration Interface - * - * This file manages the list of device drivers. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#include -#include -#include - -/** - * The device event type. 
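A minimal sketch of the debug helpers above: RTE_VERIFY() always panics when the invariant fails, while RTE_ASSERT() compiles away unless RTE_ENABLE_ASSERT is defined; the ring parameters are invented:

#include <stddef.h>
#include <rte_debug.h>

static void
enqueue_one(void **ring, unsigned int size, unsigned int head, void *obj)
{
    /* hard invariant: a zero-sized ring is a programming error */
    RTE_VERIFY(size != 0);

    /* cheap sanity check, present only in RTE_ENABLE_ASSERT builds */
    RTE_ASSERT(obj != NULL);

    ring[head % size] = obj;
}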
- */ -enum rte_dev_event_type { - RTE_DEV_EVENT_ADD, /**< device being added */ - RTE_DEV_EVENT_REMOVE, /**< device being removed */ - RTE_DEV_EVENT_MAX /**< max value of this enum */ -}; - -struct rte_dev_event { - enum rte_dev_event_type type; /**< device event type */ - int subsystem; /**< subsystem id */ - char *devname; /**< device name */ -}; - -typedef void (*rte_dev_event_cb_fn)(const char *device_name, - enum rte_dev_event_type event, - void *cb_arg); - -/* Macros to check for invalid function pointers */ -#define RTE_FUNC_PTR_OR_ERR_RET(func, retval) do { \ - if ((func) == NULL) \ - return retval; \ -} while (0) - -#define RTE_FUNC_PTR_OR_RET(func) do { \ - if ((func) == NULL) \ - return; \ -} while (0) - -/** - * Device driver. - */ -enum rte_kernel_driver { - RTE_KDRV_UNKNOWN = 0, - RTE_KDRV_IGB_UIO, - RTE_KDRV_VFIO, - RTE_KDRV_UIO_GENERIC, - RTE_KDRV_NIC_UIO, - RTE_KDRV_NONE, -}; - -/** - * Device policies. - */ -enum rte_dev_policy { - RTE_DEV_WHITELISTED, - RTE_DEV_BLACKLISTED, -}; - -/** - * A generic memory resource representation. - */ -struct rte_mem_resource { - uint64_t phys_addr; /**< Physical address, 0 if not resource. */ - uint64_t len; /**< Length of the resource. */ - void *addr; /**< Virtual address, NULL when not mapped. */ -}; - -/** - * A structure describing a device driver. - */ -struct rte_driver { - TAILQ_ENTRY(rte_driver) next; /**< Next in list. */ - const char *name; /**< Driver name. */ - const char *alias; /**< Driver alias. */ -}; - -/* - * Internal identifier length - * Sufficiently large to allow for UUID or PCI address - */ -#define RTE_DEV_NAME_MAX_LEN 64 - -/** - * A structure describing a generic device. - */ -struct rte_device { - TAILQ_ENTRY(rte_device) next; /**< Next device */ - const char *name; /**< Device name */ - const struct rte_driver *driver; /**< Driver assigned after probing */ - const struct rte_bus *bus; /**< Bus handle assigned on scan */ - int numa_node; /**< NUMA node connection */ - struct rte_devargs *devargs; /**< Arguments for latest probing */ -}; - -/** - * Query status of a device. - * - * @param dev - * Generic device pointer. - * @return - * (int)true if already probed successfully, 0 otherwise. - */ -int rte_dev_is_probed(const struct rte_device *dev); - -/** - * Hotplug add a given device to a specific bus. - * - * In multi-process, it will request other processes to add the same device. - * A failure, in any process, will rollback the action - * - * @param busname - * The bus name the device is added to. - * @param devname - * The device name. Based on this device name, eal will identify a driver - * capable of handling it and pass it to the driver probing function. - * @param drvargs - * Device arguments to be passed to the driver. - * @return - * 0 on success, negative on error. - */ -int rte_eal_hotplug_add(const char *busname, const char *devname, - const char *drvargs); - -/** - * Add matching devices. - * - * In multi-process, it will request other processes to add the same device. - * A failure, in any process, will rollback the action - * - * @param devargs - * Device arguments including bus, class and driver properties. - * @return - * 0 on success, negative on error. - */ -int rte_dev_probe(const char *devargs); - -/** - * Hotplug remove a given device from a specific bus. - * - * In multi-process, it will request other processes to remove the same device. - * A failure, in any process, will rollback the action - * - * @param busname - * The bus name the device is removed from. 
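To illustrate the probe/hotplug calls documented above, a hedged attach-then-detach sketch; "vdev:net_ring0" is simply the example device string reused from the devargs documentation later in this patch:

#include <rte_dev.h>

static int
attach_then_detach(void)
{
    int ret;

    /* equivalent single-string form: rte_dev_probe("vdev:net_ring0") */
    ret = rte_eal_hotplug_add("vdev", "net_ring0", "");
    if (ret < 0)
        return ret;

    return rte_eal_hotplug_remove("vdev", "net_ring0");
}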
- * @param devname - * The device name being removed. - * @return - * 0 on success, negative on error. - */ -int rte_eal_hotplug_remove(const char *busname, const char *devname); - -/** - * Remove one device. - * - * In multi-process, it will request other processes to remove the same device. - * A failure, in any process, will rollback the action - * - * @param dev - * Data structure of the device to remove. - * @return - * 0 on success, negative on error. - */ -int rte_dev_remove(struct rte_device *dev); - -/** - * Device comparison function. - * - * This type of function is used to compare an rte_device with arbitrary - * data. - * - * @param dev - * Device handle. - * - * @param data - * Data to compare against. The type of this parameter is determined by - * the kind of comparison performed by the function. - * - * @return - * 0 if the device matches the data. - * !0 if the device does not match. - * <0 if ordering is possible and the device is lower than the data. - * >0 if ordering is possible and the device is greater than the data. - */ -typedef int (*rte_dev_cmp_t)(const struct rte_device *dev, const void *data); - -#define RTE_PMD_EXPORT_NAME_ARRAY(n, idx) n##idx[] - -#define RTE_PMD_EXPORT_NAME(name, idx) \ -static const char RTE_PMD_EXPORT_NAME_ARRAY(this_pmd_name, idx) \ -__attribute__((used)) = RTE_STR(name) - -#define DRV_EXP_TAG(name, tag) __##name##_##tag - -#define RTE_PMD_REGISTER_PCI_TABLE(name, table) \ -static const char DRV_EXP_TAG(name, pci_tbl_export)[] __attribute__((used)) = \ -RTE_STR(table) - -#define RTE_PMD_REGISTER_PARAM_STRING(name, str) \ -static const char DRV_EXP_TAG(name, param_string_export)[] \ -__attribute__((used)) = str - -/** - * Advertise the list of kernel modules required to run this driver - * - * This string lists the kernel modules required for the devices - * associated to a PMD. The format of each line of the string is: - * " ". - * - * The possible formats for the device pattern are: - * "*" all devices supported by this driver - * "pci:*" all PCI devices supported by this driver - * "pci:v8086:d*:sv*:sd*" all PCI devices supported by this driver - * whose vendor id is 0x8086. - * - * The format of the kernel modules list is a parenthesized expression - * containing logical-and (&) and logical-or (|). - * - * The device pattern and the kmod expression are separated by a space. - * - * Example: - * - "* igb_uio | uio_pci_generic | vfio" - */ -#define RTE_PMD_REGISTER_KMOD_DEP(name, str) \ -static const char DRV_EXP_TAG(name, kmod_dep_export)[] \ -__attribute__((used)) = str - -/** - * Iteration context. - * - * This context carries over the current iteration state. - */ -struct rte_dev_iterator { - const char *dev_str; /**< device string. */ - const char *bus_str; /**< bus-related part of device string. */ - const char *cls_str; /**< class-related part of device string. */ - struct rte_bus *bus; /**< bus handle. */ - struct rte_class *cls; /**< class handle. */ - struct rte_device *device; /**< current position. */ - void *class_device; /**< additional specialized context. */ -}; - -/** - * Device iteration function. - * - * Find the next device matching properties passed in parameters. - * The function takes an additional ``start`` parameter, that is - * used as starting context when relevant. - * - * The function returns the current element in the iteration. - * This return value will potentially be used as a start parameter - * in subsequent calls to the function. 
- * - * The additional iterator parameter is only there if a specific - * implementation needs additional context. It must not be modified by - * the iteration function itself. - * - * @param start - * Starting iteration context. - * - * @param devstr - * Device description string. - * - * @param it - * Device iterator. - * - * @return - * The address of the current element matching the device description - * string. - */ -typedef void *(*rte_dev_iterate_t)(const void *start, - const char *devstr, - const struct rte_dev_iterator *it); - -/** - * Initializes a device iterator. - * - * This iterator allows accessing a list of devices matching a criteria. - * The device matching is made among all buses and classes currently registered, - * filtered by the device description given as parameter. - * - * This function will not allocate any memory. It is safe to stop the - * iteration at any moment and let the iterator go out of context. - * - * @param it - * Device iterator handle. - * - * @param str - * Device description string. - * - * @return - * 0 on successful initialization. - * <0 on error. - */ -__rte_experimental -int -rte_dev_iterator_init(struct rte_dev_iterator *it, const char *str); - -/** - * Iterates on a device iterator. - * - * Generates a new rte_device handle corresponding to the next element - * in the list described in comprehension by the iterator. - * - * The next object is returned, and the iterator is updated. - * - * @param it - * Device iterator handle. - * - * @return - * An rte_device handle if found. - * NULL if an error occurred (rte_errno is set). - * NULL if no device could be found (rte_errno is not set). - */ -__rte_experimental -struct rte_device * -rte_dev_iterator_next(struct rte_dev_iterator *it); - -#define RTE_DEV_FOREACH(dev, devstr, it) \ - for (rte_dev_iterator_init(it, devstr), \ - dev = rte_dev_iterator_next(it); \ - dev != NULL; \ - dev = rte_dev_iterator_next(it)) - -#ifdef __cplusplus -} -#endif - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * It registers the callback for the specific device. - * Multiple callbacks can be registered at the same time. - * - * @param device_name - * The device name, that is the param name of the struct rte_device, - * null value means for all devices. - * @param cb_fn - * callback address. - * @param cb_arg - * address of parameter for callback. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -__rte_experimental -int -rte_dev_event_callback_register(const char *device_name, - rte_dev_event_cb_fn cb_fn, - void *cb_arg); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * It unregisters the callback according to the specified device. - * - * @param device_name - * The device name, that is the param name of the struct rte_device, - * null value means for all devices and their callbacks. - * @param cb_fn - * callback address. - * @param cb_arg - * address of parameter for callback, (void *)-1 means to remove all - * registered which has the same callback address. - * - * @return - * - On success, return the number of callback entities removed. - * - On failure, a negative value. - */ -__rte_experimental -int -rte_dev_event_callback_unregister(const char *device_name, - rte_dev_event_cb_fn cb_fn, - void *cb_arg); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Executes all the user application registered callbacks for - * the specific device. 
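A hedged sketch of the device iterator documented above (experimental API, so ALLOW_EXPERIMENTAL_API is required); the "bus=vdev" filter string follows the bus=/class= property syntax accepted by the iterator:

#include <stdio.h>
#include <rte_dev.h>

static void
list_vdev_devices(void)
{
    struct rte_dev_iterator it;
    struct rte_device *dev;

    /* walk every device currently known to the vdev bus */
    RTE_DEV_FOREACH(dev, "bus=vdev", &it)
        printf("found device %s\n", dev->name);
}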
- * - * @param device_name - * The device name. - * @param event - * the device event type. - */ -__rte_experimental -void -rte_dev_event_callback_process(const char *device_name, - enum rte_dev_event_type event); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Start the device event monitoring. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -__rte_experimental -int -rte_dev_event_monitor_start(void); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Stop the device event monitoring. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -__rte_experimental -int -rte_dev_event_monitor_stop(void); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Enable hotplug handling for devices. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -__rte_experimental -int -rte_dev_hotplug_handle_enable(void); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Disable hotplug handling for devices. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -__rte_experimental -int -rte_dev_hotplug_handle_disable(void); - -/** - * Device level DMA map function. - * After a successful call, the memory segment will be mapped to the - * given device. - * - * @note: Memory must be registered in advance using rte_extmem_* APIs. - * - * @param dev - * Device pointer. - * @param addr - * Virtual address to map. - * @param iova - * IOVA address to map. - * @param len - * Length of the memory segment being mapped. - * - * @return - * 0 if mapping was successful. - * Negative value and rte_errno is set otherwise. - */ -__rte_experimental -int -rte_dev_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len); - -/** - * Device level DMA unmap function. - * After a successful call, the memory segment will no longer be - * accessible by the given device. - * - * @note: Memory must be registered in advance using rte_extmem_* APIs. - * - * @param dev - * Device pointer. - * @param addr - * Virtual address to unmap. - * @param iova - * IOVA address to unmap. - * @param len - * Length of the memory segment being mapped. - * - * @return - * 0 if un-mapping was successful. - * Negative value and rte_errno is set otherwise. - */ -__rte_experimental -int -rte_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, - size_t len); - -#endif /* _RTE_DEV_H_ */ diff --git a/lib/librte_eal/common/include/rte_devargs.h b/lib/librte_eal/common/include/rte_devargs.h deleted file mode 100644 index 898efa0d66..0000000000 --- a/lib/librte_eal/common/include/rte_devargs.h +++ /dev/null @@ -1,238 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2014 6WIND S.A. - */ - -#ifndef _RTE_DEVARGS_H_ -#define _RTE_DEVARGS_H_ - -/** - * @file - * - * RTE devargs: list of devices and their user arguments - * - * This file stores a list of devices and their arguments given by - * the user when a DPDK application is started. These devices can be PCI - * devices or virtual devices. These devices are stored at startup in a - * list of rte_devargs structures. 
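The event-callback and monitoring functions above are typically wired together as follows (experimental API; the callback body is a placeholder):

#include <stdio.h>
#include <rte_dev.h>

static void
on_dev_event(const char *device_name, enum rte_dev_event_type event,
             void *cb_arg)
{
    (void)cb_arg;
    printf("device %s %s\n", device_name,
           event == RTE_DEV_EVENT_ADD ? "added" : "removed");
}

static int
enable_hotplug_monitoring(void)
{
    int ret;

    ret = rte_dev_hotplug_handle_enable();
    if (ret < 0)
        return ret;
    ret = rte_dev_event_monitor_start();
    if (ret < 0)
        return ret;
    /* a NULL device name registers the callback for all devices */
    return rte_dev_event_callback_register(NULL, on_dev_event, NULL);
}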
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include -#include - -/** - * Type of generic device - */ -enum rte_devtype { - RTE_DEVTYPE_WHITELISTED_PCI, - RTE_DEVTYPE_BLACKLISTED_PCI, - RTE_DEVTYPE_VIRTUAL, -}; - -/** - * Structure that stores a device given by the user with its arguments - * - * A user device is a physical or a virtual device given by the user to - * the DPDK application at startup through command line arguments. - * - * The structure stores the configuration of the device, its PCI - * identifier if it's a PCI device or the driver name if it's a virtual - * device. - */ -struct rte_devargs { - /** Next in list. */ - TAILQ_ENTRY(rte_devargs) next; - /** Type of device. */ - enum rte_devtype type; - /** Device policy. */ - enum rte_dev_policy policy; - /** Name of the device. */ - char name[RTE_DEV_NAME_MAX_LEN]; - RTE_STD_C11 - union { - /** Arguments string as given by user or "" for no argument. */ - char *args; - const char *drv_str; - }; - struct rte_bus *bus; /**< bus handle. */ - struct rte_class *cls; /**< class handle. */ - const char *bus_str; /**< bus-related part of device string. */ - const char *cls_str; /**< class-related part of device string. */ - const char *data; /**< Device string storage. */ -}; - -/** - * Parse a device string. - * - * Verify that a bus is capable of handling the device passed - * in argument. Store which bus will handle the device, its name - * and the eventual device parameters. - * - * The syntax is: - * - * bus:device_identifier,arg1=val1,arg2=val2 - * - * where "bus:" is the bus name followed by any character separator. - * The bus name is optional. If no bus name is specified, each bus - * will attempt to recognize the device identifier. The first one - * to succeed will be used. - * - * Examples: - * - * pci:0000:05.00.0,arg=val - * 05.00.0,arg=val - * vdev:net_ring0 - * - * @param da - * The devargs structure holding the device information. - * - * @param dev - * String describing a device. - * - * @return - * - 0 on success. - * - Negative errno on error. - */ -int -rte_devargs_parse(struct rte_devargs *da, const char *dev); - -/** - * Parse a device string. - * - * Verify that a bus is capable of handling the device passed - * in argument. Store which bus will handle the device, its name - * and the eventual device parameters. - * - * The device string is built with a printf-like syntax. - * - * The syntax is: - * - * bus:device_identifier,arg1=val1,arg2=val2 - * - * where "bus:" is the bus name followed by any character separator. - * The bus name is optional. If no bus name is specified, each bus - * will attempt to recognize the device identifier. The first one - * to succeed will be used. - * - * Examples: - * - * pci:0000:05.00.0,arg=val - * 05.00.0,arg=val - * vdev:net_ring0 - * - * @param da - * The devargs structure holding the device information. - * @param format - * Format string describing a device. - * - * @return - * - 0 on success. - * - Negative errno on error. - */ -int -rte_devargs_parsef(struct rte_devargs *da, - const char *format, ...) -__rte_format_printf(2, 0); - -/** - * Insert an rte_devargs in the global list. - * - * @param da - * The devargs structure to insert. - * If a devargs for the same device is already inserted, - * it will be updated and returned. It means *da pointer can change. - * - * @return - * - 0 on success - * - Negative on error. 
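A hedged example of rte_devargs_parse() on one of the device strings shown above; only the parsed name is printed to keep the sketch short:

#include <stdio.h>
#include <string.h>
#include <rte_devargs.h>

static int
show_parsed_name(void)
{
    struct rte_devargs da;
    int ret;

    memset(&da, 0, sizeof(da));
    /* optional bus prefix, device identifier, then key=value arguments */
    ret = rte_devargs_parse(&da, "vdev:net_ring0");
    if (ret < 0)
        return ret;

    printf("parsed device name: %s\n", da.name);
    return 0;
}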
- */ -int -rte_devargs_insert(struct rte_devargs **da); - -/** - * Add a device to the user device list - * See rte_devargs_parse() for details. - * - * @param devtype - * The type of the device. - * @param devargs_str - * The arguments as given by the user. - * - * @return - * - 0 on success - * - A negative value on error - */ -int rte_devargs_add(enum rte_devtype devtype, const char *devargs_str); - -/** - * Remove a device from the user device list. - * Its resources are freed. - * If the devargs cannot be found, nothing happens. - * - * @param devargs - * The instance or a copy of devargs to remove. - * - * @return - * 0 on success. - * <0 on error. - * >0 if the devargs was not within the user device list. - */ -int rte_devargs_remove(struct rte_devargs *devargs); - -/** - * Count the number of user devices of a specified type - * - * @param devtype - * The type of the devices to counted. - * - * @return - * The number of devices. - */ -unsigned int -rte_devargs_type_count(enum rte_devtype devtype); - -/** - * This function dumps the list of user device and their arguments. - * - * @param f - * A pointer to a file for output - */ -void rte_devargs_dump(FILE *f); - -/** - * Find next rte_devargs matching the provided bus name. - * - * @param busname - * Limit the iteration to devargs related to buses - * matching this name. - * Will return any next rte_devargs if NULL. - * - * @param start - * Starting iteration point. The iteration will start at - * the first rte_devargs if NULL. - * - * @return - * Next rte_devargs entry matching the requested bus, - * NULL if there is none. - */ -struct rte_devargs * -rte_devargs_next(const char *busname, const struct rte_devargs *start); - -/** - * Iterate over all rte_devargs for a specific bus. - */ -#define RTE_EAL_DEVARGS_FOREACH(busname, da) \ - for (da = rte_devargs_next(busname, NULL); \ - da != NULL; \ - da = rte_devargs_next(busname, da)) \ - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_DEVARGS_H_ */ diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h deleted file mode 100644 index 2f9ed298de..0000000000 --- a/lib/librte_eal/common/include/rte_eal.h +++ /dev/null @@ -1,495 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2018 Intel Corporation - */ - -#ifndef _RTE_EAL_H_ -#define _RTE_EAL_H_ - -/** - * @file - * - * EAL Configuration API - */ - -#include -#include -#include - -#include -#include -#include -#include - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#define RTE_MAGIC 19820526 /**< Magic number written by the main partition when ready. */ - -/* Maximum thread_name length. */ -#define RTE_MAX_THREAD_NAME_LEN 16 - -/** - * The lcore role (used in RTE or not). - */ -enum rte_lcore_role_t { - ROLE_RTE, - ROLE_OFF, - ROLE_SERVICE, -}; - -/** - * The type of process in a linux, multi-process setup - */ -enum rte_proc_type_t { - RTE_PROC_AUTO = -1, /* allow auto-detection of primary/secondary */ - RTE_PROC_PRIMARY = 0, /* set to zero, so primary is the default */ - RTE_PROC_SECONDARY, - - RTE_PROC_INVALID -}; - -/** - * Get the process type in a multi-process setup - * - * @return - * The process type - */ -enum rte_proc_type_t rte_eal_process_type(void); - -/** - * Request iopl privilege for all RPL. - * - * This function should be called by pmds which need access to ioports. - - * @return - * - On success, returns 0. - * - On failure, returns -1. - */ -int rte_eal_iopl_init(void); - -/** - * Initialize the Environment Abstraction Layer (EAL). 
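A hedged sketch of walking the user device list with the iteration helper above; printing to stdout is illustrative only:

#include <stdio.h>
#include <rte_devargs.h>

static void
dump_vdev_args(void)
{
    struct rte_devargs *da;

    /* restrict the walk to devargs that belong to the vdev bus;
     * rte_devargs_dump(stdout) would print the whole list instead */
    RTE_EAL_DEVARGS_FOREACH("vdev", da)
        printf("%s: %s\n", da->name, da->args);
}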
- * - * This function is to be executed on the MASTER lcore only, as soon - * as possible in the application's main() function. - * - * The function finishes the initialization process before main() is called. - * It puts the SLAVE lcores in the WAIT state. - * - * When the multi-partition feature is supported, depending on the - * configuration (if CONFIG_RTE_EAL_MAIN_PARTITION is disabled), this - * function waits to ensure that the magic number is set before - * returning. See also the rte_eal_get_configuration() function. Note: - * This behavior may change in the future. - * - * @param argc - * A non-negative value. If it is greater than 0, the array members - * for argv[0] through argv[argc] (non-inclusive) shall contain pointers - * to strings. - * @param argv - * An array of strings. The contents of the array, as well as the strings - * which are pointed to by the array, may be modified by this function. - * @return - * - On success, the number of parsed arguments, which is greater or - * equal to zero. After the call to rte_eal_init(), - * all arguments argv[x] with x < ret may have been modified by this - * function call and should not be further interpreted by the - * application. The EAL does not take any ownership of the memory used - * for either the argv array, or its members. - * - On failure, -1 and rte_errno is set to a value indicating the cause - * for failure. In some instances, the application will need to be - * restarted as part of clearing the issue. - * - * Error codes returned via rte_errno: - * EACCES indicates a permissions issue. - * - * EAGAIN indicates either a bus or system resource was not available, - * setup may be attempted again. - * - * EALREADY indicates that the rte_eal_init function has already been - * called, and cannot be called again. - * - * EFAULT indicates the tailq configuration name was not found in - * memory configuration. - * - * EINVAL indicates invalid parameters were passed as argv/argc. - * - * ENOMEM indicates failure likely caused by an out-of-memory condition. - * - * ENODEV indicates memory setup issues. - * - * ENOTSUP indicates that the EAL cannot initialize on this system. - * - * EPROTO indicates that the PCI bus is either not present, or is not - * readable by the eal. - * - * ENOEXEC indicates that a service core failed to launch successfully. - */ -int rte_eal_init(int argc, char **argv); - -/** - * Clean up the Environment Abstraction Layer (EAL) - * - * This function must be called to release any internal resources that EAL has - * allocated during rte_eal_init(). After this call, no DPDK function calls may - * be made. It is expected that common usage of this function is to call it - * just before terminating the process. - * - * @return 0 Successfully released all internal EAL resources - * @return -EFAULT There was an error in releasing all resources. - */ -int rte_eal_cleanup(void); - -/** - * Check if a primary process is currently alive - * - * This function returns true when a primary process is currently - * active. - * - * @param config_file_path - * The config_file_path argument provided should point at the location - * that the primary process will create its config file. If NULL, the default - * config file path is used. - * - * @return - * - If alive, returns 1. - * - If dead, returns 0. 
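A typical rte_eal_init()/rte_eal_cleanup() skeleton, for illustration (the application body is elided):

	#include <stdio.h>
	#include <rte_eal.h>
	#include <rte_errno.h>

	int
	main(int argc, char **argv)
	{
		int ret = rte_eal_init(argc, argv);

		if (ret < 0) {
			fprintf(stderr, "EAL init failed: %s\n",
				rte_strerror(rte_errno));
			return 1;
		}
		argc -= ret;	/* EAL consumed 'ret' arguments */
		argv += ret;

		/* ... application work ... */

		rte_eal_cleanup();	/* release EAL resources before exiting */
		return 0;
	}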
- */ -int rte_eal_primary_proc_alive(const char *config_file_path); - -#define RTE_MP_MAX_FD_NUM 8 /* The max amount of fds */ -#define RTE_MP_MAX_NAME_LEN 64 /* The max length of action name */ -#define RTE_MP_MAX_PARAM_LEN 256 /* The max length of param */ -struct rte_mp_msg { - char name[RTE_MP_MAX_NAME_LEN]; - int len_param; - int num_fds; - uint8_t param[RTE_MP_MAX_PARAM_LEN]; - int fds[RTE_MP_MAX_FD_NUM]; -}; - -struct rte_mp_reply { - int nb_sent; - int nb_received; - struct rte_mp_msg *msgs; /* caller to free */ -}; - -/** - * Action function typedef used by other components. - * - * As we create socket channel for primary/secondary communication, use - * this function typedef to register action for coming messages. - * - * @note When handling IPC request callbacks, the reply must be sent even in - * cases of error handling. Simply returning success or failure will *not* - * send a response to the requestor. - * Implementation of error signalling mechanism is up to the application. - * - * @note No memory allocations should take place inside the callback. - */ -typedef int (*rte_mp_t)(const struct rte_mp_msg *msg, const void *peer); - -/** - * Asynchronous reply function typedef used by other components. - * - * As we create socket channel for primary/secondary communication, use - * this function typedef to register action for coming responses to asynchronous - * requests. - * - * @note When handling IPC request callbacks, the reply must be sent even in - * cases of error handling. Simply returning success or failure will *not* - * send a response to the requestor. - * Implementation of error signalling mechanism is up to the application. - * - * @note No memory allocations should take place inside the callback. - */ -typedef int (*rte_mp_async_reply_t)(const struct rte_mp_msg *request, - const struct rte_mp_reply *reply); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Register an action function for primary/secondary communication. - * - * Call this function to register an action, if the calling component wants - * to response the messages from the corresponding component in its primary - * process or secondary processes. - * - * @note IPC may be unsupported in certain circumstances, so caller should check - * for ENOTSUP error. - * - * @param name - * The name argument plays as the nonredundant key to find the action. - * - * @param action - * The action argument is the function pointer to the action function. - * - * @return - * - 0 on success. - * - (<0) on failure. - */ -__rte_experimental -int -rte_mp_action_register(const char *name, rte_mp_t action); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Unregister an action function for primary/secondary communication. - * - * Call this function to unregister an action if the calling component does - * not want to response the messages from the corresponding component in its - * primary process or secondary processes. - * - * @note IPC may be unsupported in certain circumstances, so caller should check - * for ENOTSUP error. - * - * @param name - * The name argument plays as the nonredundant key to find the action. - * - */ -__rte_experimental -void -rte_mp_action_unregister(const char *name); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Send a message to the peer process. - * - * This function will send a message which will be responded by the action - * identified by name in the peer process. 
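A sketch of registering an IPC action as described above; the handler sends an (empty) reply with rte_mp_reply(), which is documented further below, and the action name is arbitrary:

	#include <stdio.h>
	#include <string.h>
	#include <rte_eal.h>

	static int
	handle_example(const struct rte_mp_msg *msg, const void *peer)
	{
		struct rte_mp_msg reply;

		memset(&reply, 0, sizeof(reply));
		snprintf(reply.name, sizeof(reply.name), "%s", msg->name);
		/* A reply must be sent even on error paths. */
		return rte_mp_reply(&reply, peer);
	}

	/* Typically called once during component init:
	 *	rte_mp_action_register("example_action", handle_example);
	 */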
- * - * @param msg - * The msg argument contains the customized message. - * - * @return - * - On success, return 0. - * - On failure, return -1, and the reason will be stored in rte_errno. - */ -__rte_experimental -int -rte_mp_sendmsg(struct rte_mp_msg *msg); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Send a request to the peer process and expect a reply. - * - * This function sends a request message to the peer process, and will - * block until receiving reply message from the peer process. - * - * @note The caller is responsible to free reply->replies. - * - * @note This API must not be used inside memory-related or IPC callbacks, and - * no memory allocations should take place inside such callback. - * - * @note IPC may be unsupported in certain circumstances, so caller should check - * for ENOTSUP error. - * - * @param req - * The req argument contains the customized request message. - * - * @param reply - * The reply argument will be for storing all the replied messages; - * the caller is responsible for free reply->msgs. - * - * @param ts - * The ts argument specifies how long we can wait for the peer(s) to reply. - * - * @return - * - On success, return 0. - * - On failure, return -1, and the reason will be stored in rte_errno. - */ -__rte_experimental -int -rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply, - const struct timespec *ts); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Send a request to the peer process and expect a reply in a separate callback. - * - * This function sends a request message to the peer process, and will not - * block. Instead, reply will be received in a separate callback. - * - * @note IPC may be unsupported in certain circumstances, so caller should check - * for ENOTSUP error. - * - * @param req - * The req argument contains the customized request message. - * - * @param ts - * The ts argument specifies how long we can wait for the peer(s) to reply. - * - * @param clb - * The callback to trigger when all responses for this request have arrived. - * - * @return - * - On success, return 0. - * - On failure, return -1, and the reason will be stored in rte_errno. - */ -__rte_experimental -int -rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts, - rte_mp_async_reply_t clb); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Send a reply to the peer process. - * - * This function will send a reply message in response to a request message - * received previously. - * - * @note When handling IPC request callbacks, the reply must be sent even in - * cases of error handling. Simply returning success or failure will *not* - * send a response to the requestor. - * Implementation of error signalling mechanism is up to the application. - * - * @param msg - * The msg argument contains the customized message. - * - * @param peer - * The peer argument is the pointer to the peer socket path. - * - * @return - * - On success, return 0. - * - On failure, return -1, and the reason will be stored in rte_errno. - */ -__rte_experimental -int -rte_mp_reply(struct rte_mp_msg *msg, const char *peer); - -/** - * Usage function typedef used by the application usage function. - * - * Use this function typedef to define and call rte_set_application_usage_hook() - * routine. 
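A matching synchronous request sketch (illustrative; the action name and timeout are arbitrary, and error handling is trimmed):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <time.h>
	#include <rte_eal.h>

	static int
	send_example_request(void)
	{
		struct rte_mp_msg req;
		struct rte_mp_reply reply;
		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

		memset(&req, 0, sizeof(req));
		snprintf(req.name, sizeof(req.name), "example_action");
		memset(&reply, 0, sizeof(reply));

		if (rte_mp_request_sync(&req, &reply, &ts) < 0)
			return -1;
		free(reply.msgs);	/* the caller owns and frees reply.msgs */
		return reply.nb_received;
	}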
- */ -typedef void (*rte_usage_hook_t)(const char * prgname); - -/** - * Add application usage routine callout from the eal_usage() routine. - * - * This function allows the application to include its usage message - * in the EAL system usage message. The routine rte_set_application_usage_hook() - * needs to be called before the rte_eal_init() routine in the application. - * - * This routine is optional for the application and will behave as if the set - * routine was never called as the default behavior. - * - * @param usage_func - * The func argument is a function pointer to the application usage routine. - * Called function is defined using rte_usage_hook_t typedef, which is of - * the form void rte_usage_func(const char * prgname). - * - * Calling this routine with a NULL value will reset the usage hook routine and - * return the current value, which could be NULL. - * @return - * - Returns the current value of the rte_application_usage pointer to allow - * the caller to daisy chain the usage routines if needing more then one. - */ -rte_usage_hook_t -rte_set_application_usage_hook(rte_usage_hook_t usage_func); - -/** - * Whether EAL is using huge pages (disabled by --no-huge option). - * The no-huge mode is not compatible with all drivers or features. - * - * @return - * Nonzero if hugepages are enabled. - */ -int rte_eal_has_hugepages(void); - -/** - * Whether EAL is using PCI bus. - * Disabled by --no-pci option. - * - * @return - * Nonzero if the PCI bus is enabled. - */ -int rte_eal_has_pci(void); - -/** - * Whether the EAL was asked to create UIO device. - * - * @return - * Nonzero if true. - */ -int rte_eal_create_uio_dev(void); - -/** - * The user-configured vfio interrupt mode. - * - * @return - * Interrupt mode configured with the command line, - * RTE_INTR_MODE_NONE by default. - */ -enum rte_intr_mode rte_eal_vfio_intr_mode(void); - -/** - * A wrap API for syscall gettid. - * - * @return - * On success, returns the thread ID of calling process. - * It is always successful. - */ -int rte_sys_gettid(void); - -/** - * Get system unique thread id. - * - * @return - * On success, returns the thread ID of calling process. - * It is always successful. - */ -static inline int rte_gettid(void) -{ - static RTE_DEFINE_PER_LCORE(int, _thread_id) = -1; - if (RTE_PER_LCORE(_thread_id) == -1) - RTE_PER_LCORE(_thread_id) = rte_sys_gettid(); - return RTE_PER_LCORE(_thread_id); -} - -/** - * Get the iova mode - * - * @return - * enum rte_iova_mode value. - */ -enum rte_iova_mode rte_eal_iova_mode(void); - -/** - * Get user provided pool ops name for mbuf - * - * @return - * returns user provided pool ops name. 
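An illustrative usage-hook sketch; as noted above, the hook has to be installed before rte_eal_init():

	#include <stdio.h>
	#include <rte_eal.h>

	static void
	app_usage(const char *prgname)
	{
		printf("Usage: %s [EAL options] -- [application options]\n",
			prgname);
	}

	/* In main(), before calling rte_eal_init():
	 *	rte_set_application_usage_hook(app_usage);
	 */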
- */ -const char * -rte_eal_mbuf_user_pool_ops(void); - -/** - * Get the runtime directory of DPDK - * - * @return - * The runtime directory path of DPDK - */ -const char * -rte_eal_get_runtime_dir(void); - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_EAL_H_ */ diff --git a/lib/librte_eal/common/include/rte_eal_interrupts.h b/lib/librte_eal/common/include/rte_eal_interrupts.h deleted file mode 100644 index 773a34a42b..0000000000 --- a/lib/librte_eal/common/include/rte_eal_interrupts.h +++ /dev/null @@ -1,238 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_INTERRUPTS_H_ -#error "don't include this file directly, please include generic " -#endif - -/** - * @file rte_eal_interrupts.h - * @internal - * - * Contains function prototypes exposed by the EAL for interrupt handling by - * drivers and other DPDK internal consumers. - */ - -#ifndef _RTE_EAL_INTERRUPTS_H_ -#define _RTE_EAL_INTERRUPTS_H_ - -#define RTE_MAX_RXTX_INTR_VEC_ID 512 -#define RTE_INTR_VEC_ZERO_OFFSET 0 -#define RTE_INTR_VEC_RXTX_OFFSET 1 - -/** - * The interrupt source type, e.g. UIO, VFIO, ALARM etc. - */ -enum rte_intr_handle_type { - RTE_INTR_HANDLE_UNKNOWN = 0, /**< generic unknown handle */ - RTE_INTR_HANDLE_UIO, /**< uio device handle */ - RTE_INTR_HANDLE_UIO_INTX, /**< uio generic handle */ - RTE_INTR_HANDLE_VFIO_LEGACY, /**< vfio device handle (legacy) */ - RTE_INTR_HANDLE_VFIO_MSI, /**< vfio device handle (MSI) */ - RTE_INTR_HANDLE_VFIO_MSIX, /**< vfio device handle (MSIX) */ - RTE_INTR_HANDLE_ALARM, /**< alarm handle */ - RTE_INTR_HANDLE_EXT, /**< external handler */ - RTE_INTR_HANDLE_VDEV, /**< virtual device */ - RTE_INTR_HANDLE_DEV_EVENT, /**< device event handle */ - RTE_INTR_HANDLE_VFIO_REQ, /**< VFIO request handle */ - RTE_INTR_HANDLE_MAX /**< count of elements */ -}; - -#define RTE_INTR_EVENT_ADD 1UL -#define RTE_INTR_EVENT_DEL 2UL - -typedef void (*rte_intr_event_cb_t)(int fd, void *arg); - -struct rte_epoll_data { - uint32_t event; /**< event type */ - void *data; /**< User data */ - rte_intr_event_cb_t cb_fun; /**< IN: callback fun */ - void *cb_arg; /**< IN: callback arg */ -}; - -enum { - RTE_EPOLL_INVALID = 0, - RTE_EPOLL_VALID, - RTE_EPOLL_EXEC, -}; - -/** interrupt epoll event obj, taken by epoll_event.ptr */ -struct rte_epoll_event { - volatile uint32_t status; /**< OUT: event status */ - int fd; /**< OUT: event fd */ - int epfd; /**< OUT: epoll instance the ev associated with */ - struct rte_epoll_data epdata; -}; - -/** Handle for interrupts. */ -struct rte_intr_handle { - RTE_STD_C11 - union { - int vfio_dev_fd; /**< VFIO device file descriptor */ - int uio_cfg_fd; /**< UIO cfg file desc for uio_pci_generic */ - }; - int fd; /**< interrupt event file descriptor */ - enum rte_intr_handle_type type; /**< handle type */ - uint32_t max_intr; /**< max interrupt requested */ - uint32_t nb_efd; /**< number of available efd(event fd) */ - uint8_t efd_counter_size; /**< size of efd counter, used for vdev */ - int efds[RTE_MAX_RXTX_INTR_VEC_ID]; /**< intr vectors/efds mapping */ - struct rte_epoll_event elist[RTE_MAX_RXTX_INTR_VEC_ID]; - /**< intr vector epoll event */ - int *intr_vec; /**< intr vector number array */ -}; - -#define RTE_EPOLL_PER_THREAD -1 /**< to hint using per thread epfd */ - -/** - * It waits for events on the epoll instance. - * - * @param epfd - * Epoll instance fd on which the caller wait for events. - * @param events - * Memory area contains the events that will be available for the caller. 
- * @param maxevents - * Up to maxevents are returned, must greater than zero. - * @param timeout - * Specifying a timeout of -1 causes a block indefinitely. - * Specifying a timeout equal to zero cause to return immediately. - * @return - * - On success, returns the number of available event. - * - On failure, a negative value. - */ -int -rte_epoll_wait(int epfd, struct rte_epoll_event *events, - int maxevents, int timeout); - -/** - * It performs control operations on epoll instance referred by the epfd. - * It requests that the operation op be performed for the target fd. - * - * @param epfd - * Epoll instance fd on which the caller perform control operations. - * @param op - * The operation be performed for the target fd. - * @param fd - * The target fd on which the control ops perform. - * @param event - * Describes the object linked to the fd. - * Note: The caller must take care the object deletion after CTL_DEL. - * @return - * - On success, zero. - * - On failure, a negative value. - */ -int -rte_epoll_ctl(int epfd, int op, int fd, - struct rte_epoll_event *event); - -/** - * The function returns the per thread epoll instance. - * - * @return - * epfd the epoll instance referred to. - */ -int -rte_intr_tls_epfd(void); - -/** - * @param intr_handle - * Pointer to the interrupt handle. - * @param epfd - * Epoll instance fd which the intr vector associated to. - * @param op - * The operation be performed for the vector. - * Operation type of {ADD, DEL}. - * @param vec - * RX intr vector number added to the epoll instance wait list. - * @param data - * User raw data. - * @return - * - On success, zero. - * - On failure, a negative value. - */ -int -rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, - int epfd, int op, unsigned int vec, void *data); - -/** - * It deletes registered eventfds. - * - * @param intr_handle - * Pointer to the interrupt handle. - */ -void -rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle); - -/** - * It enables the packet I/O interrupt event if it's necessary. - * It creates event fd for each interrupt vector when MSIX is used, - * otherwise it multiplexes a single event fd. - * - * @param intr_handle - * Pointer to the interrupt handle. - * @param nb_efd - * Number of interrupt vector trying to enable. - * The value 0 is not allowed. - * @return - * - On success, zero. - * - On failure, a negative value. - */ -int -rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd); - -/** - * It disables the packet I/O interrupt event. - * It deletes registered eventfds and closes the open fds. - * - * @param intr_handle - * Pointer to the interrupt handle. - */ -void -rte_intr_efd_disable(struct rte_intr_handle *intr_handle); - -/** - * The packet I/O interrupt on datapath is enabled or not. - * - * @param intr_handle - * Pointer to the interrupt handle. - */ -int -rte_intr_dp_is_en(struct rte_intr_handle *intr_handle); - -/** - * The interrupt handle instance allows other causes or not. - * Other causes stand for any none packet I/O interrupts. - * - * @param intr_handle - * Pointer to the interrupt handle. - */ -int -rte_intr_allow_others(struct rte_intr_handle *intr_handle); - -/** - * The multiple interrupt vector capability of interrupt handle instance. - * It returns zero if no multiple interrupt vector support. - * - * @param intr_handle - * Pointer to the interrupt handle. 
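A sketch of waiting for one Rx interrupt event through the per-thread epoll instance (illustrative; it assumes the handle and vector were already set up by a driver, e.g. via rte_intr_efd_enable()):

	#include <rte_interrupts.h>

	static int
	wait_one_rx_event(struct rte_intr_handle *intr_handle, unsigned int vec,
			void *queue_data)
	{
		struct rte_epoll_event ev;

		/* Attach the Rx vector to this thread's epoll instance. */
		if (rte_intr_rx_ctl(intr_handle, RTE_EPOLL_PER_THREAD,
				RTE_INTR_EVENT_ADD, vec, queue_data) < 0)
			return -1;

		/* Block up to 100 ms; on success ev.epdata.data == queue_data. */
		return rte_epoll_wait(rte_intr_tls_epfd(), &ev, 1, 100);
	}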
- */ -int -rte_intr_cap_multiple(struct rte_intr_handle *intr_handle); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * @internal - * Check if currently executing in interrupt context - * - * @return - * - non zero in case of interrupt context - * - zero in case of process context - */ -__rte_experimental -int -rte_thread_is_intr(void); - -#endif /* _RTE_EAL_INTERRUPTS_H_ */ diff --git a/lib/librte_eal/common/include/rte_eal_memconfig.h b/lib/librte_eal/common/include/rte_eal_memconfig.h deleted file mode 100644 index dede2ee324..0000000000 --- a/lib/librte_eal/common/include/rte_eal_memconfig.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_EAL_MEMCONFIG_H_ -#define _RTE_EAL_MEMCONFIG_H_ - -#include - -#include - -/** - * @file - * - * This API allows access to EAL shared memory configuration through an API. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Lock the internal EAL shared memory configuration for shared access. - */ -void -rte_mcfg_mem_read_lock(void); - -/** - * Unlock the internal EAL shared memory configuration for shared access. - */ -void -rte_mcfg_mem_read_unlock(void); - -/** - * Lock the internal EAL shared memory configuration for exclusive access. - */ -void -rte_mcfg_mem_write_lock(void); - -/** - * Unlock the internal EAL shared memory configuration for exclusive access. - */ -void -rte_mcfg_mem_write_unlock(void); - -/** - * Lock the internal EAL TAILQ list for shared access. - */ -void -rte_mcfg_tailq_read_lock(void); - -/** - * Unlock the internal EAL TAILQ list for shared access. - */ -void -rte_mcfg_tailq_read_unlock(void); - -/** - * Lock the internal EAL TAILQ list for exclusive access. - */ -void -rte_mcfg_tailq_write_lock(void); - -/** - * Unlock the internal EAL TAILQ list for exclusive access. - */ -void -rte_mcfg_tailq_write_unlock(void); - -/** - * Lock the internal EAL Mempool list for shared access. - */ -void -rte_mcfg_mempool_read_lock(void); - -/** - * Unlock the internal EAL Mempool list for shared access. - */ -void -rte_mcfg_mempool_read_unlock(void); - -/** - * Lock the internal EAL Mempool list for exclusive access. - */ -void -rte_mcfg_mempool_write_lock(void); - -/** - * Unlock the internal EAL Mempool list for exclusive access. - */ -void -rte_mcfg_mempool_write_unlock(void); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Lock the internal EAL Timer Library lock for exclusive access. - */ -__rte_experimental -void -rte_mcfg_timer_lock(void); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Unlock the internal EAL Timer Library lock for exclusive access. - */ -__rte_experimental -void -rte_mcfg_timer_unlock(void); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * If true, pages are put in single files (per memseg list), - * as opposed to creating a file per page. 
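A sketch of taking the shared TAILQ lock around a walk of an EAL-registered list (illustrative only):

	#include <rte_eal_memconfig.h>

	static void
	walk_shared_tailq(void)
	{
		rte_mcfg_tailq_read_lock();
		/* ... read-only iteration over a shared tailq ... */
		rte_mcfg_tailq_read_unlock();
	}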
- */ -__rte_experimental -bool -rte_mcfg_get_single_file_segments(void); - -#ifdef __cplusplus -} -#endif - -#endif /*__RTE_EAL_MEMCONFIG_H_*/ diff --git a/lib/librte_eal/common/include/rte_errno.h b/lib/librte_eal/common/include/rte_errno.h deleted file mode 100644 index ba45591d24..0000000000 --- a/lib/librte_eal/common/include/rte_errno.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -/** - * @file - * - * API for error cause tracking - */ - -#ifndef _RTE_ERRNO_H_ -#define _RTE_ERRNO_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -RTE_DECLARE_PER_LCORE(int, _rte_errno); /**< Per core error number. */ - -/** - * Error number value, stored per-thread, which can be queried after - * calls to certain functions to determine why those functions failed. - * - * Uses standard values from errno.h wherever possible, with a small number - * of additional possible values for RTE-specific conditions. - */ -#define rte_errno RTE_PER_LCORE(_rte_errno) - -/** - * Function which returns a printable string describing a particular - * error code. For non-RTE-specific error codes, this function returns - * the value from the libc strerror function. - * - * @param errnum - * The error number to be looked up - generally the value of rte_errno - * @return - * A pointer to a thread-local string containing the text describing - * the error. - */ -const char *rte_strerror(int errnum); - -#ifndef __ELASTERROR -/** - * Check if we have a defined value for the max system-defined errno values. - * if no max defined, start from 1000 to prevent overlap with standard values - */ -#define __ELASTERROR 1000 -#endif - -/** Error types */ -enum { - RTE_MIN_ERRNO = __ELASTERROR, /**< Start numbering above std errno vals */ - - E_RTE_SECONDARY, /**< Operation not allowed in secondary processes */ - E_RTE_NO_CONFIG, /**< Missing rte_config */ - - RTE_MAX_ERRNO /**< Max RTE error number */ -}; - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_ERRNO_H_ */ diff --git a/lib/librte_eal/common/include/rte_fbarray.h b/lib/librte_eal/common/include/rte_fbarray.h deleted file mode 100644 index 6dccdbec98..0000000000 --- a/lib/librte_eal/common/include/rte_fbarray.h +++ /dev/null @@ -1,565 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017-2018 Intel Corporation - */ - -#ifndef RTE_FBARRAY_H -#define RTE_FBARRAY_H - -/** - * @file - * - * File-backed shared indexed array for DPDK. - * - * Basic workflow is expected to be the following: - * 1) Allocate array either using ``rte_fbarray_init()`` or - * ``rte_fbarray_attach()`` (depending on whether it's shared between - * multiple DPDK processes) - * 2) find free spots using ``rte_fbarray_find_next_free()`` - * 3) get pointer to data in the free spot using ``rte_fbarray_get()``, and - * copy data into the pointer (element size is fixed) - * 4) mark entry as used using ``rte_fbarray_set_used()`` - * - * Calls to ``rte_fbarray_init()`` and ``rte_fbarray_destroy()`` will have - * consequences for all processes, while calls to ``rte_fbarray_attach()`` and - * ``rte_fbarray_detach()`` will only have consequences within a single process. - * Therefore, it is safe to call ``rte_fbarray_attach()`` or - * ``rte_fbarray_detach()`` while another process is using ``rte_fbarray``, - * provided no other thread within the same process will try to use - * ``rte_fbarray`` before attaching or after detaching. 
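A small error-reporting sketch using rte_errno and rte_strerror(), for illustration:

	#include <stdio.h>
	#include <rte_errno.h>

	static void
	report_failure(const char *what)
	{
		fprintf(stderr, "%s failed: %s (rte_errno %d)\n",
			what, rte_strerror(rte_errno), rte_errno);
	}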
It is not safe to call - * ``rte_fbarray_init()`` or ``rte_fbarray_destroy()`` while another thread or - * another process is using ``rte_fbarray``. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -#include -#include - -#define RTE_FBARRAY_NAME_LEN 64 - -struct rte_fbarray { - char name[RTE_FBARRAY_NAME_LEN]; /**< name associated with an array */ - unsigned int count; /**< number of entries stored */ - unsigned int len; /**< current length of the array */ - unsigned int elt_sz; /**< size of each element */ - void *data; /**< data pointer */ - rte_rwlock_t rwlock; /**< multiprocess lock */ -}; - -/** - * Set up ``rte_fbarray`` structure and allocate underlying resources. - * - * Call this function to correctly set up ``rte_fbarray`` and allocate - * underlying files that will be backing the data in the current process. Note - * that in order to use and share ``rte_fbarray`` between multiple processes, - * data pointed to by ``arr`` pointer must itself be allocated in shared memory. - * - * @param arr - * Valid pointer to allocated ``rte_fbarray`` structure. - * - * @param name - * Unique name to be assigned to this array. - * - * @param len - * Number of elements initially available in the array. - * - * @param elt_sz - * Size of each element. - * - * @return - * - 0 on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len, - unsigned int elt_sz); - - -/** - * Attach to a file backing an already allocated and correctly set up - * ``rte_fbarray`` structure. - * - * Call this function to attach to file that will be backing the data in the - * current process. The structure must have been previously correctly set up - * with a call to ``rte_fbarray_init()``. Calls to ``rte_fbarray_attach()`` are - * usually meant to be performed in a multiprocessing scenario, with data - * pointed to by ``arr`` pointer allocated in shared memory. - * - * @param arr - * Valid pointer to allocated and correctly set up rte_fbarray structure. - * - * @return - * - 0 on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_attach(struct rte_fbarray *arr); - - -/** - * Deallocate resources for an already allocated and correctly set up - * ``rte_fbarray`` structure, and remove the underlying file. - * - * Call this function to deallocate all resources associated with an - * ``rte_fbarray`` structure within the current process. This will also - * zero-fill data pointed to by ``arr`` pointer and remove the underlying file - * backing the data, so it is expected that by the time this function is called, - * all other processes have detached from this ``rte_fbarray``. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @return - * - 0 on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_destroy(struct rte_fbarray *arr); - - -/** - * Deallocate resources for an already allocated and correctly set up - * ``rte_fbarray`` structure. - * - * Call this function to deallocate all resources associated with an - * ``rte_fbarray`` structure within current process. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @return - * - 0 on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. 
- */ -__rte_experimental -int -rte_fbarray_detach(struct rte_fbarray *arr); - - -/** - * Get pointer to element residing at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param idx - * Index of an element to get a pointer to. - * - * @return - * - non-NULL pointer on success. - * - NULL on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -void * -rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx); - - -/** - * Find index of a specified element within the array. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param elt - * Pointer to element to find index to. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt); - - -/** - * Mark specified element as used. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param idx - * Element index to mark as used. - * - * @return - * - 0 on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_set_used(struct rte_fbarray *arr, unsigned int idx); - - -/** - * Mark specified element as free. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param idx - * Element index to mark as free. - * - * @return - * - 0 on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_set_free(struct rte_fbarray *arr, unsigned int idx); - - -/** - * Check whether element at specified index is marked as used. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param idx - * Element index to check as used. - * - * @return - * - 1 if element is used. - * - 0 if element is unused. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_is_used(struct rte_fbarray *arr, unsigned int idx); - - -/** - * Find index of next free element, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_next_free(struct rte_fbarray *arr, unsigned int start); - - -/** - * Find index of next used element, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_next_used(struct rte_fbarray *arr, unsigned int start); - - -/** - * Find index of next chunk of ``n`` free elements, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @param n - * Number of free elements to look for. - * - * @return - * - non-negative integer on success. 
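A sketch of the basic fbarray workflow described in the file comment above (illustrative; the element type is arbitrary, and the array is assumed to have been set up beforehand with rte_fbarray_init()):

	#include <string.h>
	#include <rte_fbarray.h>

	struct elem {
		int value;
	};

	static int
	store_elem(struct rte_fbarray *arr, const struct elem *src)
	{
		struct elem *dst;
		int idx = rte_fbarray_find_next_free(arr, 0);

		if (idx < 0)
			return -1;
		dst = rte_fbarray_get(arr, idx);
		if (dst == NULL)
			return -1;
		memcpy(dst, src, sizeof(*dst));		/* element size is fixed */
		return rte_fbarray_set_used(arr, idx);	/* mark the slot as used */
	}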
- * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_next_n_free(struct rte_fbarray *arr, unsigned int start, - unsigned int n); - - -/** - * Find index of next chunk of ``n`` used elements, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @param n - * Number of used elements to look for. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_next_n_used(struct rte_fbarray *arr, unsigned int start, - unsigned int n); - - -/** - * Find how many more free entries there are, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_contig_free(struct rte_fbarray *arr, - unsigned int start); - - -/** - * Find how many more used entries there are, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_contig_used(struct rte_fbarray *arr, unsigned int start); - -/** - * Find index of previous free element, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_prev_free(struct rte_fbarray *arr, unsigned int start); - - -/** - * Find index of previous used element, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_prev_used(struct rte_fbarray *arr, unsigned int start); - - -/** - * Find lowest start index of chunk of ``n`` free elements, down from specified - * index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @param n - * Number of free elements to look for. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_prev_n_free(struct rte_fbarray *arr, unsigned int start, - unsigned int n); - - -/** - * Find lowest start index of chunk of ``n`` used elements, down from specified - * index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @param n - * Number of used elements to look for. 
- * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_prev_n_used(struct rte_fbarray *arr, unsigned int start, - unsigned int n); - - -/** - * Find how many more free entries there are before specified index (like - * ``rte_fbarray_find_contig_free`` but going in reverse). - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_rev_contig_free(struct rte_fbarray *arr, - unsigned int start); - - -/** - * Find how many more used entries there are before specified index (like - * ``rte_fbarray_find_contig_used`` but going in reverse). - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start); - - -/** - * Find index of biggest chunk of free elements, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_biggest_free(struct rte_fbarray *arr, unsigned int start); - - -/** - * Find index of biggest chunk of used elements, starting at specified index. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_biggest_used(struct rte_fbarray *arr, unsigned int start); - - -/** - * Find index of biggest chunk of free elements before a specified index (like - * ``rte_fbarray_find_biggest_free``, but going in reverse). - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_rev_biggest_free(struct rte_fbarray *arr, unsigned int start); - - -/** - * Find index of biggest chunk of used elements before a specified index (like - * ``rte_fbarray_find_biggest_used``, but going in reverse). - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. - * - * @param start - * Element index to start search from. - * - * @return - * - non-negative integer on success. - * - -1 on failure, with ``rte_errno`` indicating reason for failure. - */ -__rte_experimental -int -rte_fbarray_find_rev_biggest_used(struct rte_fbarray *arr, unsigned int start); - - -/** - * Dump ``rte_fbarray`` metadata. - * - * @param arr - * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. 
- * - * @param f - * File object to dump information into. - */ -__rte_experimental -void -rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f); - -#ifdef __cplusplus -} -#endif - -#endif /* RTE_FBARRAY_H */ diff --git a/lib/librte_eal/common/include/rte_function_versioning.h b/lib/librte_eal/common/include/rte_function_versioning.h deleted file mode 100644 index c924351d5e..0000000000 --- a/lib/librte_eal/common/include/rte_function_versioning.h +++ /dev/null @@ -1,90 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015 Neil Horman . - * All rights reserved. - */ - -#ifndef _RTE_FUNCTION_VERSIONING_H_ -#define _RTE_FUNCTION_VERSIONING_H_ -#include - -#ifndef RTE_USE_FUNCTION_VERSIONING -#error Use of function versioning disabled, is "use_function_versioning=true" in meson.build? -#endif - -#ifdef RTE_BUILD_SHARED_LIB - -/* - * Provides backwards compatibility when updating exported functions. - * When a symol is exported from a library to provide an API, it also provides a - * calling convention (ABI) that is embodied in its name, return type, - * arguments, etc. On occasion that function may need to change to accommodate - * new functionality, behavior, etc. When that occurs, it is desirable to - * allow for backwards compatibility for a time with older binaries that are - * dynamically linked to the dpdk. To support that, the __vsym and - * VERSION_SYMBOL macros are created. They, in conjunction with the - * _version.map file for a given library allow for multiple versions of - * a symbol to exist in a shared library so that older binaries need not be - * immediately recompiled. - * - * Refer to the guidelines document in the docs subdirectory for details on the - * use of these macros - */ - -/* - * Macro Parameters: - * b - function base name - * e - function version extension, to be concatenated with base name - * n - function symbol version string to be applied - * f - function prototype - * p - full function symbol name - */ - -/* - * VERSION_SYMBOL - * Creates a symbol version table entry binding symbol @DPDK_ to the internal - * function name - */ -#define VERSION_SYMBOL(b, e, n) __asm__(".symver " RTE_STR(b) RTE_STR(e) ", " RTE_STR(b) "@DPDK_" RTE_STR(n)) - -/* - * BIND_DEFAULT_SYMBOL - * Creates a symbol version entry instructing the linker to bind references to - * symbol to the internal symbol - */ -#define BIND_DEFAULT_SYMBOL(b, e, n) __asm__(".symver " RTE_STR(b) RTE_STR(e) ", " RTE_STR(b) "@@DPDK_" RTE_STR(n)) - -/* - * __vsym - * Annotation to be used in declaration of the internal symbol to signal - * that it is being used as an implementation of a particular version of symbol - * . - */ -#define __vsym __attribute__((used)) - -/* - * MAP_STATIC_SYMBOL - * If a function has been bifurcated into multiple versions, none of which - * are defined as the exported symbol name in the map file, this macro can be - * used to alias a specific version of the symbol to its exported name. 
For - * example, if you have 2 versions of a function foo_v1 and foo_v2, where the - * former is mapped to foo@DPDK_1 and the latter is mapped to foo@DPDK_2 when - * building a shared library, this macro can be used to map either foo_v1 or - * foo_v2 to the symbol foo when building a static library, e.g.: - * MAP_STATIC_SYMBOL(void foo(), foo_v2); - */ -#define MAP_STATIC_SYMBOL(f, p) - -#else -/* - * No symbol versioning in use - */ -#define VERSION_SYMBOL(b, e, n) -#define __vsym -#define BIND_DEFAULT_SYMBOL(b, e, n) -#define MAP_STATIC_SYMBOL(f, p) f __attribute__((alias(RTE_STR(p)))) -/* - * RTE_BUILD_SHARED_LIB=n - */ -#endif - -#endif /* _RTE_FUNCTION_VERSIONING_H_ */ diff --git a/lib/librte_eal/common/include/rte_hexdump.h b/lib/librte_eal/common/include/rte_hexdump.h deleted file mode 100644 index 2d03c089c4..0000000000 --- a/lib/librte_eal/common/include/rte_hexdump.h +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_HEXDUMP_H_ -#define _RTE_HEXDUMP_H_ - -/** - * @file - * Simple API to dump out memory in a special hex format. - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** -* Dump out memory in a special hex dump format. -* -* @param f -* A pointer to a file for output -* @param title -* If not NULL this string is printed as a header to the output. -* @param buf -* This is the buffer address to print out. -* @param len -* The number of bytes to dump out -* @return -* None. -*/ - -extern void -rte_hexdump(FILE *f, const char * title, const void * buf, unsigned int len); - -/** -* Dump out memory in a hex format with colons between bytes. -* -* @param f -* A pointer to a file for output -* @param title -* If not NULL this string is printed as a header to the output. -* @param buf -* This is the buffer address to print out. -* @param len -* The number of bytes to dump out -* @return -* None. -*/ - -void -rte_memdump(FILE *f, const char * title, const void * buf, unsigned int len); - - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_HEXDUMP_H_ */ diff --git a/lib/librte_eal/common/include/rte_hypervisor.h b/lib/librte_eal/common/include/rte_hypervisor.h deleted file mode 100644 index 5fe719c1d4..0000000000 --- a/lib/librte_eal/common/include/rte_hypervisor.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2017 Mellanox Technologies, Ltd - */ - -#ifndef RTE_HYPERVISOR_H -#define RTE_HYPERVISOR_H - -/** - * @file - * Hypervisor awareness. - */ - -enum rte_hypervisor { - RTE_HYPERVISOR_NONE, - RTE_HYPERVISOR_KVM, - RTE_HYPERVISOR_HYPERV, - RTE_HYPERVISOR_VMWARE, - RTE_HYPERVISOR_UNKNOWN -}; - -/** - * Get the id of hypervisor it is running on. - */ -enum rte_hypervisor -rte_hypervisor_get(void); - -/** - * Get the name of a given hypervisor id. - */ -const char * -rte_hypervisor_get_name(enum rte_hypervisor id); - -#endif /* RTE_HYPERVISOR_H */ diff --git a/lib/librte_eal/common/include/rte_interrupts.h b/lib/librte_eal/common/include/rte_interrupts.h deleted file mode 100644 index e3b406abc2..0000000000 --- a/lib/librte_eal/common/include/rte_interrupts.h +++ /dev/null @@ -1,145 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_INTERRUPTS_H_ -#define _RTE_INTERRUPTS_H_ - -#include -#include - -/** - * @file - * - * The RTE interrupt interface provides functions to register/unregister - * callbacks for a specific interrupt. 
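An illustrative rte_hexdump() call for debugging a buffer:

	#include <stdio.h>
	#include <rte_hexdump.h>

	static void
	dump_buffer(const void *buf, unsigned int len)
	{
		rte_hexdump(stdout, "buffer contents", buf, len);
	}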
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** Interrupt handle */ -struct rte_intr_handle; - -/** Function to be registered for the specific interrupt */ -typedef void (*rte_intr_callback_fn)(void *cb_arg); - -/** - * Function to call after a callback is unregistered. - * Can be used to close fd and free cb_arg. - */ -typedef void (*rte_intr_unregister_callback_fn)(struct rte_intr_handle *intr_handle, - void *cb_arg); - -#include "rte_eal_interrupts.h" - -/** - * It registers the callback for the specific interrupt. Multiple - * callbacks can be registered at the same time. - * @param intr_handle - * Pointer to the interrupt handle. - * @param cb - * callback address. - * @param cb_arg - * address of parameter for callback. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -int rte_intr_callback_register(const struct rte_intr_handle *intr_handle, - rte_intr_callback_fn cb, void *cb_arg); - -/** - * It unregisters the callback according to the specified interrupt handle. - * - * @param intr_handle - * pointer to the interrupt handle. - * @param cb - * callback address. - * @param cb_arg - * address of parameter for callback, (void *)-1 means to remove all - * registered which has the same callback address. - * - * @return - * - On success, return the number of callback entities removed. - * - On failure, a negative value. - */ -int rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle, - rte_intr_callback_fn cb, void *cb_arg); - -/** - * Unregister the callback according to the specified interrupt handle, - * after it's no longer active. Fail if source is not active. - * - * @param intr_handle - * pointer to the interrupt handle. - * @param cb_fn - * callback address. - * @param cb_arg - * address of parameter for callback, (void *)-1 means to remove all - * registered which has the same callback address. - * @param ucb_fn - * callback to call before cb is unregistered (optional). - * can be used to close fd and free cb_arg. - * - * @return - * - On success, return the number of callback entities marked for remove. - * - On failure, a negative value. - */ -__rte_experimental -int -rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle, - rte_intr_callback_fn cb_fn, void *cb_arg, - rte_intr_unregister_callback_fn ucb_fn); - -/** - * It enables the interrupt for the specified handle. - * - * @param intr_handle - * pointer to the interrupt handle. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -int rte_intr_enable(const struct rte_intr_handle *intr_handle); - -/** - * It disables the interrupt for the specified handle. - * - * @param intr_handle - * pointer to the interrupt handle. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -int rte_intr_disable(const struct rte_intr_handle *intr_handle); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * It acknowledges an interrupt raised for the specified handle. - * - * This function should be called at the end of each interrupt handler either - * from application or driver, so that currently raised interrupt is acked and - * further new interrupts are raised. - * - * @param intr_handle - * pointer to the interrupt handle. - * - * @return - * - On success, zero. - * - On failure, a negative value. 
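A sketch of registering, enabling and acknowledging a device interrupt (illustrative; the handle is provided by the bus/driver, and rte_intr_ack() is the experimental API declared just below):

	#include <rte_interrupts.h>

	static void
	on_interrupt(void *cb_arg)
	{
		struct rte_intr_handle *intr_handle = cb_arg;

		/* Ack so that further interrupts can be raised. */
		rte_intr_ack(intr_handle);
	}

	static int
	setup_interrupt(struct rte_intr_handle *intr_handle)
	{
		int ret = rte_intr_callback_register(intr_handle, on_interrupt,
				intr_handle);

		if (ret < 0)
			return ret;
		return rte_intr_enable(intr_handle);
	}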
- */ -__rte_experimental -int rte_intr_ack(const struct rte_intr_handle *intr_handle); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/lib/librte_eal/common/include/rte_keepalive.h b/lib/librte_eal/common/include/rte_keepalive.h deleted file mode 100644 index 4bda7ca56f..0000000000 --- a/lib/librte_eal/common/include/rte_keepalive.h +++ /dev/null @@ -1,142 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2015-2016 Intel Corporation. - */ - -/** - * @file rte_keepalive.h - * DPDK RTE LCore Keepalive Monitor. - * - **/ - -#ifndef _KEEPALIVE_H_ -#define _KEEPALIVE_H_ - -#include -#include - -#ifndef RTE_KEEPALIVE_MAXCORES -/** - * Number of cores to track. - * @note Must be larger than the highest core id. */ -#define RTE_KEEPALIVE_MAXCORES RTE_MAX_LCORE -#endif - -enum rte_keepalive_state { - RTE_KA_STATE_UNUSED = 0, - RTE_KA_STATE_ALIVE = 1, - RTE_KA_STATE_MISSING = 4, - RTE_KA_STATE_DEAD = 2, - RTE_KA_STATE_GONE = 3, - RTE_KA_STATE_DOZING = 5, - RTE_KA_STATE_SLEEP = 6 -}; - -/** - * Keepalive failure callback. - * - * Receives a data pointer passed to rte_keepalive_create() and the id of the - * failed core. - * @param data Data pointer passed to rte_keepalive_create() - * @param id_core ID of the core that has failed - */ -typedef void (*rte_keepalive_failure_callback_t)( - void *data, - const int id_core); - -/** - * Keepalive relay callback. - * - * Receives a data pointer passed to rte_keepalive_register_relay_callback(), - * the id of the core for which state is to be forwarded, and details of the - * current core state. - * @param data Data pointer passed to rte_keepalive_register_relay_callback() - * @param id_core ID of the core for which state is being reported - * @param core_state The current state of the core - * @param Timestamp of when core was last seen alive - */ -typedef void (*rte_keepalive_relay_callback_t)( - void *data, - const int id_core, - enum rte_keepalive_state core_state, - uint64_t last_seen - ); - -/** - * Keepalive state structure. - * @internal - */ -struct rte_keepalive; - -/** - * Initialise keepalive sub-system. - * @param callback - * Function called upon detection of a dead core. - * @param data - * Data pointer to be passed to function callback. - * @return - * Keepalive structure success, NULL on failure. - */ -struct rte_keepalive *rte_keepalive_create( - rte_keepalive_failure_callback_t callback, - void *data); - -/** - * Checks & handles keepalive state of monitored cores. - * @param *ptr_timer Triggering timer (unused) - * @param *ptr_data Data pointer (keepalive structure) - */ -void rte_keepalive_dispatch_pings(void *ptr_timer, void *ptr_data); - -/** - * Registers a core for keepalive checks. - * @param *keepcfg - * Keepalive structure pointer - * @param id_core - * ID number of core to register. - */ -void rte_keepalive_register_core(struct rte_keepalive *keepcfg, - const int id_core); - -/** - * Per-core keepalive check. - * @param *keepcfg - * Keepalive structure pointer - * - * This function needs to be called from within the main process loop of - * the LCore to be checked. - */ -void -rte_keepalive_mark_alive(struct rte_keepalive *keepcfg); - -/** - * Per-core sleep-time indication. - * @param *keepcfg - * Keepalive structure pointer - * - * If CPU idling is enabled, this function needs to be called from within - * the main process loop of the LCore going to sleep, in order to avoid - * the LCore being mis-detected as dead. 
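A keepalive sketch (illustrative; the timer wiring and the worker lcore ID are placeholders):

	#include <stdio.h>
	#include <rte_keepalive.h>

	static void
	on_core_dead(void *data, const int id_core)
	{
		(void)data;
		fprintf(stderr, "lcore %d missed its keepalive\n", id_core);
	}

	/* On the master lcore, after rte_eal_init():
	 *	struct rte_keepalive *ka = rte_keepalive_create(on_core_dead, NULL);
	 *	rte_keepalive_register_core(ka, worker_lcore_id);
	 * Each monitored worker calls rte_keepalive_mark_alive(ka) from its main
	 * loop, and the master periodically calls
	 * rte_keepalive_dispatch_pings(NULL, ka), e.g. from a timer callback.
	 */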
- */ -void -rte_keepalive_mark_sleep(struct rte_keepalive *keepcfg); - -/** - * Registers a 'live core' callback. - * - * The complement of the 'dead core' callback. This is called when a - * core is known to be alive, and is intended for cases when an app - * needs to know 'liveness' beyond just knowing when a core has died. - * - * @param *keepcfg - * Keepalive structure pointer - * @param callback - * Function called upon detection of a dead core. - * @param data - * Data pointer to be passed to function callback. - */ -void -rte_keepalive_register_relay_callback(struct rte_keepalive *keepcfg, - rte_keepalive_relay_callback_t callback, - void *data); - -#endif /* _KEEPALIVE_H_ */ diff --git a/lib/librte_eal/common/include/rte_launch.h b/lib/librte_eal/common/include/rte_launch.h deleted file mode 100644 index 06a671752a..0000000000 --- a/lib/librte_eal/common/include/rte_launch.h +++ /dev/null @@ -1,148 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_LAUNCH_H_ -#define _RTE_LAUNCH_H_ - -/** - * @file - * - * Launch tasks on other lcores - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * State of an lcore. - */ -enum rte_lcore_state_t { - WAIT, /**< waiting a new command */ - RUNNING, /**< executing command */ - FINISHED, /**< command executed */ -}; - -/** - * Definition of a remote launch function. - */ -typedef int (lcore_function_t)(void *); - -/** - * Launch a function on another lcore. - * - * To be executed on the MASTER lcore only. - * - * Sends a message to a slave lcore (identified by the slave_id) that - * is in the WAIT state (this is true after the first call to - * rte_eal_init()). This can be checked by first calling - * rte_eal_wait_lcore(slave_id). - * - * When the remote lcore receives the message, it switches to - * the RUNNING state, then calls the function f with argument arg. Once the - * execution is done, the remote lcore switches to a FINISHED state and - * the return value of f is stored in a local variable to be read using - * rte_eal_wait_lcore(). - * - * The MASTER lcore returns as soon as the message is sent and knows - * nothing about the completion of f. - * - * Note: This function is not designed to offer optimum - * performance. It is just a practical way to launch a function on - * another lcore at initialization time. - * - * @param f - * The function to be called. - * @param arg - * The argument for the function. - * @param slave_id - * The identifier of the lcore on which the function should be executed. - * @return - * - 0: Success. Execution of function f started on the remote lcore. - * - (-EBUSY): The remote lcore is not in a WAIT state. - */ -int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned slave_id); - -/** - * This enum indicates whether the master core must execute the handler - * launched on all logical cores. - */ -enum rte_rmt_call_master_t { - SKIP_MASTER = 0, /**< lcore handler not executed by master core. */ - CALL_MASTER, /**< lcore handler executed by master core. */ -}; - -/** - * Launch a function on all lcores. - * - * Check that each SLAVE lcore is in a WAIT state, then call - * rte_eal_remote_launch() for each lcore. - * - * @param f - * The function to be called. - * @param arg - * The argument for the function. - * @param call_master - * If call_master set to SKIP_MASTER, the MASTER lcore does not call - * the function. If call_master is set to CALL_MASTER, the function - * is also called on master before returning. 
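A remote-launch sketch (illustrative; slave_id stands for the ID of an enabled slave lcore, and rte_eal_wait_lcore() is documented just below):

	#include <stdio.h>
	#include <rte_launch.h>

	static int
	worker_main(void *arg)
	{
		(void)arg;
		printf("worker running\n");
		return 0;
	}

	/* On the MASTER lcore, after rte_eal_init():
	 *	rte_eal_remote_launch(worker_main, NULL, slave_id);
	 *	...
	 *	rte_eal_wait_lcore(slave_id);	collects worker_main's return value
	 */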
In any case, the master - * lcore returns as soon as it finished its job and knows nothing - * about the completion of f on the other lcores. - * @return - * - 0: Success. Execution of function f started on all remote lcores. - * - (-EBUSY): At least one remote lcore is not in a WAIT state. In this - * case, no message is sent to any of the lcores. - */ -int rte_eal_mp_remote_launch(lcore_function_t *f, void *arg, - enum rte_rmt_call_master_t call_master); - -/** - * Get the state of the lcore identified by slave_id. - * - * To be executed on the MASTER lcore only. - * - * @param slave_id - * The identifier of the lcore. - * @return - * The state of the lcore. - */ -enum rte_lcore_state_t rte_eal_get_lcore_state(unsigned slave_id); - -/** - * Wait until an lcore finishes its job. - * - * To be executed on the MASTER lcore only. - * - * If the slave lcore identified by the slave_id is in a FINISHED state, - * switch to the WAIT state. If the lcore is in RUNNING state, wait until - * the lcore finishes its job and moves to the FINISHED state. - * - * @param slave_id - * The identifier of the lcore. - * @return - * - 0: If the lcore identified by the slave_id is in a WAIT state. - * - The value that was returned by the previous remote launch - * function call if the lcore identified by the slave_id was in a - * FINISHED or RUNNING state. In this case, it changes the state - * of the lcore to WAIT. - */ -int rte_eal_wait_lcore(unsigned slave_id); - -/** - * Wait until all lcores finish their jobs. - * - * To be executed on the MASTER lcore only. Issue an - * rte_eal_wait_lcore() for every lcore. The return values are - * ignored. - * - * After a call to rte_eal_mp_wait_lcore(), the caller can assume - * that all slave lcores are in a WAIT state. - */ -void rte_eal_mp_wait_lcore(void); - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_LAUNCH_H_ */ diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h deleted file mode 100644 index 476b8ef3a7..0000000000 --- a/lib/librte_eal/common/include/rte_lcore.h +++ /dev/null @@ -1,289 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_LCORE_H_ -#define _RTE_LCORE_H_ - -/** - * @file - * - * API for lcore and socket manipulation - * - */ -#include -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#define LCORE_ID_ANY UINT32_MAX /**< Any lcore. */ - -RTE_DECLARE_PER_LCORE(unsigned, _lcore_id); /**< Per thread "lcore id". */ -RTE_DECLARE_PER_LCORE(rte_cpuset_t, _cpuset); /**< Per thread "cpuset". */ - -/** - * Get a lcore's role. - * - * @param lcore_id - * The identifier of the lcore, which MUST be between 0 and RTE_MAX_LCORE-1. - * @return - * The role of the lcore. - */ -enum rte_lcore_role_t rte_eal_lcore_role(unsigned int lcore_id); - -/** - * Return the Application thread ID of the execution unit. - * - * Note: in most cases the lcore id returned here will also correspond - * to the processor id of the CPU on which the thread is pinned, this - * will not be the case if the user has explicitly changed the thread to - * core affinities using --lcores EAL argument e.g. --lcores '(0-3)@10' - * to run threads with lcore IDs 0, 1, 2 and 3 on physical core 10.. 
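A minimal sketch of the remote-launch flow documented above, assuming rte_eal_init() has already succeeded on the master lcore (function names are made up):

#include <stdio.h>
#include <rte_launch.h>
#include <rte_lcore.h>

static int
lcore_hello(void *arg)
{
	(void)arg;
	printf("hello from lcore %u\n", rte_lcore_id());
	return 0;
}

static void
run_everywhere(void)
{
	/* Start lcore_hello() on every slave lcore currently in WAIT state. */
	if (rte_eal_mp_remote_launch(lcore_hello, NULL, SKIP_MASTER) != 0)
		return;			/* -EBUSY: at least one lcore was busy */

	lcore_hello(NULL);		/* the master does its share directly */
	rte_eal_mp_wait_lcore();	/* all slaves are back in WAIT state */
}

Passing CALL_MASTER instead of SKIP_MASTER would make the master run the function itself before rte_eal_mp_remote_launch() returns.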
- * - * @return - * Logical core ID (in EAL thread) or LCORE_ID_ANY (in non-EAL thread) - */ -static inline unsigned -rte_lcore_id(void) -{ - return RTE_PER_LCORE(_lcore_id); -} - -/** - * Get the id of the master lcore - * - * @return - * the id of the master lcore - */ -unsigned int rte_get_master_lcore(void); - -/** - * Return the number of execution units (lcores) on the system. - * - * @return - * the number of execution units (lcores) on the system. - */ -unsigned int rte_lcore_count(void); - -/** - * Return the index of the lcore starting from zero. - * - * When option -c or -l is given, the index corresponds - * to the order in the list. - * For example: - * -c 0x30, lcore 4 has index 0, and 5 has index 1. - * -l 22,18 lcore 22 has index 0, and 18 has index 1. - * - * @param lcore_id - * The targeted lcore, or -1 for the current one. - * @return - * The relative index, or -1 if not enabled. - */ -int rte_lcore_index(int lcore_id); - -/** - * Return the ID of the physical socket of the logical core we are - * running on. - * @return - * the ID of current lcoreid's physical socket - */ -unsigned int rte_socket_id(void); - -/** - * Return number of physical sockets detected on the system. - * - * Note that number of nodes may not be correspondent to their physical id's: - * for example, a system may report two socket id's, but the actual socket id's - * may be 0 and 8. - * - * @return - * the number of physical sockets as recognized by EAL - */ -unsigned int -rte_socket_count(void); - -/** - * Return socket id with a particular index. - * - * This will return socket id at a particular position in list of all detected - * physical socket id's. For example, on a machine with sockets [0, 8], passing - * 1 as a parameter will return 8. - * - * @param idx - * index of physical socket id to return - * - * @return - * - physical socket id as recognized by EAL - * - -1 on error, with errno set to EINVAL - */ -int -rte_socket_id_by_idx(unsigned int idx); - -/** - * Get the ID of the physical socket of the specified lcore - * - * @param lcore_id - * the targeted lcore, which MUST be between 0 and RTE_MAX_LCORE-1. - * @return - * the ID of lcoreid's physical socket - */ -unsigned int -rte_lcore_to_socket_id(unsigned int lcore_id); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * - * Return the id of the lcore on a socket starting from zero. - * - * @param lcore_id - * The targeted lcore, or -1 for the current one. - * @return - * The relative index, or -1 if not enabled. - */ -__rte_experimental -int -rte_lcore_to_cpu_id(int lcore_id); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice. - * - * Return the cpuset for a given lcore. - * @param lcore_id - * the targeted lcore, which MUST be between 0 and RTE_MAX_LCORE-1. - * @return - * The cpuset of that lcore - */ -__rte_experimental -rte_cpuset_t -rte_lcore_cpuset(unsigned int lcore_id); - -/** - * Test if an lcore is enabled. - * - * @param lcore_id - * The identifier of the lcore, which MUST be between 0 and - * RTE_MAX_LCORE-1. - * @return - * True if the given lcore is enabled; false otherwise. - */ -int rte_lcore_is_enabled(unsigned int lcore_id); - -/** - * Get the next enabled lcore ID. - * - * @param i - * The current lcore (reference). - * @param skip_master - * If true, do not return the ID of the master lcore. - * @param wrap - * If true, go back to 0 when RTE_MAX_LCORE is reached; otherwise, - * return RTE_MAX_LCORE. 
- * @return - * The next lcore_id or RTE_MAX_LCORE if not found. - */ -unsigned int rte_get_next_lcore(unsigned int i, int skip_master, int wrap); - -/** - * Macro to browse all running lcores. - */ -#define RTE_LCORE_FOREACH(i) \ - for (i = rte_get_next_lcore(-1, 0, 0); \ - i -#include -#include -#include -#include - -#include -#include -#include - -struct rte_log_dynamic_type; - -/** The rte_log structure. */ -struct rte_logs { - uint32_t type; /**< Bitfield with enabled logs. */ - uint32_t level; /**< Log level. */ - FILE *file; /**< Output file set by rte_openlog_stream, or NULL. */ - size_t dynamic_types_len; - struct rte_log_dynamic_type *dynamic_types; -}; - -/** Global log information */ -extern struct rte_logs rte_logs; - -/* SDK log type */ -#define RTE_LOGTYPE_EAL 0 /**< Log related to eal. */ -#define RTE_LOGTYPE_MALLOC 1 /**< Log related to malloc. */ -#define RTE_LOGTYPE_RING 2 /**< Log related to ring. */ -#define RTE_LOGTYPE_MEMPOOL 3 /**< Log related to mempool. */ -#define RTE_LOGTYPE_TIMER 4 /**< Log related to timers. */ -#define RTE_LOGTYPE_PMD 5 /**< Log related to poll mode driver. */ -#define RTE_LOGTYPE_HASH 6 /**< Log related to hash table. */ -#define RTE_LOGTYPE_LPM 7 /**< Log related to LPM. */ -#define RTE_LOGTYPE_KNI 8 /**< Log related to KNI. */ -#define RTE_LOGTYPE_ACL 9 /**< Log related to ACL. */ -#define RTE_LOGTYPE_POWER 10 /**< Log related to power. */ -#define RTE_LOGTYPE_METER 11 /**< Log related to QoS meter. */ -#define RTE_LOGTYPE_SCHED 12 /**< Log related to QoS port scheduler. */ -#define RTE_LOGTYPE_PORT 13 /**< Log related to port. */ -#define RTE_LOGTYPE_TABLE 14 /**< Log related to table. */ -#define RTE_LOGTYPE_PIPELINE 15 /**< Log related to pipeline. */ -#define RTE_LOGTYPE_MBUF 16 /**< Log related to mbuf. */ -#define RTE_LOGTYPE_CRYPTODEV 17 /**< Log related to cryptodev. */ -#define RTE_LOGTYPE_EFD 18 /**< Log related to EFD. */ -#define RTE_LOGTYPE_EVENTDEV 19 /**< Log related to eventdev. */ -#define RTE_LOGTYPE_GSO 20 /**< Log related to GSO. */ - -/* these log types can be used in an application */ -#define RTE_LOGTYPE_USER1 24 /**< User-defined log type 1. */ -#define RTE_LOGTYPE_USER2 25 /**< User-defined log type 2. */ -#define RTE_LOGTYPE_USER3 26 /**< User-defined log type 3. */ -#define RTE_LOGTYPE_USER4 27 /**< User-defined log type 4. */ -#define RTE_LOGTYPE_USER5 28 /**< User-defined log type 5. */ -#define RTE_LOGTYPE_USER6 29 /**< User-defined log type 6. */ -#define RTE_LOGTYPE_USER7 30 /**< User-defined log type 7. */ -#define RTE_LOGTYPE_USER8 31 /**< User-defined log type 8. */ - -/** First identifier for extended logs */ -#define RTE_LOGTYPE_FIRST_EXT_ID 32 - -/* Can't use 0, as it gives compiler warnings */ -#define RTE_LOG_EMERG 1U /**< System is unusable. */ -#define RTE_LOG_ALERT 2U /**< Action must be taken immediately. */ -#define RTE_LOG_CRIT 3U /**< Critical conditions. */ -#define RTE_LOG_ERR 4U /**< Error conditions. */ -#define RTE_LOG_WARNING 5U /**< Warning conditions. */ -#define RTE_LOG_NOTICE 6U /**< Normal but significant condition. */ -#define RTE_LOG_INFO 7U /**< Informational. */ -#define RTE_LOG_DEBUG 8U /**< Debug-level messages. */ - -/** - * Change the stream that will be used by the logging system. - * - * This can be done at any time. The f argument represents the stream - * to be used to send the logs. If f is NULL, the default output is - * used (stderr). - * - * @param f - * Pointer to the stream. - * @return - * - 0 on success. - * - Negative on error. 
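For illustration, a small sketch that prints the topology information exposed by this header (assumes the EAL is initialised):

#include <stdio.h>
#include <rte_lcore.h>

static void
dump_lcore_layout(void)
{
	unsigned int lcore_id;

	printf("%u lcores, master lcore %u, %u NUMA sockets\n",
		rte_lcore_count(), rte_get_master_lcore(), rte_socket_count());

	RTE_LCORE_FOREACH(lcore_id) {
		printf("lcore %u: index %d, socket %u%s\n",
			lcore_id,
			rte_lcore_index(lcore_id),
			rte_lcore_to_socket_id(lcore_id),
			lcore_id == rte_get_master_lcore() ? " (master)" : "");
	}
}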
- */ -int rte_openlog_stream(FILE *f); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Retrieve the stream used by the logging system (see rte_openlog_stream() - * to change it). - * - * @return - * Pointer to the stream. - */ -__rte_experimental -FILE *rte_log_get_stream(void); - -/** - * Set the global log level. - * - * After this call, logs with a level lower or equal than the level - * passed as argument will be displayed. - * - * @param level - * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). - */ -void rte_log_set_global_level(uint32_t level); - -/** - * Get the global log level. - * - * @return - * The current global log level. - */ -uint32_t rte_log_get_global_level(void); - -/** - * Get the log level for a given type. - * - * @param logtype - * The log type identifier. - * @return - * 0 on success, a negative value if logtype is invalid. - */ -int rte_log_get_level(uint32_t logtype); - -/** - * For a given `logtype`, check if a log with `loglevel` can be printed. - * - * @param logtype - * The log type identifier - * @param loglevel - * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). - * @return - * Returns 'true' if log can be printed and 'false' if it can't. - */ -__rte_experimental -bool rte_log_can_log(uint32_t logtype, uint32_t loglevel); - -/** - * Set the log level for a given type based on shell pattern. - * - * @param pattern - * The match pattern identifying the log type. - * @param level - * The level to be set. - * @return - * 0 on success, a negative value if level is invalid. - */ -int rte_log_set_level_pattern(const char *pattern, uint32_t level); - -/** - * Set the log level for a given type based on regular expression. - * - * @param regex - * The regular expression identifying the log type. - * @param level - * The level to be set. - * @return - * 0 on success, a negative value if level is invalid. - */ -int rte_log_set_level_regexp(const char *regex, uint32_t level); - -/** - * Set the log level for a given type. - * - * @param logtype - * The log type identifier. - * @param level - * The level to be set. - * @return - * 0 on success, a negative value if logtype or level is invalid. - */ -int rte_log_set_level(uint32_t logtype, uint32_t level); - -/** - * Get the current loglevel for the message being processed. - * - * Before calling the user-defined stream for logging, the log - * subsystem sets a per-lcore variable containing the loglevel and the - * logtype of the message being processed. This information can be - * accessed by the user-defined log output function through this - * function. - * - * @return - * The loglevel of the message being processed. - */ -int rte_log_cur_msg_loglevel(void); - -/** - * Get the current logtype for the message being processed. - * - * Before calling the user-defined stream for logging, the log - * subsystem sets a per-lcore variable containing the loglevel and the - * logtype of the message being processed. This information can be - * accessed by the user-defined log output function through this - * function. - * - * @return - * The logtype of the message being processed. - */ -int rte_log_cur_msg_logtype(void); - -/** - * Register a dynamic log type - * - * If a log is already registered with the same type, the returned value - * is the same than the previous one. - * - * @param name - * The string identifying the log type. - * @return - * - >0: success, the returned value is the log type identifier. 
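A hedged example of the stream and level controls above; the log file path and the "pmd.*" shell pattern are arbitrary choices for the sketch:

#include <stdio.h>
#include <rte_log.h>

static int
configure_logging(const char *path)
{
	FILE *f = fopen(path, "w");

	if (f == NULL)
		return -1;
	if (rte_openlog_stream(f) < 0) {	/* logs now go to 'f', not stderr */
		fclose(f);
		return -1;
	}

	rte_log_set_global_level(RTE_LOG_INFO);
	/* Keep debug output for driver log types only (shell-style pattern). */
	return rte_log_set_level_pattern("pmd.*", RTE_LOG_DEBUG);
}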
- * - (-ENOMEM): cannot allocate memory. - */ -int rte_log_register(const char *name); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Register a dynamic log type and try to pick its level from EAL options - * - * rte_log_register() is called inside. If successful, the function tries - * to search for matching regexp in the list of EAL log level options and - * pick the level from the last matching entry. If nothing can be applied - * from the list, the level will be set to the user-defined default value. - * - * @param name - * Name for the log type to be registered - * @param level_def - * Fallback level to be set if the global list has no matching options - * @return - * - >=0: the newly registered log type - * - <0: rte_log_register() error value - */ -__rte_experimental -int rte_log_register_type_and_pick_level(const char *name, uint32_t level_def); - -/** - * Dump log information. - * - * Dump the global level and the registered log types. - * - * @param f - * The output stream where the dump should be sent. - */ -void rte_log_dump(FILE *f); - -/** - * Generates a log message. - * - * The message will be sent in the stream defined by the previous call - * to rte_openlog_stream(). - * - * The level argument determines if the log should be displayed or - * not, depending on the global rte_logs variable. - * - * The preferred alternative is the RTE_LOG() because it adds the - * level and type in the logged string. - * - * @param level - * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). - * @param logtype - * The log type, for example, RTE_LOGTYPE_EAL. - * @param format - * The format string, as in printf(3), followed by the variable arguments - * required by the format. - * @return - * - 0: Success. - * - Negative on error. - */ -int rte_log(uint32_t level, uint32_t logtype, const char *format, ...) -#ifdef __GNUC__ -#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2)) - __attribute__((cold)) -#endif -#endif - __rte_format_printf(3, 4); - -/** - * Generates a log message. - * - * The message will be sent in the stream defined by the previous call - * to rte_openlog_stream(). - * - * The level argument determines if the log should be displayed or - * not, depending on the global rte_logs variable. A trailing - * newline may be added if needed. - * - * The preferred alternative is the RTE_LOG() because it adds the - * level and type in the logged string. - * - * @param level - * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). - * @param logtype - * The log type, for example, RTE_LOGTYPE_EAL. - * @param format - * The format string, as in printf(3), followed by the variable arguments - * required by the format. - * @param ap - * The va_list of the variable arguments required by the format. - * @return - * - 0: Success. - * - Negative on error. - */ -int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap) - __rte_format_printf(3, 0); - -/** - * Generates a log message. - * - * The RTE_LOG() is a helper that prefixes the string with the log level - * and type, and call rte_log(). - * - * @param l - * Log level. A value between EMERG (1) and DEBUG (8). The short name is - * expanded by the macro, so it cannot be an integer value. - * @param t - * The log type, for example, EAL. The short name is expanded by the - * macro, so it cannot be an integer value. - * @param ... - * The fmt string, as in printf(3), followed by the variable arguments - * required by the format. 
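The usual application-side pattern for a dynamic log type together with the RTE_LOG() helper is sketched below; the "myapp" type name and the RTE_LOGTYPE_MYAPP macro are invented for the example:

#include <rte_log.h>

/* The macro name must match the type token used in RTE_LOG(). */
static int myapp_logtype;
#define RTE_LOGTYPE_MYAPP myapp_logtype

static void
myapp_log_init(void)
{
	myapp_logtype = rte_log_register("myapp");
	if (myapp_logtype >= 0)
		rte_log_set_level(myapp_logtype, RTE_LOG_NOTICE);
}

static void
myapp_log_example(void)
{
	/* Expands to rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_MYAPP, "MYAPP: ..."). */
	RTE_LOG(NOTICE, MYAPP, "started with %d queues\n", 4);
	/* RTE_LOGTYPE_USER1..USER8 are reserved for applications as well. */
	RTE_LOG(INFO, USER1, "no registration needed for the USER types\n");
}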
- * @return - * - 0: Success. - * - Negative on error. - */ -#define RTE_LOG(l, t, ...) \ - rte_log(RTE_LOG_ ## l, \ - RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__) - -/** - * Generates a log message for data path. - * - * Similar to RTE_LOG(), except that it is removed at compilation time - * if the RTE_LOG_DP_LEVEL configuration option is lower than the log - * level argument. - * - * @param l - * Log level. A value between EMERG (1) and DEBUG (8). The short name is - * expanded by the macro, so it cannot be an integer value. - * @param t - * The log type, for example, EAL. The short name is expanded by the - * macro, so it cannot be an integer value. - * @param ... - * The fmt string, as in printf(3), followed by the variable arguments - * required by the format. - * @return - * - 0: Success. - * - Negative on error. - */ -#define RTE_LOG_DP(l, t, ...) \ - (void)((RTE_LOG_ ## l <= RTE_LOG_DP_LEVEL) ? \ - rte_log(RTE_LOG_ ## l, \ - RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__) : \ - 0) - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_LOG_H_ */ diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h deleted file mode 100644 index 42ca05182f..0000000000 --- a/lib/librte_eal/common/include/rte_malloc.h +++ /dev/null @@ -1,560 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2019 Intel Corporation - */ - -#ifndef _RTE_MALLOC_H_ -#define _RTE_MALLOC_H_ - -/** - * @file - * RTE Malloc. This library provides methods for dynamically allocating memory - * from hugepages. - */ - -#include -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Structure to hold heap statistics obtained from rte_malloc_get_socket_stats function. - */ -struct rte_malloc_socket_stats { - size_t heap_totalsz_bytes; /**< Total bytes on heap */ - size_t heap_freesz_bytes; /**< Total free bytes on heap */ - size_t greatest_free_size; /**< Size in bytes of largest free block */ - unsigned free_count; /**< Number of free elements on heap */ - unsigned alloc_count; /**< Number of allocated elements on heap */ - size_t heap_allocsz_bytes; /**< Total allocated bytes on heap */ -}; - -/** - * This function allocates memory from the huge-page area of memory. The memory - * is not cleared. In NUMA systems, the memory allocated resides on the same - * NUMA socket as the core that calls this function. - * - * @param type - * A string identifying the type of allocated objects (useful for debug - * purposes, such as identifying the cause of a memory leak). Can be NULL. - * @param size - * Size (in bytes) to be allocated. - * @param align - * If 0, the return is a pointer that is suitably aligned for any kind of - * variable (in the same manner as malloc()). - * Otherwise, the return is a pointer that is a multiple of *align*. In - * this case, it must be a power of two. (Minimum alignment is the - * cacheline size, i.e. 64-bytes) - * @return - * - NULL on error. Not enough memory, or invalid arguments (size is 0, - * align is not a power of two). - * - Otherwise, the pointer to the allocated object. - */ -void * -rte_malloc(const char *type, size_t size, unsigned align); - -/** - * Allocate zero'ed memory from the heap. - * - * Equivalent to rte_malloc() except that the memory zone is - * initialised with zeros. In NUMA systems, the memory allocated resides on the - * same NUMA socket as the core that calls this function. 
- * - * @param type - * A string identifying the type of allocated objects (useful for debug - * purposes, such as identifying the cause of a memory leak). Can be NULL. - * @param size - * Size (in bytes) to be allocated. - * @param align - * If 0, the return is a pointer that is suitably aligned for any kind of - * variable (in the same manner as malloc()). - * Otherwise, the return is a pointer that is a multiple of *align*. In - * this case, it must obviously be a power of two. (Minimum alignment is the - * cacheline size, i.e. 64-bytes) - * @return - * - NULL on error. Not enough memory, or invalid arguments (size is 0, - * align is not a power of two). - * - Otherwise, the pointer to the allocated object. - */ -void * -rte_zmalloc(const char *type, size_t size, unsigned align); - -/** - * Replacement function for calloc(), using huge-page memory. Memory area is - * initialised with zeros. In NUMA systems, the memory allocated resides on the - * same NUMA socket as the core that calls this function. - * - * @param type - * A string identifying the type of allocated objects (useful for debug - * purposes, such as identifying the cause of a memory leak). Can be NULL. - * @param num - * Number of elements to be allocated. - * @param size - * Size (in bytes) of a single element. - * @param align - * If 0, the return is a pointer that is suitably aligned for any kind of - * variable (in the same manner as malloc()). - * Otherwise, the return is a pointer that is a multiple of *align*. In - * this case, it must obviously be a power of two. (Minimum alignment is the - * cacheline size, i.e. 64-bytes) - * @return - * - NULL on error. Not enough memory, or invalid arguments (size is 0, - * align is not a power of two). - * - Otherwise, the pointer to the allocated object. - */ -void * -rte_calloc(const char *type, size_t num, size_t size, unsigned align); - -/** - * Replacement function for realloc(), using huge-page memory. Reserved area - * memory is resized, preserving contents. In NUMA systems, the new area - * may not reside on the same NUMA node as the old one. - * - * @param ptr - * Pointer to already allocated memory - * @param size - * Size (in bytes) of new area. If this is 0, memory is freed. - * @param align - * If 0, the return is a pointer that is suitably aligned for any kind of - * variable (in the same manner as malloc()). - * Otherwise, the return is a pointer that is a multiple of *align*. In - * this case, it must obviously be a power of two. (Minimum alignment is the - * cacheline size, i.e. 64-bytes) - * @return - * - NULL on error. Not enough memory, or invalid arguments (size is 0, - * align is not a power of two). - * - Otherwise, the pointer to the reallocated memory. - */ -void * -rte_realloc(void *ptr, size_t size, unsigned int align); - -/** - * Replacement function for realloc(), using huge-page memory. Reserved area - * memory is resized, preserving contents. In NUMA systems, the new area - * resides on requested NUMA socket. - * - * @param ptr - * Pointer to already allocated memory - * @param size - * Size (in bytes) of new area. If this is 0, memory is freed. - * @param align - * If 0, the return is a pointer that is suitably aligned for any kind of - * variable (in the same manner as malloc()). - * Otherwise, the return is a pointer that is a multiple of *align*. In - * this case, it must obviously be a power of two. (Minimum alignment is the - * cacheline size, i.e. 64-bytes) - * @param socket - * NUMA socket to allocate memory on. 
- * @return - * - NULL on error. Not enough memory, or invalid arguments (size is 0, - * align is not a power of two). - * - Otherwise, the pointer to the reallocated memory. - */ -__rte_experimental -void * -rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket); - -/** - * This function allocates memory from the huge-page area of memory. The memory - * is not cleared. - * - * @param type - * A string identifying the type of allocated objects (useful for debug - * purposes, such as identifying the cause of a memory leak). Can be NULL. - * @param size - * Size (in bytes) to be allocated. - * @param align - * If 0, the return is a pointer that is suitably aligned for any kind of - * variable (in the same manner as malloc()). - * Otherwise, the return is a pointer that is a multiple of *align*. In - * this case, it must be a power of two. (Minimum alignment is the - * cacheline size, i.e. 64-bytes) - * @param socket - * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this function - * will behave the same as rte_malloc(). - * @return - * - NULL on error. Not enough memory, or invalid arguments (size is 0, - * align is not a power of two). - * - Otherwise, the pointer to the allocated object. - */ -void * -rte_malloc_socket(const char *type, size_t size, unsigned align, int socket); - -/** - * Allocate zero'ed memory from the heap. - * - * Equivalent to rte_malloc() except that the memory zone is - * initialised with zeros. - * - * @param type - * A string identifying the type of allocated objects (useful for debug - * purposes, such as identifying the cause of a memory leak). Can be NULL. - * @param size - * Size (in bytes) to be allocated. - * @param align - * If 0, the return is a pointer that is suitably aligned for any kind of - * variable (in the same manner as malloc()). - * Otherwise, the return is a pointer that is a multiple of *align*. In - * this case, it must obviously be a power of two. (Minimum alignment is the - * cacheline size, i.e. 64-bytes) - * @param socket - * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this function - * will behave the same as rte_zmalloc(). - * @return - * - NULL on error. Not enough memory, or invalid arguments (size is 0, - * align is not a power of two). - * - Otherwise, the pointer to the allocated object. - */ -void * -rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket); - -/** - * Replacement function for calloc(), using huge-page memory. Memory area is - * initialised with zeros. - * - * @param type - * A string identifying the type of allocated objects (useful for debug - * purposes, such as identifying the cause of a memory leak). Can be NULL. - * @param num - * Number of elements to be allocated. - * @param size - * Size (in bytes) of a single element. - * @param align - * If 0, the return is a pointer that is suitably aligned for any kind of - * variable (in the same manner as malloc()). - * Otherwise, the return is a pointer that is a multiple of *align*. In - * this case, it must obviously be a power of two. (Minimum alignment is the - * cacheline size, i.e. 64-bytes) - * @param socket - * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this function - * will behave the same as rte_calloc(). - * @return - * - NULL on error. Not enough memory, or invalid arguments (size is 0, - * align is not a power of two). - * - Otherwise, the pointer to the allocated object. 
- */ -void * -rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket); - -/** - * Frees the memory space pointed to by the provided pointer. - * - * This pointer must have been returned by a previous call to - * rte_malloc(), rte_zmalloc(), rte_calloc() or rte_realloc(). The behaviour of - * rte_free() is undefined if the pointer does not match this requirement. - * - * If the pointer is NULL, the function does nothing. - * - * @param ptr - * The pointer to memory to be freed. - */ -void -rte_free(void *ptr); - -/** - * If malloc debug is enabled, check a memory block for header - * and trailer markers to indicate that all is well with the block. - * If size is non-null, also return the size of the block. - * - * @param ptr - * pointer to the start of a data block, must have been returned - * by a previous call to rte_malloc(), rte_zmalloc(), rte_calloc() - * or rte_realloc() - * @param size - * if non-null, and memory block pointer is valid, returns the size - * of the memory block - * @return - * -1 on error, invalid pointer passed or header and trailer markers - * are missing or corrupted - * 0 on success - */ -int -rte_malloc_validate(const void *ptr, size_t *size); - -/** - * Get heap statistics for the specified heap. - * - * @note This function is not thread-safe with respect to - * ``rte_malloc_heap_create()``/``rte_malloc_heap_destroy()`` functions. - * - * @param socket - * An unsigned integer specifying the socket to get heap statistics for - * @param socket_stats - * A structure which provides memory to store statistics - * @return - * Null on error - * Pointer to structure storing statistics on success - */ -int -rte_malloc_get_socket_stats(int socket, - struct rte_malloc_socket_stats *socket_stats); - -/** - * Add memory chunk to a heap with specified name. - * - * @note Multiple memory chunks can be added to the same heap - * - * @note Before accessing this memory in other processes, it needs to be - * attached in each of those processes by calling - * ``rte_malloc_heap_memory_attach`` in each other process. - * - * @note Memory must be previously allocated for DPDK to be able to use it as a - * malloc heap. Failing to do so will result in undefined behavior, up to and - * including segmentation faults. - * - * @note Calling this function will erase any contents already present at the - * supplied memory address. - * - * @param heap_name - * Name of the heap to add memory chunk to - * @param va_addr - * Start of virtual area to add to the heap. Must be aligned by ``page_sz``. - * @param len - * Length of virtual area to add to the heap. Must be aligned by ``page_sz``. - * @param iova_addrs - * Array of page IOVA addresses corresponding to each page in this memory - * area. Can be NULL, in which case page IOVA addresses will be set to - * RTE_BAD_IOVA. - * @param n_pages - * Number of elements in the iova_addrs array. Ignored if ``iova_addrs`` - * is NULL. - * @param page_sz - * Page size of the underlying memory - * - * @return - * - 0 on success - * - -1 in case of error, with rte_errno set to one of the following: - * EINVAL - one of the parameters was invalid - * EPERM - attempted to add memory to a reserved heap - * ENOSPC - no more space in internal config to store a new memory chunk - */ -__rte_experimental -int -rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len, - rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz); - -/** - * Remove memory chunk from heap with specified name. 
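For reference, a short allocation sketch using the calls above (the "flow_table" tag is just a debug label; error handling is kept minimal):

#include <stddef.h>
#include <stdint.h>
#include <rte_lcore.h>
#include <rte_malloc.h>

static uint64_t *
alloc_flow_table(size_t entries)
{
	/* Zeroed hugepage memory on the caller's NUMA socket; align 0 means
	 * "at least cache-line aligned". */
	return rte_zmalloc_socket("flow_table", entries * sizeof(uint64_t),
			0, rte_socket_id());
}

static uint64_t *
grow_flow_table(uint64_t *tbl, size_t new_entries)
{
	/* Contents are preserved across the resize; NULL means the resize
	 * failed and this sketch simply keeps the old table. */
	uint64_t *bigger = rte_realloc(tbl, new_entries * sizeof(uint64_t), 0);

	return bigger != NULL ? bigger : tbl;
}

static void
release_flow_table(uint64_t *tbl)
{
	rte_free(tbl);	/* a NULL pointer is silently ignored */
}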
- * - * @note Memory chunk being removed must be the same as one that was added; - * partially removing memory chunks is not supported - * - * @note Memory area must not contain any allocated elements to allow its - * removal from the heap - * - * @note All other processes must detach from the memory chunk prior to it being - * removed from the heap. - * - * @param heap_name - * Name of the heap to remove memory from - * @param va_addr - * Virtual address to remove from the heap - * @param len - * Length of virtual area to remove from the heap - * - * @return - * - 0 on success - * - -1 in case of error, with rte_errno set to one of the following: - * EINVAL - one of the parameters was invalid - * EPERM - attempted to remove memory from a reserved heap - * ENOENT - heap or memory chunk was not found - * EBUSY - memory chunk still contains data - */ -__rte_experimental -int -rte_malloc_heap_memory_remove(const char *heap_name, void *va_addr, size_t len); - -/** - * Attach to an already existing chunk of external memory in another process. - * - * @note This function must be called before any attempt is made to use an - * already existing external memory chunk. This function does *not* need to - * be called if a call to ``rte_malloc_heap_memory_add`` was made in the - * current process. - * - * @param heap_name - * Heap name to which this chunk of memory belongs - * @param va_addr - * Start address of memory chunk to attach to - * @param len - * Length of memory chunk to attach to - * @return - * 0 on successful attach - * -1 on unsuccessful attach, with rte_errno set to indicate cause for error: - * EINVAL - one of the parameters was invalid - * EPERM - attempted to attach memory to a reserved heap - * ENOENT - heap or memory chunk was not found - */ -__rte_experimental -int -rte_malloc_heap_memory_attach(const char *heap_name, void *va_addr, size_t len); - -/** - * Detach from a chunk of external memory in secondary process. - * - * @note This function must be called in before any attempt is made to remove - * external memory from the heap in another process. This function does *not* - * need to be called if a call to ``rte_malloc_heap_memory_remove`` will be - * called in current process. - * - * @param heap_name - * Heap name to which this chunk of memory belongs - * @param va_addr - * Start address of memory chunk to attach to - * @param len - * Length of memory chunk to attach to - * @return - * 0 on successful detach - * -1 on unsuccessful detach, with rte_errno set to indicate cause for error: - * EINVAL - one of the parameters was invalid - * EPERM - attempted to detach memory from a reserved heap - * ENOENT - heap or memory chunk was not found - */ -__rte_experimental -int -rte_malloc_heap_memory_detach(const char *heap_name, void *va_addr, size_t len); - -/** - * Creates a new empty malloc heap with a specified name. - * - * @note Heaps created via this call will automatically get assigned a unique - * socket ID, which can be found using ``rte_malloc_heap_get_socket()`` - * - * @param heap_name - * Name of the heap to create. - * - * @return - * - 0 on successful creation - * - -1 in case of error, with rte_errno set to one of the following: - * EINVAL - ``heap_name`` was NULL, empty or too long - * EEXIST - heap by name of ``heap_name`` already exists - * ENOSPC - no more space in internal config to store a new heap - */ -__rte_experimental -int -rte_malloc_heap_create(const char *heap_name); - -/** - * Destroys a previously created malloc heap with specified name. 
- * - * @note This function will return a failure result if not all memory allocated - * from the heap has been freed back to the heap - * - * @note This function will return a failure result if not all memory segments - * were removed from the heap prior to its destruction - * - * @param heap_name - * Name of the heap to create. - * - * @return - * - 0 on success - * - -1 in case of error, with rte_errno set to one of the following: - * EINVAL - ``heap_name`` was NULL, empty or too long - * ENOENT - heap by the name of ``heap_name`` was not found - * EPERM - attempting to destroy reserved heap - * EBUSY - heap still contains data - */ -__rte_experimental -int -rte_malloc_heap_destroy(const char *heap_name); - -/** - * Find socket ID corresponding to a named heap. - * - * @param name - * Heap name to find socket ID for - * @return - * Socket ID in case of success (a non-negative number) - * -1 in case of error, with rte_errno set to one of the following: - * EINVAL - ``name`` was NULL - * ENOENT - heap identified by the name ``name`` was not found - */ -__rte_experimental -int -rte_malloc_heap_get_socket(const char *name); - -/** - * Check if a given socket ID refers to externally allocated memory. - * - * @note Passing SOCKET_ID_ANY will return 0. - * - * @param socket_id - * Socket ID to check - * @return - * 1 if socket ID refers to externally allocated memory - * 0 if socket ID refers to internal DPDK memory - * -1 if socket ID is invalid - */ -__rte_experimental -int -rte_malloc_heap_socket_is_external(int socket_id); - -/** - * Dump statistics. - * - * Dump for the specified type to a file. If the type argument is - * NULL, all memory types will be dumped. - * - * @note This function is not thread-safe with respect to - * ``rte_malloc_heap_create()``/``rte_malloc_heap_destroy()`` functions. - * - * @param f - * A pointer to a file for output - * @param type - * A string identifying the type of objects to dump, or NULL - * to dump all objects. - */ -void -rte_malloc_dump_stats(FILE *f, const char *type); - -/** - * Dump contents of all malloc heaps to a file. - * - * @note This function is not thread-safe with respect to - * ``rte_malloc_heap_create()``/``rte_malloc_heap_destroy()`` functions. - * - * @param f - * A pointer to a file for output - */ -__rte_experimental -void -rte_malloc_dump_heaps(FILE *f); - -/** - * Set the maximum amount of allocated memory for this type. - * - * This is not yet implemented - * - * @param type - * A string identifying the type of allocated objects. - * @param max - * The maximum amount of allocated bytes for this type. - * @return - * - 0: Success. - * - (-1): Error. - */ -__rte_deprecated -int -rte_malloc_set_limit(const char *type, size_t max); - -/** - * Return the IO address of a virtual address obtained through - * rte_malloc - * - * @param addr - * Address obtained from a previous rte_malloc call - * @return - * RTE_BAD_IOVA on error - * otherwise return an address suitable for IO - */ -rte_iova_t -rte_malloc_virt2iova(const void *addr); - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_MALLOC_H_ */ diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h deleted file mode 100644 index 3d8d0bd697..0000000000 --- a/lib/librte_eal/common/include/rte_memory.h +++ /dev/null @@ -1,784 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_MEMORY_H_ -#define _RTE_MEMORY_H_ - -/** - * @file - * - * Memory-related RTE API. 
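The external-heap calls above combine roughly as follows; a sketch under the assumption that the caller already owns a page-aligned buffer, and that the build allows experimental APIs (the "ext_heap" and "ext_obj" names are made up):

#include <stddef.h>
#include <rte_malloc.h>

static void *
alloc_from_external(void *va, size_t len, size_t page_sz, size_t obj_sz)
{
	int socket;

	if (rte_malloc_heap_create("ext_heap") < 0)
		return NULL;
	/* NULL iova table: pages are recorded as RTE_BAD_IOVA (VA-only use). */
	if (rte_malloc_heap_memory_add("ext_heap", va, len, NULL, 0, page_sz) < 0)
		return NULL;

	socket = rte_malloc_heap_get_socket("ext_heap");
	if (socket < 0)
		return NULL;
	return rte_malloc_socket("ext_obj", obj_sz, 0, socket);
}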
- */ - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include -#include - -__extension__ -enum rte_page_sizes { - RTE_PGSIZE_4K = 1ULL << 12, - RTE_PGSIZE_64K = 1ULL << 16, - RTE_PGSIZE_256K = 1ULL << 18, - RTE_PGSIZE_2M = 1ULL << 21, - RTE_PGSIZE_16M = 1ULL << 24, - RTE_PGSIZE_256M = 1ULL << 28, - RTE_PGSIZE_512M = 1ULL << 29, - RTE_PGSIZE_1G = 1ULL << 30, - RTE_PGSIZE_4G = 1ULL << 32, - RTE_PGSIZE_16G = 1ULL << 34, -}; - -#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */ - -/** - * Physical memory segment descriptor. - */ -#define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0) -/**< Prevent this segment from being freed back to the OS. */ -struct rte_memseg { - RTE_STD_C11 - union { - phys_addr_t phys_addr; /**< deprecated - Start physical address. */ - rte_iova_t iova; /**< Start IO address. */ - }; - RTE_STD_C11 - union { - void *addr; /**< Start virtual address. */ - uint64_t addr_64; /**< Makes sure addr is always 64 bits */ - }; - size_t len; /**< Length of the segment. */ - uint64_t hugepage_sz; /**< The pagesize of underlying memory */ - int32_t socket_id; /**< NUMA socket ID. */ - uint32_t nchannel; /**< Number of channels. */ - uint32_t nrank; /**< Number of ranks. */ - uint32_t flags; /**< Memseg-specific flags */ -} __rte_packed; - -/** - * memseg list is a special case as we need to store a bunch of other data - * together with the array itself. - */ -struct rte_memseg_list { - RTE_STD_C11 - union { - void *base_va; - /**< Base virtual address for this memseg list. */ - uint64_t addr_64; - /**< Makes sure addr is always 64-bits */ - }; - uint64_t page_sz; /**< Page size for all memsegs in this list. */ - int socket_id; /**< Socket ID for all memsegs in this list. */ - volatile uint32_t version; /**< version number for multiprocess sync. */ - size_t len; /**< Length of memory area covered by this memseg list. */ - unsigned int external; /**< 1 if this list points to external memory */ - unsigned int heap; /**< 1 if this list points to a heap */ - struct rte_fbarray memseg_arr; -}; - -/** - * Lock page in physical memory and prevent from swapping. - * - * @param virt - * The virtual address. - * @return - * 0 on success, negative on error. - */ -int rte_mem_lock_page(const void *virt); - -/** - * Get physical address of any mapped virtual address in the current process. - * It is found by browsing the /proc/self/pagemap special file. - * The page must be locked. - * - * @param virt - * The virtual address. - * @return - * The physical address or RTE_BAD_IOVA on error. - */ -phys_addr_t rte_mem_virt2phy(const void *virt); - -/** - * Get IO virtual address of any mapped virtual address in the current process. - * - * @note This function will not check internal page table. Instead, in IOVA as - * PA mode, it will fall back to getting real physical address (which may - * not match the expected IOVA, such as what was specified for external - * memory). - * - * @param virt - * The virtual address. - * @return - * The IO address or RTE_BAD_IOVA on error. - */ -rte_iova_t rte_mem_virt2iova(const void *virt); - -/** - * Get virtual memory address corresponding to iova address. - * - * @note This function read-locks the memory hotplug subsystem, and thus cannot - * be used within memory-related callback functions. - * - * @param iova - * The iova address. - * @return - * Virtual address corresponding to iova address (or NULL if address does not - * exist within DPDK memory map). 
- */ -__rte_experimental -void * -rte_mem_iova2virt(rte_iova_t iova); - -/** - * Get memseg to which a particular virtual address belongs. - * - * @param virt - * The virtual address. - * @param msl - * The memseg list in which to look up based on ``virt`` address - * (can be NULL). - * @return - * Memseg pointer on success, or NULL on error. - */ -__rte_experimental -struct rte_memseg * -rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl); - -/** - * Get memseg list corresponding to virtual memory address. - * - * @param virt - * The virtual address. - * @return - * Memseg list to which this virtual address belongs to. - */ -__rte_experimental -struct rte_memseg_list * -rte_mem_virt2memseg_list(const void *virt); - -/** - * Memseg walk function prototype. - * - * Returning 0 will continue walk - * Returning 1 will stop the walk - * Returning -1 will stop the walk and report error - */ -typedef int (*rte_memseg_walk_t)(const struct rte_memseg_list *msl, - const struct rte_memseg *ms, void *arg); - -/** - * Memseg contig walk function prototype. This will trigger a callback on every - * VA-contiguous area starting at memseg ``ms``, so total valid VA space at each - * callback call will be [``ms->addr``, ``ms->addr + len``). - * - * Returning 0 will continue walk - * Returning 1 will stop the walk - * Returning -1 will stop the walk and report error - */ -typedef int (*rte_memseg_contig_walk_t)(const struct rte_memseg_list *msl, - const struct rte_memseg *ms, size_t len, void *arg); - -/** - * Memseg list walk function prototype. This will trigger a callback on every - * allocated memseg list. - * - * Returning 0 will continue walk - * Returning 1 will stop the walk - * Returning -1 will stop the walk and report error - */ -typedef int (*rte_memseg_list_walk_t)(const struct rte_memseg_list *msl, - void *arg); - -/** - * Walk list of all memsegs. - * - * @note This function read-locks the memory hotplug subsystem, and thus cannot - * be used within memory-related callback functions. - * - * @note This function will also walk through externally allocated segments. It - * is up to the user to decide whether to skip through these segments. - * - * @param func - * Iterator function - * @param arg - * Argument passed to iterator - * @return - * 0 if walked over the entire list - * 1 if stopped by the user - * -1 if user function reported error - */ -__rte_experimental -int -rte_memseg_walk(rte_memseg_walk_t func, void *arg); - -/** - * Walk each VA-contiguous area. - * - * @note This function read-locks the memory hotplug subsystem, and thus cannot - * be used within memory-related callback functions. - * - * @note This function will also walk through externally allocated segments. It - * is up to the user to decide whether to skip through these segments. - * - * @param func - * Iterator function - * @param arg - * Argument passed to iterator - * @return - * 0 if walked over the entire list - * 1 if stopped by the user - * -1 if user function reported error - */ -__rte_experimental -int -rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg); - -/** - * Walk each allocated memseg list. - * - * @note This function read-locks the memory hotplug subsystem, and thus cannot - * be used within memory-related callback functions. - * - * @note This function will also walk through externally allocated segments. It - * is up to the user to decide whether to skip through these segments. 
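A small sanity-check sketch for the address translation helpers above (rte_malloc_virt2iova() from rte_malloc.h could be used for the same buffer); whether a valid IOVA is obtainable depends on the IOVA mode and privileges:

#include <rte_malloc.h>
#include <rte_memory.h>

static int
check_iova_roundtrip(void)
{
	void *buf = rte_malloc(NULL, 4096, 0);
	rte_iova_t iova;
	void *back = NULL;

	if (buf == NULL)
		return -1;

	iova = rte_mem_virt2iova(buf);
	if (iova != RTE_BAD_IOVA)
		back = rte_mem_iova2virt(iova);	/* walks the DPDK memory map */

	rte_free(buf);
	return back == buf ? 0 : -1;
}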
- * - * @param func - * Iterator function - * @param arg - * Argument passed to iterator - * @return - * 0 if walked over the entire list - * 1 if stopped by the user - * -1 if user function reported error - */ -__rte_experimental -int -rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg); - -/** - * Walk list of all memsegs without performing any locking. - * - * @note This function does not perform any locking, and is only safe to call - * from within memory-related callback functions. - * - * @param func - * Iterator function - * @param arg - * Argument passed to iterator - * @return - * 0 if walked over the entire list - * 1 if stopped by the user - * -1 if user function reported error - */ -__rte_experimental -int -rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg); - -/** - * Walk each VA-contiguous area without performing any locking. - * - * @note This function does not perform any locking, and is only safe to call - * from within memory-related callback functions. - * - * @param func - * Iterator function - * @param arg - * Argument passed to iterator - * @return - * 0 if walked over the entire list - * 1 if stopped by the user - * -1 if user function reported error - */ -__rte_experimental -int -rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg); - -/** - * Walk each allocated memseg list without performing any locking. - * - * @note This function does not perform any locking, and is only safe to call - * from within memory-related callback functions. - * - * @param func - * Iterator function - * @param arg - * Argument passed to iterator - * @return - * 0 if walked over the entire list - * 1 if stopped by the user - * -1 if user function reported error - */ -__rte_experimental -int -rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg); - -/** - * Return file descriptor associated with a particular memseg (if available). - * - * @note This function read-locks the memory hotplug subsystem, and thus cannot - * be used within memory-related callback functions. - * - * @note This returns an internal file descriptor. Performing any operations on - * this file descriptor is inherently dangerous, so it should be treated - * as read-only for all intents and purposes. - * - * @param ms - * A pointer to memseg for which to get file descriptor. - * - * @return - * Valid file descriptor in case of success. - * -1 in case of error, with ``rte_errno`` set to the following values: - * - EINVAL - ``ms`` pointer was NULL or did not point to a valid memseg - * - ENODEV - ``ms`` fd is not available - * - ENOENT - ``ms`` is an unused segment - * - ENOTSUP - segment fd's are not supported - */ -__rte_experimental -int -rte_memseg_get_fd(const struct rte_memseg *ms); - -/** - * Return file descriptor associated with a particular memseg (if available). - * - * @note This function does not perform any locking, and is only safe to call - * from within memory-related callback functions. - * - * @note This returns an internal file descriptor. Performing any operations on - * this file descriptor is inherently dangerous, so it should be treated - * as read-only for all intents and purposes. - * - * @param ms - * A pointer to memseg for which to get file descriptor. - * - * @return - * Valid file descriptor in case of success. 
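The walk callbacks above follow the 0/1/-1 return convention; as a sketch, summing internal hugepage memory per NUMA socket could look like this (RTE_MAX_NUMA_NODES comes from rte_config.h):

#include <inttypes.h>
#include <stdio.h>
#include <rte_memory.h>

static int
count_seg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	uint64_t *per_socket = arg;

	if (!msl->external && ms->socket_id >= 0 &&
			ms->socket_id < RTE_MAX_NUMA_NODES)
		per_socket[ms->socket_id] += ms->len;
	return 0;	/* 0 continues the walk, 1 stops it, -1 reports an error */
}

static void
dump_per_socket_memory(void)
{
	uint64_t per_socket[RTE_MAX_NUMA_NODES] = {0};
	unsigned int i;

	if (rte_memseg_walk(count_seg, per_socket) < 0)
		return;
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		if (per_socket[i] != 0)
			printf("socket %u: %" PRIu64 " bytes\n",
				i, per_socket[i]);
}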
- * -1 in case of error, with ``rte_errno`` set to the following values: - * - EINVAL - ``ms`` pointer was NULL or did not point to a valid memseg - * - ENODEV - ``ms`` fd is not available - * - ENOENT - ``ms`` is an unused segment - * - ENOTSUP - segment fd's are not supported - */ -__rte_experimental -int -rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms); - -/** - * Get offset into segment file descriptor associated with a particular memseg - * (if available). - * - * @note This function read-locks the memory hotplug subsystem, and thus cannot - * be used within memory-related callback functions. - * - * @param ms - * A pointer to memseg for which to get file descriptor. - * @param offset - * A pointer to offset value where the result will be stored. - * - * @return - * Valid file descriptor in case of success. - * -1 in case of error, with ``rte_errno`` set to the following values: - * - EINVAL - ``ms`` pointer was NULL or did not point to a valid memseg - * - EINVAL - ``offset`` pointer was NULL - * - ENODEV - ``ms`` fd is not available - * - ENOENT - ``ms`` is an unused segment - * - ENOTSUP - segment fd's are not supported - */ -__rte_experimental -int -rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset); - -/** - * Get offset into segment file descriptor associated with a particular memseg - * (if available). - * - * @note This function does not perform any locking, and is only safe to call - * from within memory-related callback functions. - * - * @param ms - * A pointer to memseg for which to get file descriptor. - * @param offset - * A pointer to offset value where the result will be stored. - * - * @return - * Valid file descriptor in case of success. - * -1 in case of error, with ``rte_errno`` set to the following values: - * - EINVAL - ``ms`` pointer was NULL or did not point to a valid memseg - * - EINVAL - ``offset`` pointer was NULL - * - ENODEV - ``ms`` fd is not available - * - ENOENT - ``ms`` is an unused segment - * - ENOTSUP - segment fd's are not supported - */ -__rte_experimental -int -rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms, - size_t *offset); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Register external memory chunk with DPDK. - * - * @note Using this API is mutually exclusive with ``rte_malloc`` family of - * API's. - * - * @note This API will not perform any DMA mapping. It is expected that user - * will do that themselves. - * - * @note Before accessing this memory in other processes, it needs to be - * attached in each of those processes by calling ``rte_extmem_attach`` in - * each other process. - * - * @param va_addr - * Start of virtual area to register. Must be aligned by ``page_sz``. - * @param len - * Length of virtual area to register. Must be aligned by ``page_sz``. - * @param iova_addrs - * Array of page IOVA addresses corresponding to each page in this memory - * area. Can be NULL, in which case page IOVA addresses will be set to - * RTE_BAD_IOVA. - * @param n_pages - * Number of elements in the iova_addrs array. Ignored if ``iova_addrs`` - * is NULL. 
- * @param page_sz - * Page size of the underlying memory - * - * @return - * - 0 on success - * - -1 in case of error, with rte_errno set to one of the following: - * EINVAL - one of the parameters was invalid - * EEXIST - memory chunk is already registered - * ENOSPC - no more space in internal config to store a new memory chunk - */ -__rte_experimental -int -rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[], - unsigned int n_pages, size_t page_sz); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Unregister external memory chunk with DPDK. - * - * @note Using this API is mutually exclusive with ``rte_malloc`` family of - * API's. - * - * @note This API will not perform any DMA unmapping. It is expected that user - * will do that themselves. - * - * @note Before calling this function, all other processes must call - * ``rte_extmem_detach`` to detach from the memory area. - * - * @param va_addr - * Start of virtual area to unregister - * @param len - * Length of virtual area to unregister - * - * @return - * - 0 on success - * - -1 in case of error, with rte_errno set to one of the following: - * EINVAL - one of the parameters was invalid - * ENOENT - memory chunk was not found - */ -__rte_experimental -int -rte_extmem_unregister(void *va_addr, size_t len); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Attach to external memory chunk registered in another process. - * - * @note Using this API is mutually exclusive with ``rte_malloc`` family of - * API's. - * - * @note This API will not perform any DMA mapping. It is expected that user - * will do that themselves. - * - * @param va_addr - * Start of virtual area to register - * @param len - * Length of virtual area to register - * - * @return - * - 0 on success - * - -1 in case of error, with rte_errno set to one of the following: - * EINVAL - one of the parameters was invalid - * ENOENT - memory chunk was not found - */ -__rte_experimental -int -rte_extmem_attach(void *va_addr, size_t len); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Detach from external memory chunk registered in another process. - * - * @note Using this API is mutually exclusive with ``rte_malloc`` family of - * API's. - * - * @note This API will not perform any DMA unmapping. It is expected that user - * will do that themselves. - * - * @param va_addr - * Start of virtual area to unregister - * @param len - * Length of virtual area to unregister - * - * @return - * - 0 on success - * - -1 in case of error, with rte_errno set to one of the following: - * EINVAL - one of the parameters was invalid - * ENOENT - memory chunk was not found - */ -__rte_experimental -int -rte_extmem_detach(void *va_addr, size_t len); - -/** - * Dump the physical memory layout to a file. - * - * @note This function read-locks the memory hotplug subsystem, and thus cannot - * be used within memory-related callback functions. - * - * @param f - * A pointer to a file for output - */ -void rte_dump_physmem_layout(FILE *f); - -/** - * Get the total amount of available physical memory. - * - * @note This function read-locks the memory hotplug subsystem, and thus cannot - * be used within memory-related callback functions. - * - * @return - * The total amount of available physical memory in bytes. - */ -uint64_t rte_eal_get_physmem_size(void); - -/** - * Get the number of memory channels. 
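A sketch of how the extmem calls above fit together, assuming va, len and page_sz describe a page-aligned area obtained outside of DPDK; DMA mapping for devices remains a separate, later step:

#include <stddef.h>
#include <rte_memory.h>

static int
register_external_buffer(void *va, size_t len, size_t page_sz)
{
	/* No IOVA table: pages are recorded as RTE_BAD_IOVA (VA-only use). */
	return rte_extmem_register(va, len, NULL, 0, page_sz);
}

/* Secondary processes must attach before touching the same area. */
static int
attach_external_buffer(void *va, size_t len)
{
	return rte_extmem_attach(va, len);
}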
- * - * @return - * The number of memory channels on the system. The value is 0 if unknown - * or not the same on all devices. - */ -unsigned rte_memory_get_nchannel(void); - -/** - * Get the number of memory ranks. - * - * @return - * The number of memory ranks on the system. The value is 0 if unknown or - * not the same on all devices. - */ -unsigned rte_memory_get_nrank(void); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Check if all currently allocated memory segments are compliant with - * supplied DMA address width. - * - * @param maskbits - * Address width to check against. - */ -__rte_experimental -int rte_mem_check_dma_mask(uint8_t maskbits); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Check if all currently allocated memory segments are compliant with - * supplied DMA address width. This function will use - * rte_memseg_walk_thread_unsafe instead of rte_memseg_walk implying - * memory_hotplug_lock will not be acquired avoiding deadlock during - * memory initialization. - * - * This function is just for EAL core memory internal use. Drivers should - * use the previous rte_mem_check_dma_mask. - * - * @param maskbits - * Address width to check against. - */ -__rte_experimental -int rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits); - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Set dma mask to use once memory initialization is done. Previous functions - * rte_mem_check_dma_mask and rte_mem_check_dma_mask_thread_unsafe can not be - * used safely until memory has been initialized. - */ -__rte_experimental -void rte_mem_set_dma_mask(uint8_t maskbits); - -/** - * Drivers based on uio will not load unless physical - * addresses are obtainable. It is only possible to get - * physical addresses when running as a privileged user. - * - * @return - * 1 if the system is able to obtain physical addresses. - * 0 if using DMA addresses through an IOMMU. - */ -int rte_eal_using_phys_addrs(void); - - -/** - * Enum indicating which kind of memory event has happened. Used by callbacks to - * distinguish between memory allocations and deallocations. - */ -enum rte_mem_event { - RTE_MEM_EVENT_ALLOC = 0, /**< Allocation event. */ - RTE_MEM_EVENT_FREE, /**< Deallocation event. */ -}; -#define RTE_MEM_EVENT_CALLBACK_NAME_LEN 64 -/**< maximum length of callback name */ - -/** - * Function typedef used to register callbacks for memory events. - */ -typedef void (*rte_mem_event_callback_t)(enum rte_mem_event event_type, - const void *addr, size_t len, void *arg); - -/** - * Function used to register callbacks for memory events. - * - * @note callbacks will happen while memory hotplug subsystem is write-locked, - * therefore some functions (e.g. `rte_memseg_walk()`) will cause a - * deadlock when called from within such callbacks. - * - * @note mem event callbacks not being supported is an expected error condition, - * so user code needs to handle this situation. In these cases, return - * value will be -1, and rte_errno will be set to ENOTSUP. - * - * @param name - * Name associated with specified callback to be added to the list. - * - * @param clb - * Callback function pointer. - * - * @param arg - * Argument to pass to the callback. - * - * @return - * 0 on successful callback register - * -1 on unsuccessful callback register, with rte_errno value indicating - * reason for failure. 
- */ -__rte_experimental -int -rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb, - void *arg); - -/** - * Function used to unregister callbacks for memory events. - * - * @param name - * Name associated with specified callback to be removed from the list. - * - * @param arg - * Argument to look for among callbacks with specified callback name. - * - * @return - * 0 on successful callback unregister - * -1 on unsuccessful callback unregister, with rte_errno value indicating - * reason for failure. - */ -__rte_experimental -int -rte_mem_event_callback_unregister(const char *name, void *arg); - - -#define RTE_MEM_ALLOC_VALIDATOR_NAME_LEN 64 -/**< maximum length of alloc validator name */ -/** - * Function typedef used to register memory allocation validation callbacks. - * - * Returning 0 will allow allocation attempt to continue. Returning -1 will - * prevent allocation from succeeding. - */ -typedef int (*rte_mem_alloc_validator_t)(int socket_id, - size_t cur_limit, size_t new_len); - -/** - * @brief Register validator callback for memory allocations. - * - * Callbacks registered by this function will be called right before memory - * allocator is about to trigger allocation of more pages from the system if - * said allocation will bring total memory usage above specified limit on - * specified socket. User will be able to cancel pending allocation if callback - * returns -1. - * - * @note callbacks will happen while memory hotplug subsystem is write-locked, - * therefore some functions (e.g. `rte_memseg_walk()`) will cause a - * deadlock when called from within such callbacks. - * - * @note validator callbacks not being supported is an expected error condition, - * so user code needs to handle this situation. In these cases, return - * value will be -1, and rte_errno will be set to ENOTSUP. - * - * @param name - * Name associated with specified callback to be added to the list. - * - * @param clb - * Callback function pointer. - * - * @param socket_id - * Socket ID on which to watch for allocations. - * - * @param limit - * Limit above which to trigger callbacks. - * - * @return - * 0 on successful callback register - * -1 on unsuccessful callback register, with rte_errno value indicating - * reason for failure. - */ -__rte_experimental -int -rte_mem_alloc_validator_register(const char *name, - rte_mem_alloc_validator_t clb, int socket_id, size_t limit); - -/** - * @brief Unregister validator callback for memory allocations. - * - * @param name - * Name associated with specified callback to be removed from the list. - * - * @param socket_id - * Socket ID on which to watch for allocations. - * - * @return - * 0 on successful callback unregister - * -1 on unsuccessful callback unregister, with rte_errno value indicating - * reason for failure. - */ -__rte_experimental -int -rte_mem_alloc_validator_unregister(const char *name, int socket_id); - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_MEMORY_H_ */ diff --git a/lib/librte_eal/common/include/rte_memzone.h b/lib/librte_eal/common/include/rte_memzone.h deleted file mode 100644 index f478fa9e67..0000000000 --- a/lib/librte_eal/common/include/rte_memzone.h +++ /dev/null @@ -1,320 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_MEMZONE_H_ -#define _RTE_MEMZONE_H_ - -/** - * @file - * RTE Memzone - * - * The goal of the memzone allocator is to reserve contiguous - * portions of physical memory. These zones are identified by a name. 
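Returning briefly to the allocation-validator hook documented just above, before the memzone header that begins here: a minimal sketch of a hard per-socket cap. The callback name, socket 0 and the 1 GB limit are illustrative only.

#include <stdio.h>
#include <rte_memory.h>

static int
mem_cap_validator(int socket_id, size_t cur_limit, size_t new_len)
{
        /* Only called when an allocation would push usage past the limit;
         * returning -1 cancels it, returning 0 would let it proceed. */
        printf("socket %d: refusing growth to %zu bytes (limit %zu)\n",
                socket_id, new_len, cur_limit);
        return -1;
}

static void
install_mem_cap(void)
{
        rte_mem_alloc_validator_register("example-cap", mem_cap_validator,
                        0, (size_t)1 << 30);
}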
- * - * The memzone descriptors are shared by all partitions and are - * located in a known place of physical memory. This zone is accessed - * using rte_eal_get_configuration(). The lookup (by name) of a - * memory zone can be done in any partition and returns the same - * physical address. - * - * A reserved memory zone cannot be unreserved. The reservation shall - * be done at initialization time only. - */ - -#include -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#define RTE_MEMZONE_2MB 0x00000001 /**< Use 2MB pages. */ -#define RTE_MEMZONE_1GB 0x00000002 /**< Use 1GB pages. */ -#define RTE_MEMZONE_16MB 0x00000100 /**< Use 16MB pages. */ -#define RTE_MEMZONE_16GB 0x00000200 /**< Use 16GB pages. */ -#define RTE_MEMZONE_256KB 0x00010000 /**< Use 256KB pages. */ -#define RTE_MEMZONE_256MB 0x00020000 /**< Use 256MB pages. */ -#define RTE_MEMZONE_512MB 0x00040000 /**< Use 512MB pages. */ -#define RTE_MEMZONE_4GB 0x00080000 /**< Use 4GB pages. */ -#define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004 /**< Use available page size */ -#define RTE_MEMZONE_IOVA_CONTIG 0x00100000 /**< Ask for IOVA-contiguous memzone. */ - -/** - * A structure describing a memzone, which is a contiguous portion of - * physical memory identified by a name. - */ -struct rte_memzone { - -#define RTE_MEMZONE_NAMESIZE 32 /**< Maximum length of memory zone name.*/ - char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the memory zone. */ - - RTE_STD_C11 - union { - phys_addr_t phys_addr; /**< deprecated - Start physical address. */ - rte_iova_t iova; /**< Start IO address. */ - }; - RTE_STD_C11 - union { - void *addr; /**< Start virtual address. */ - uint64_t addr_64; /**< Makes sure addr is always 64-bits */ - }; - size_t len; /**< Length of the memzone. */ - - uint64_t hugepage_sz; /**< The page size of underlying memory */ - - int32_t socket_id; /**< NUMA socket ID. */ - - uint32_t flags; /**< Characteristics of this memzone. */ -} __attribute__((__packed__)); - -/** - * Reserve a portion of physical memory. - * - * This function reserves some memory and returns a pointer to a - * correctly filled memzone descriptor. If the allocation cannot be - * done, return NULL. - * - * @note Reserving memzones with len set to 0 will only attempt to allocate - * memzones from memory that is already available. It will not trigger any - * new allocations. - * - * @note: When reserving memzones with len set to 0, it is preferable to also - * set a valid socket_id. Setting socket_id to SOCKET_ID_ANY is supported, but - * will likely not yield expected results. Specifically, the resulting memzone - * may not necessarily be the biggest memzone available, but rather biggest - * memzone available on socket id corresponding to an lcore from which - * reservation was called. - * - * @param name - * The name of the memzone. If it already exists, the function will - * fail and return NULL. - * @param len - * The size of the memory to be reserved. If it - * is 0, the biggest contiguous zone will be reserved. - * @param socket_id - * The socket identifier in the case of - * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA - * constraint for the reserved zone. - * @param flags - * The flags parameter is used to request memzones to be - * taken from specifically sized hugepages. 
- * - RTE_MEMZONE_2MB - Reserved from 2MB pages - * - RTE_MEMZONE_1GB - Reserved from 1GB pages - * - RTE_MEMZONE_16MB - Reserved from 16MB pages - * - RTE_MEMZONE_16GB - Reserved from 16GB pages - * - RTE_MEMZONE_256KB - Reserved from 256KB pages - * - RTE_MEMZONE_256MB - Reserved from 256MB pages - * - RTE_MEMZONE_512MB - Reserved from 512MB pages - * - RTE_MEMZONE_4GB - Reserved from 4GB pages - * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if - * the requested page size is unavailable. - * If this flag is not set, the function - * will return error on an unavailable size - * request. - * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous. - * This option should be used when allocating - * memory intended for hardware rings etc. - * @return - * A pointer to a correctly-filled read-only memzone descriptor, or NULL - * on error. - * On error case, rte_errno will be set appropriately: - * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance - * - ENOSPC - the maximum number of memzones has already been allocated - * - EEXIST - a memzone with the same name already exists - * - ENOMEM - no appropriate memory area found in which to create memzone - * - EINVAL - invalid parameters - */ -const struct rte_memzone *rte_memzone_reserve(const char *name, - size_t len, int socket_id, - unsigned flags); - -/** - * Reserve a portion of physical memory with alignment on a specified - * boundary. - * - * This function reserves some memory with alignment on a specified - * boundary, and returns a pointer to a correctly filled memzone - * descriptor. If the allocation cannot be done or if the alignment - * is not a power of 2, returns NULL. - * - * @note Reserving memzones with len set to 0 will only attempt to allocate - * memzones from memory that is already available. It will not trigger any - * new allocations. - * - * @note: When reserving memzones with len set to 0, it is preferable to also - * set a valid socket_id. Setting socket_id to SOCKET_ID_ANY is supported, but - * will likely not yield expected results. Specifically, the resulting memzone - * may not necessarily be the biggest memzone available, but rather biggest - * memzone available on socket id corresponding to an lcore from which - * reservation was called. - * - * @param name - * The name of the memzone. If it already exists, the function will - * fail and return NULL. - * @param len - * The size of the memory to be reserved. If it - * is 0, the biggest contiguous zone will be reserved. - * @param socket_id - * The socket identifier in the case of - * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA - * constraint for the reserved zone. - * @param flags - * The flags parameter is used to request memzones to be - * taken from specifically sized hugepages. - * - RTE_MEMZONE_2MB - Reserved from 2MB pages - * - RTE_MEMZONE_1GB - Reserved from 1GB pages - * - RTE_MEMZONE_16MB - Reserved from 16MB pages - * - RTE_MEMZONE_16GB - Reserved from 16GB pages - * - RTE_MEMZONE_256KB - Reserved from 256KB pages - * - RTE_MEMZONE_256MB - Reserved from 256MB pages - * - RTE_MEMZONE_512MB - Reserved from 512MB pages - * - RTE_MEMZONE_4GB - Reserved from 4GB pages - * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if - * the requested page size is unavailable. - * If this flag is not set, the function - * will return error on an unavailable size - * request. 
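A short usage sketch of the plain rte_memzone_reserve() call documented above (not part of the original header); the zone name and size are arbitrary.

#include <stdio.h>
#include <inttypes.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_errno.h>

static const struct rte_memzone *
reserve_example_zone(void)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_reserve("example_zone", 4096, rte_socket_id(),
                        RTE_MEMZONE_SIZE_HINT_ONLY);
        if (mz == NULL) {
                /* rte_errno is EEXIST, ENOMEM, ENOSPC, ... as listed above */
                printf("reservation failed: %d\n", rte_errno);
                return NULL;
        }
        printf("zone %s: va %p, iova 0x%" PRIx64 ", len %zu\n",
                mz->name, mz->addr, mz->iova, mz->len);
        return mz;
}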
- * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous. - * This option should be used when allocating - * memory intended for hardware rings etc. - * @param align - * Alignment for resulting memzone. Must be a power of 2. - * @return - * A pointer to a correctly-filled read-only memzone descriptor, or NULL - * on error. - * On error case, rte_errno will be set appropriately: - * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance - * - ENOSPC - the maximum number of memzones has already been allocated - * - EEXIST - a memzone with the same name already exists - * - ENOMEM - no appropriate memory area found in which to create memzone - * - EINVAL - invalid parameters - */ -const struct rte_memzone *rte_memzone_reserve_aligned(const char *name, - size_t len, int socket_id, - unsigned flags, unsigned align); - -/** - * Reserve a portion of physical memory with specified alignment and - * boundary. - * - * This function reserves some memory with specified alignment and - * boundary, and returns a pointer to a correctly filled memzone - * descriptor. If the allocation cannot be done or if the alignment - * or boundary are not a power of 2, returns NULL. - * Memory buffer is reserved in a way, that it wouldn't cross specified - * boundary. That implies that requested length should be less or equal - * then boundary. - * - * @note Reserving memzones with len set to 0 will only attempt to allocate - * memzones from memory that is already available. It will not trigger any - * new allocations. - * - * @note: When reserving memzones with len set to 0, it is preferable to also - * set a valid socket_id. Setting socket_id to SOCKET_ID_ANY is supported, but - * will likely not yield expected results. Specifically, the resulting memzone - * may not necessarily be the biggest memzone available, but rather biggest - * memzone available on socket id corresponding to an lcore from which - * reservation was called. - * - * @param name - * The name of the memzone. If it already exists, the function will - * fail and return NULL. - * @param len - * The size of the memory to be reserved. If it - * is 0, the biggest contiguous zone will be reserved. - * @param socket_id - * The socket identifier in the case of - * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA - * constraint for the reserved zone. - * @param flags - * The flags parameter is used to request memzones to be - * taken from specifically sized hugepages. - * - RTE_MEMZONE_2MB - Reserved from 2MB pages - * - RTE_MEMZONE_1GB - Reserved from 1GB pages - * - RTE_MEMZONE_16MB - Reserved from 16MB pages - * - RTE_MEMZONE_16GB - Reserved from 16GB pages - * - RTE_MEMZONE_256KB - Reserved from 256KB pages - * - RTE_MEMZONE_256MB - Reserved from 256MB pages - * - RTE_MEMZONE_512MB - Reserved from 512MB pages - * - RTE_MEMZONE_4GB - Reserved from 4GB pages - * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if - * the requested page size is unavailable. - * If this flag is not set, the function - * will return error on an unavailable size - * request. - * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous. - * This option should be used when allocating - * memory intended for hardware rings etc. - * @param align - * Alignment for resulting memzone. Must be a power of 2. - * @param bound - * Boundary for resulting memzone. Must be a power of 2 or zero. 
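The aligned variant is typically used for hardware ring memory. A minimal sketch follows (illustrative only); it also uses rte_memzone_lookup() and rte_memzone_free(), documented a little further below, for the secondary-process and teardown sides. Names and sizes are placeholders.

#include <rte_memory.h>
#include <rte_memzone.h>

/* primary process: IOVA-contiguous, 4 KB-aligned ring memory */
static const struct rte_memzone *
create_hw_ring_zone(void)
{
        return rte_memzone_reserve_aligned("example_hw_ring", 16384,
                        SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, 4096);
}

/* secondary process: find the same zone by name */
static const struct rte_memzone *
find_hw_ring_zone(void)
{
        return rte_memzone_lookup("example_hw_ring");
}

/* teardown (primary process) */
static void
destroy_hw_ring_zone(const struct rte_memzone *mz)
{
        rte_memzone_free(mz);
}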
- * Zero value implies no boundary condition. - * @return - * A pointer to a correctly-filled read-only memzone descriptor, or NULL - * on error. - * On error case, rte_errno will be set appropriately: - * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance - * - ENOSPC - the maximum number of memzones has already been allocated - * - EEXIST - a memzone with the same name already exists - * - ENOMEM - no appropriate memory area found in which to create memzone - * - EINVAL - invalid parameters - */ -const struct rte_memzone *rte_memzone_reserve_bounded(const char *name, - size_t len, int socket_id, - unsigned flags, unsigned align, unsigned bound); - -/** - * Free a memzone. - * - * @param mz - * A pointer to the memzone - * @return - * -EINVAL - invalid parameter. - * 0 - success - */ -int rte_memzone_free(const struct rte_memzone *mz); - -/** - * Lookup for a memzone. - * - * Get a pointer to a descriptor of an already reserved memory - * zone identified by the name given as an argument. - * - * @param name - * The name of the memzone. - * @return - * A pointer to a read-only memzone descriptor. - */ -const struct rte_memzone *rte_memzone_lookup(const char *name); - -/** - * Dump all reserved memzones to a file. - * - * @param f - * A pointer to a file for output - */ -void rte_memzone_dump(FILE *f); - -/** - * Walk list of all memzones - * - * @param func - * Iterator function - * @param arg - * Argument passed to iterator - */ -void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *arg), - void *arg); - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_MEMZONE_H_ */ diff --git a/lib/librte_eal/common/include/rte_option.h b/lib/librte_eal/common/include/rte_option.h deleted file mode 100644 index 7ad65a4eb4..0000000000 --- a/lib/librte_eal/common/include/rte_option.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2018 Intel Corporation. - */ - -#ifndef __INCLUDE_RTE_OPTION_H__ -#define __INCLUDE_RTE_OPTION_H__ - -/** - * @file - * - * This API offers the ability to register options to the EAL command line and - * map those options to functions that will be executed at the end of EAL - * initialization. These options will be available as part of the EAL command - * line of applications and are dynamically managed. - * - * This is used primarily by DPDK libraries offering command line options. - * Currently, this API is limited to registering options without argument. - * - * The register API can be used to resolve circular dependency issues - * between EAL and the library. The library uses EAL, but is also initialized - * by EAL. Hence, EAL depends on the init function of the library. The API - * introduced in rte_option allows us to register the library init with EAL - * (passing a function pointer) and avoid the circular dependency. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -typedef int (*rte_option_cb)(void); - -/** - * Structure describing an EAL command line option dynamically registered. - * - * Common EAL options are mostly statically defined. - * Some libraries need additional options to be dynamically added. - * This structure describes such options. - */ -struct rte_option { - TAILQ_ENTRY(rte_option) next; /**< Next entry in the list. */ - const char *name; /**< The option name. */ - const char *usage; /**< Option summary string. */ - rte_option_cb cb; /**< Function called when option is used. 
*/ - int enabled; /**< Set when the option is used. */ -}; - -/** - * @warning - * @b EXPERIMENTAL: this API may change without prior notice - * - * Register an option to the EAL command line. - * When recognized, the associated function will be executed at the end of EAL - * initialization. - * - * The associated structure must be available the whole time this option is - * registered (i.e. not stack memory). - * - * @param opt - * Structure describing the option to parse. - * - * @return - * 0 on success, <0 otherwise. - */ -__rte_experimental -int -rte_option_register(struct rte_option *opt); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h b/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h deleted file mode 100644 index e12c22081f..0000000000 --- a/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_PCI_DEV_DEFS_H_ -#define _RTE_PCI_DEV_DEFS_H_ - -/* interrupt mode */ -enum rte_intr_mode { - RTE_INTR_MODE_NONE = 0, - RTE_INTR_MODE_LEGACY, - RTE_INTR_MODE_MSI, - RTE_INTR_MODE_MSIX -}; - -#endif /* _RTE_PCI_DEV_DEFS_H_ */ diff --git a/lib/librte_eal/common/include/rte_pci_dev_features.h b/lib/librte_eal/common/include/rte_pci_dev_features.h deleted file mode 100644 index 6104123d27..0000000000 --- a/lib/librte_eal/common/include/rte_pci_dev_features.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_PCI_DEV_FEATURES_H -#define _RTE_PCI_DEV_FEATURES_H - -#include - -#define RTE_INTR_MODE_NONE_NAME "none" -#define RTE_INTR_MODE_LEGACY_NAME "legacy" -#define RTE_INTR_MODE_MSI_NAME "msi" -#define RTE_INTR_MODE_MSIX_NAME "msix" - -#endif diff --git a/lib/librte_eal/common/include/rte_per_lcore.h b/lib/librte_eal/common/include/rte_per_lcore.h deleted file mode 100644 index eaedf0cb37..0000000000 --- a/lib/librte_eal/common/include/rte_per_lcore.h +++ /dev/null @@ -1,50 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_PER_LCORE_H_ -#define _RTE_PER_LCORE_H_ - -/** - * @file - * - * Per-lcore variables in RTE - * - * This file defines an API for instantiating per-lcore "global - * variables" that are environment-specific. Note that in all - * environments, a "shared variable" is the default when you use a - * global variable. - * - * Parts of this are execution environment specific. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/** - * Macro to define a per lcore variable "var" of type "type", don't - * use keywords like "static" or "volatile" in type, just prefix the - * whole macro. 
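To illustrate the registration flow described above (a sketch, not part of the original header): the option and callback names are hypothetical, and registering from a constructor so the option is known before rte_eal_init() parses the command line is an assumption. The structure has static storage, as the note above requires.

#include <sys/queue.h>
#include <rte_common.h>
#include <rte_option.h>

static int
example_feature_init(void)
{
        /* runs at the end of EAL initialization when the option was given */
        return 0;
}

static struct rte_option example_feature_option = {
        .name = "example-feature",
        .usage = "Enable the hypothetical example feature",
        .cb = example_feature_init,
};

RTE_INIT(example_feature_register)
{
        rte_option_register(&example_feature_option);
}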
- */ -#define RTE_DEFINE_PER_LCORE(type, name) \ - __thread __typeof__(type) per_lcore_##name - -/** - * Macro to declare an extern per lcore variable "var" of type "type" - */ -#define RTE_DECLARE_PER_LCORE(type, name) \ - extern __thread __typeof__(type) per_lcore_##name - -/** - * Read/write the per-lcore variable value - */ -#define RTE_PER_LCORE(name) (per_lcore_##name) - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_PER_LCORE_H_ */ diff --git a/lib/librte_eal/common/include/rte_random.h b/lib/librte_eal/common/include/rte_random.h deleted file mode 100644 index 2b30ec85c1..0000000000 --- a/lib/librte_eal/common/include/rte_random.h +++ /dev/null @@ -1,74 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_RANDOM_H_ -#define _RTE_RANDOM_H_ - -/** - * @file - * - * Pseudo-random Generators in RTE - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -#include - -/** - * Seed the pseudo-random generator. - * - * The generator is automatically seeded by the EAL init with a timer - * value. It may need to be re-seeded by the user with a real random - * value. - * - * This function is not multi-thread safe in regards to other - * rte_srand() calls, nor is it in relation to concurrent rte_rand() - * calls. - * - * @param seedval - * The value of the seed. - */ -void -rte_srand(uint64_t seedval); - -/** - * Get a pseudo-random value. - * - * The generator is not cryptographically secure. - * - * If called from lcore threads, this function is thread-safe. - * - * @return - * A pseudo-random value between 0 and (1<<64)-1. - */ -uint64_t -rte_rand(void); - -/** - * Generates a pseudo-random number with an upper bound. - * - * This function returns an uniformly distributed (unbiased) random - * number less than a user-specified maximum value. - * - * If called from lcore threads, this function is thread-safe. - * - * @param upper_bound - * The upper bound of the generated number. - * @return - * A pseudo-random value between 0 and (upper_bound-1). - */ -__rte_experimental -uint64_t -rte_rand_max(uint64_t upper_bound); - -#ifdef __cplusplus -} -#endif - - -#endif /* _RTE_RANDOM_H_ */ diff --git a/lib/librte_eal/common/include/rte_reciprocal.h b/lib/librte_eal/common/include/rte_reciprocal.h deleted file mode 100644 index 63e16fde0a..0000000000 --- a/lib/librte_eal/common/include/rte_reciprocal.h +++ /dev/null @@ -1,90 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Cavium, Inc - */ -/* - * Reciprocal divide - * - * Used with permission from original authors - * Hannes Frederic Sowa and Daniel Borkmann - * - * This algorithm is based on the paper "Division by Invariant - * Integers Using Multiplication" by Torbjörn Granlund and Peter - * L. Montgomery. - * - * The assembler implementation from Agner Fog, which this code is - * based on, can be found here: - * http://www.agner.org/optimize/asmlib.zip - * - * This optimization for A/B is helpful if the divisor B is mostly - * runtime invariant. The reciprocal of B is calculated in the - * slow-path with reciprocal_value(). The fast-path can then just use - * a much faster multiplication operation with a variable dividend A - * to calculate the division A/B. 
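Before moving on to the reciprocal-divide helpers, a tiny example of the per-lcore macros defined above (illustrative; the counter name is arbitrary):

#include <stdint.h>
#include <rte_per_lcore.h>

/* one 64-bit packet counter per lcore */
RTE_DEFINE_PER_LCORE(uint64_t, pkt_count);

static inline void
count_packet(void)
{
        RTE_PER_LCORE(pkt_count)++;     /* expands to per_lcore_pkt_count */
}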
- */ - -#ifndef _RTE_RECIPROCAL_H_ -#define _RTE_RECIPROCAL_H_ - -#include - -struct rte_reciprocal { - uint32_t m; - uint8_t sh1, sh2; -}; - -struct rte_reciprocal_u64 { - uint64_t m; - uint8_t sh1, sh2; -}; - -static inline uint32_t rte_reciprocal_divide(uint32_t a, struct rte_reciprocal R) -{ - uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32); - - return (t + ((a - t) >> R.sh1)) >> R.sh2; -} - -static __rte_always_inline uint64_t -mullhi_u64(uint64_t x, uint64_t y) -{ -#ifdef __SIZEOF_INT128__ - __uint128_t xl = x; - __uint128_t rl = xl * y; - - return (rl >> 64); -#else - uint64_t u0, u1, v0, v1, k, t; - uint64_t w1, w2; - uint64_t whi; - - u1 = x >> 32; u0 = x & 0xFFFFFFFF; - v1 = y >> 32; v0 = y & 0xFFFFFFFF; - - t = u0*v0; - k = t >> 32; - - t = u1*v0 + k; - w1 = t & 0xFFFFFFFF; - w2 = t >> 32; - - t = u0*v1 + w1; - k = t >> 32; - - whi = u1*v1 + w2 + k; - - return whi; -#endif -} - -static __rte_always_inline uint64_t -rte_reciprocal_divide_u64(uint64_t a, const struct rte_reciprocal_u64 *R) -{ - uint64_t t = mullhi_u64(a, R->m); - - return (t + ((a - t) >> R->sh1)) >> R->sh2; -} - -struct rte_reciprocal rte_reciprocal_value(uint32_t d); -struct rte_reciprocal_u64 rte_reciprocal_value_u64(uint64_t d); - -#endif /* _RTE_RECIPROCAL_H_ */ diff --git a/lib/librte_eal/common/include/rte_service.h b/lib/librte_eal/common/include/rte_service.h deleted file mode 100644 index d8701dd4cf..0000000000 --- a/lib/librte_eal/common/include/rte_service.h +++ /dev/null @@ -1,418 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Intel Corporation - */ - -#ifndef _RTE_SERVICE_H_ -#define _RTE_SERVICE_H_ - -/** - * @file - * - * Service functions - * - * The service functionality provided by this header allows a DPDK component - * to indicate that it requires a function call in order for it to perform - * its processing. - * - * An example usage of this functionality would be a component that registers - * a service to perform a particular packet processing duty: for example the - * eventdev software PMD. At startup the application requests all services - * that have been registered, and the cores in the service-coremask run the - * required services. The EAL removes these number of cores from the available - * runtime cores, and dedicates them to performing service-core workloads. The - * application has access to the remaining lcores as normal. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -#include -#include - -#define RTE_SERVICE_NAME_MAX 32 - -/* Capabilities of a service. - * - * Use the *rte_service_probe_capability* function to check if a service is - * capable of a specific capability. - */ -/** When set, the service is capable of having multiple threads run it at the - * same time. - */ -#define RTE_SERVICE_CAP_MT_SAFE (1 << 0) - -/** - * Return the number of services registered. - * - * The number of services registered can be passed to *rte_service_get_by_id*, - * enabling the application to retrieve the specification of each service. - * - * @return The number of services registered. - */ -uint32_t rte_service_get_count(void); - -/** - * Return the id of a service by name. - * - * This function provides the id of the service using the service name as - * lookup key. The service id is to be passed to other functions in the - * rte_service_* API. 
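A quick usage sketch of the helpers above (illustrative): the reciprocal of the runtime-invariant divisor is computed once in the slow path, after which each division is just multiplies and shifts.

#include <stdint.h>
#include <rte_reciprocal.h>

static uint32_t
divide_many_by_3(const uint32_t *vals, uint32_t n)
{
        /* slow path: precompute the reciprocal of the divisor */
        struct rte_reciprocal div3 = rte_reciprocal_value(3);
        uint32_t sum = 0;
        uint32_t i;

        /* fast path: per-element division without a hardware divide */
        for (i = 0; i < n; i++)
                sum += rte_reciprocal_divide(vals[i], div3);
        return sum;
}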
- * - * Example usage: - * @code - * uint32_t service_id; - * int32_t ret = rte_service_get_by_name("service_X", &service_id); - * if (ret) { - * // handle error - * } - * @endcode - * - * @param name The name of the service to retrieve - * @param[out] service_id A pointer to a uint32_t, to be filled in with the id. - * @retval 0 Success. The service id is provided in *service_id*. - * @retval -EINVAL Null *service_id* pointer provided - * @retval -ENODEV No such service registered - */ -int32_t rte_service_get_by_name(const char *name, uint32_t *service_id); - -/** - * Return the name of the service. - * - * @return A pointer to the name of the service. The returned pointer remains - * in ownership of the service, and the application must not free it. - */ -const char *rte_service_get_name(uint32_t id); - -/** - * Check if a service has a specific capability. - * - * This function returns if *service* has implements *capability*. - * See RTE_SERVICE_CAP_* defines for a list of valid capabilities. - * @retval 1 Capability supported by this service instance - * @retval 0 Capability not supported by this service instance - */ -int32_t rte_service_probe_capability(uint32_t id, uint32_t capability); - -/** - * Map or unmap a lcore to a service. - * - * Each core can be added or removed from running a specific service. This - * function enables or disables *lcore* to run *service_id*. - * - * If multiple cores are enabled on a service, an atomic is used to ensure that - * only one cores runs the service at a time. The exception to this is when - * a service indicates that it is multi-thread safe by setting the capability - * called RTE_SERVICE_CAP_MT_SAFE. With the multi-thread safe capability set, - * the service function can be run on multiple threads at the same time. - * - * @param service_id the service to apply the lcore to - * @param lcore The lcore that will be mapped to service - * @param enable Zero to unmap or disable the core, non-zero to enable - * - * @retval 0 lcore map updated successfully - * @retval -EINVAL An invalid service or lcore was provided. - */ -int32_t rte_service_map_lcore_set(uint32_t service_id, uint32_t lcore, - uint32_t enable); - -/** - * Retrieve the mapping of an lcore to a service. - * - * @param service_id the service to apply the lcore to - * @param lcore The lcore that will be mapped to service - * - * @retval 1 lcore is mapped to service - * @retval 0 lcore is not mapped to service - * @retval -EINVAL An invalid service or lcore was provided. - */ -int32_t rte_service_map_lcore_get(uint32_t service_id, uint32_t lcore); - -/** - * Set the runstate of the service. - * - * Each service is either running or stopped. Setting a non-zero runstate - * enables the service to run, while setting runstate zero disables it. - * - * @param id The id of the service - * @param runstate The run state to apply to the service - * - * @retval 0 The service was successfully started - * @retval -EINVAL Invalid service id - */ -int32_t rte_service_runstate_set(uint32_t id, uint32_t runstate); - -/** - * Get the runstate for the service with *id*. See *rte_service_runstate_set* - * for details of runstates. A service can call this function to ensure that - * the application has indicated that it will receive CPU cycles. 
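Pulling the calls above together in one sketch (illustrative; the service name "example_service" and the lcore id are placeholders):

#include <stdio.h>
#include <rte_service.h>

static int
enable_service_on_lcore(uint32_t service_lcore)
{
        uint32_t sid;

        if (rte_service_get_by_name("example_service", &sid) != 0)
                return -1;      /* -EINVAL or -ENODEV per the doc above */

        if (rte_service_probe_capability(sid, RTE_SERVICE_CAP_MT_SAFE))
                printf("service %s is multi-thread safe\n",
                        rte_service_get_name(sid));

        if (rte_service_map_lcore_set(sid, service_lcore, 1) != 0)
                return -1;
        return rte_service_runstate_set(sid, 1);
}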
Either a - * service-core is mapped (default case), or the application has explicitly - * disabled the check that a service-cores is mapped to the service and takes - * responsibility to run the service manually using the available function - * *rte_service_run_iter_on_app_lcore* to do so. - * - * @retval 1 Service is running - * @retval 0 Service is stopped - * @retval -EINVAL Invalid service id - */ -int32_t rte_service_runstate_get(uint32_t id); - -/** - * This function returns whether the service may be currently executing on - * at least one lcore, or definitely is not. This function can be used to - * determine if, after setting the service runstate to stopped, the service - * is still executing a service lcore. - * - * Care must be taken if calling this function when the service runstate is - * running, since the result of this function may be incorrect by the time the - * function returns due to service cores running in parallel. - * - * @retval 1 Service may be running on one or more lcores - * @retval 0 Service is not running on any lcore - * @retval -EINVAL Invalid service id - */ -int32_t -rte_service_may_be_active(uint32_t id); - -/** - * Enable or disable the check for a service-core being mapped to the service. - * An application can disable the check when takes the responsibility to run a - * service itself using *rte_service_run_iter_on_app_lcore*. - * - * @param id The id of the service to set the check on - * @param enable When zero, the check is disabled. Non-zero enables the check. - * - * @retval 0 Success - * @retval -EINVAL Invalid service ID - */ -int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enable); - -/** - * This function runs a service callback from a non-service lcore. - * - * This function is designed to enable gradual porting to service cores, and - * to enable unit tests to verify a service behaves as expected. - * - * When called, this function ensures that the service identified by *id* is - * safe to run on this lcore. Multi-thread safe services are invoked even if - * other cores are simultaneously running them as they are multi-thread safe. - * - * Multi-thread unsafe services are handled depending on the variable - * *serialize_multithread_unsafe*: - * - When set, the function will check if a service is already being invoked - * on another lcore, refusing to run it and returning -EBUSY. - * - When zero, the application takes responsibility to ensure that the service - * indicated by *id* is not going to be invoked by another lcore. This setting - * avoids atomic operations, so is likely to be more performant. - * - * @param id The ID of the service to run - * @param serialize_multithread_unsafe This parameter indicates to the service - * cores library if it is required to use atomics to serialize access - * to mult-thread unsafe services. As there is an overhead in using - * atomics, applications can choose to enable or disable this feature - * - * Note that any thread calling this function MUST be a DPDK EAL thread, as - * the *rte_lcore_id* function is used to access internal data structures. 
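As sketched below (illustrative only), an application that runs a service itself first disables the mapped-core check; serialization of multi-thread-unsafe services is left enabled here.

#include <rte_service.h>

static void
poll_service_from_app_lcore(uint32_t sid)
{
        /* we take responsibility for running the service ourselves */
        rte_service_set_runstate_mapped_check(sid, 0);
        rte_service_runstate_set(sid, 1);

        for (;;) {
                /* 1: use atomics to serialize multi-thread-unsafe services */
                if (rte_service_run_iter_on_app_lcore(sid, 1) < 0)
                        break;  /* -EBUSY, -ENOEXEC or -EINVAL, see below */
        }
}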
- * - * @retval 0 Service was run on the calling thread successfully - * @retval -EBUSY Another lcore is executing the service, and it is not a - * multi-thread safe service, so the service was not run on this lcore - * @retval -ENOEXEC Service is not in a run-able state - * @retval -EINVAL Invalid service id - */ -int32_t rte_service_run_iter_on_app_lcore(uint32_t id, - uint32_t serialize_multithread_unsafe); - -/** - * Start a service core. - * - * Starting a core makes the core begin polling. Any services assigned to it - * will be run as fast as possible. The application must ensure that the lcore - * is in a launchable state: e.g. call *rte_eal_lcore_wait* on the lcore_id - * before calling this function. - * - * @retval 0 Success - * @retval -EINVAL Failed to start core. The *lcore_id* passed in is not - * currently assigned to be a service core. - */ -int32_t rte_service_lcore_start(uint32_t lcore_id); - -/** - * Stop a service core. - * - * Stopping a core makes the core become idle, but remains assigned as a - * service core. - * - * @retval 0 Success - * @retval -EINVAL Invalid *lcore_id* provided - * @retval -EALREADY Already stopped core - * @retval -EBUSY Failed to stop core, as it would cause a service to not - * be run, as this is the only core currently running the service. - * The application must stop the service first, and then stop the - * lcore. - */ -int32_t rte_service_lcore_stop(uint32_t lcore_id); - -/** - * Adds lcore to the list of service cores. - * - * This functions can be used at runtime in order to modify the service core - * mask. - * - * @retval 0 Success - * @retval -EBUSY lcore is busy, and not available for service core duty - * @retval -EALREADY lcore is already added to the service core list - * @retval -EINVAL Invalid lcore provided - */ -int32_t rte_service_lcore_add(uint32_t lcore); - -/** - * Removes lcore from the list of service cores. - * - * This can fail if the core is not stopped, see *rte_service_core_stop*. - * - * @retval 0 Success - * @retval -EBUSY Lcore is not stopped, stop service core before removing. - * @retval -EINVAL failed to add lcore to service core mask. - */ -int32_t rte_service_lcore_del(uint32_t lcore); - -/** - * Retrieve the number of service cores currently available. - * - * This function returns the integer count of service cores available. The - * service core count can be used in mapping logic when creating mappings - * from service cores to services. - * - * See *rte_service_lcore_list* for details on retrieving the lcore_id of each - * service core. - * - * @return The number of service cores currently configured. - */ -int32_t rte_service_lcore_count(void); - -/** - * Resets all service core mappings. This does not remove the service cores - * from duty, just unmaps all services / cores, and stops() the service cores. - * The runstate of services is not modified. - * - * @retval 0 Success - */ -int32_t rte_service_lcore_reset_all(void); - -/** - * Enable or disable statistics collection for *service*. - * - * This function enables per core, per-service cycle count collection. - * @param id The service to enable statistics gathering on. - * @param enable Zero to disable statistics, non-zero to enable. - * @retval 0 Success - * @retval -EINVAL Invalid service pointer passed - */ -int32_t rte_service_set_stats_enable(uint32_t id, int32_t enable); - -/** - * Retrieve the list of currently enabled service cores. 
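The lcore management calls above combine as follows (illustrative sketch; lcore and service ids are placeholders):

#include <rte_service.h>

static int
attach_service_core(uint32_t lcore_id, uint32_t service_id)
{
        if (rte_service_lcore_add(lcore_id) != 0)
                return -1;      /* -EBUSY, -EALREADY or -EINVAL */
        if (rte_service_map_lcore_set(service_id, lcore_id, 1) != 0)
                return -1;
        return rte_service_lcore_start(lcore_id);
}

static void
detach_service_core(uint32_t lcore_id)
{
        /* del fails with -EBUSY if the core was not stopped first */
        rte_service_lcore_stop(lcore_id);
        rte_service_lcore_del(lcore_id);
}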
- * - * This function fills in an application supplied array, with each element - * indicating the lcore_id of a service core. - * - * Adding and removing service cores can be performed using - * *rte_service_lcore_add* and *rte_service_lcore_del*. - * @param [out] array An array of at least *rte_service_lcore_count* items. - * If statically allocating the buffer, use RTE_MAX_LCORE. - * @param [out] n The size of *array*. - * @retval >=0 Number of service cores that have been populated in the array - * @retval -ENOMEM The provided array is not large enough to fill in the - * service core list. No items have been populated, call this function - * with a size of at least *rte_service_core_count* items. - */ -int32_t rte_service_lcore_list(uint32_t array[], uint32_t n); - -/** - * Get the number of services running on the supplied lcore. - * - * @param lcore Id of the service core. - * @retval >=0 Number of services registered to this core. - * @retval -EINVAL Invalid lcore provided - * @retval -ENOTSUP The provided lcore is not a service core. - */ -int32_t rte_service_lcore_count_services(uint32_t lcore); - -/** - * Dumps any information available about the service. When id is UINT32_MAX, - * this function dumps info for all services. - * - * @retval 0 Statistics have been successfully dumped - * @retval -EINVAL Invalid service id provided - */ -int32_t rte_service_dump(FILE *f, uint32_t id); - -/** - * Returns the number of cycles that this service has consumed - */ -#define RTE_SERVICE_ATTR_CYCLES 0 - -/** - * Returns the count of invocations of this service function - */ -#define RTE_SERVICE_ATTR_CALL_COUNT 1 - -/** - * Get an attribute from a service. - * - * @retval 0 Success, the attribute value has been written to *attr_value*. - * -EINVAL Invalid id, attr_id or attr_value was NULL. - */ -int32_t rte_service_attr_get(uint32_t id, uint32_t attr_id, - uint64_t *attr_value); - -/** - * Reset all attribute values of a service. - * - * @param id The service to reset all statistics of - * @retval 0 Successfully reset attributes - * -EINVAL Invalid service id provided - */ -int32_t rte_service_attr_reset_all(uint32_t id); - -/** - * Returns the number of times the service runner has looped. - */ -#define RTE_SERVICE_LCORE_ATTR_LOOPS 0 - -/** - * Get an attribute from a service core. - * - * @param lcore Id of the service core. - * @param attr_id Id of the attribute to be retrieved. - * @param [out] attr_value Pointer to storage in which to write retrieved value. - * @retval 0 Success, the attribute value has been written to *attr_value*. - * -EINVAL Invalid lcore, attr_id or attr_value was NULL. - * -ENOTSUP lcore is not a service core. - */ -int32_t -rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id, - uint64_t *attr_value); - -/** - * Reset all attribute values of a service core. - * - * @param lcore The service core to reset all the statistics of - * @retval 0 Successfully reset attributes - * -EINVAL Invalid service id provided - * -ENOTSUP lcore is not a service core. 
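The statistics attributes above can be read back as in this sketch (illustrative only):

#include <stdio.h>
#include <inttypes.h>
#include <rte_service.h>

static void
print_service_stats(uint32_t sid)
{
        uint64_t cycles = 0, calls = 0;

        rte_service_set_stats_enable(sid, 1);
        /* ... let the service run for a while ... */
        rte_service_attr_get(sid, RTE_SERVICE_ATTR_CYCLES, &cycles);
        rte_service_attr_get(sid, RTE_SERVICE_ATTR_CALL_COUNT, &calls);
        printf("%s: %" PRIu64 " calls, %" PRIu64 " cycles\n",
                rte_service_get_name(sid), calls, cycles);
        rte_service_attr_reset_all(sid);
}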
- */ -int32_t -rte_service_lcore_attr_reset_all(uint32_t lcore); - -#ifdef __cplusplus -} -#endif - - -#endif /* _RTE_SERVICE_H_ */ diff --git a/lib/librte_eal/common/include/rte_service_component.h b/lib/librte_eal/common/include/rte_service_component.h deleted file mode 100644 index 16eab79eea..0000000000 --- a/lib/librte_eal/common/include/rte_service_component.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Intel Corporation - */ - -#ifndef _SERVICE_PRIVATE_H_ -#define _SERVICE_PRIVATE_H_ - -/* This file specifies the internal service specification. - * Include this file if you are writing a component that requires CPU cycles to - * operate, and you wish to run the component using service cores - */ -#include -#include - -/** - * Signature of callback function to run a service. - */ -typedef int32_t (*rte_service_func)(void *args); - -/** - * The specification of a service. - * - * This struct contains metadata about the service itself, the callback - * function to run one iteration of the service, a userdata pointer, flags etc. - */ -struct rte_service_spec { - /** The name of the service. This should be used by the application to - * understand what purpose this service provides. - */ - char name[RTE_SERVICE_NAME_MAX]; - /** The callback to invoke to run one iteration of the service. */ - rte_service_func callback; - /** The userdata pointer provided to the service callback. */ - void *callback_userdata; - /** Flags to indicate the capabilities of this service. See defines in - * the public header file for values of RTE_SERVICE_CAP_* - */ - uint32_t capabilities; - /** NUMA socket ID that this service is affinitized to */ - int socket_id; -}; - -/** - * Register a new service. - * - * A service represents a component that the requires CPU time periodically to - * achieve its purpose. - * - * For example the eventdev SW PMD requires CPU cycles to perform its - * scheduling. This can be achieved by registering it as a service, and the - * application can then assign CPU resources to that service. - * - * Note that when a service component registers itself, it is not permitted to - * add or remove service-core threads, or modify lcore-to-service mappings. The - * only API that may be called by the service-component is - * *rte_service_component_runstate_set*, which indicates that the service - * component is ready to be executed. - * - * @param spec The specification of the service to register - * @param[out] service_id A pointer to a uint32_t, which will be filled in - * during registration of the service. It is set to the integers - * service number given to the service. This parameter may be NULL. - * @retval 0 Successfully registered the service. - * -EINVAL Attempted to register an invalid service (eg, no callback - * set) - */ -int32_t rte_service_component_register(const struct rte_service_spec *spec, - uint32_t *service_id); - -/** - * Unregister a service component. - * - * The service being removed must be stopped before calling this function. - * - * @retval 0 The service was successfully unregistered. - * @retval -EBUSY The service is currently running, stop the service before - * calling unregister. No action has been taken. - */ -int32_t rte_service_component_unregister(uint32_t id); - -/** - * Private function to allow EAL to initialized default mappings. - * - * This function iterates all the services, and maps then to the available - * cores. 
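A sketch of how a component might register itself per the notes above (not part of the original header): the service name, callback and NUMA socket are placeholders, and the component only flips its backend runstate, leaving the service runstate to the application.

#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_service.h>
#include <rte_service_component.h>

static int32_t
example_service_run(void *args __rte_unused)
{
        /* one iteration of the component's work */
        return 0;
}

static int
example_component_setup(void)
{
        struct rte_service_spec spec = {
                .name = "example_component",
                .callback = example_service_run,
                .callback_userdata = NULL,
                .capabilities = RTE_SERVICE_CAP_MT_SAFE,
                .socket_id = (int)rte_socket_id(),
        };
        uint32_t id;

        if (rte_service_component_register(&spec, &id) != 0)
                return -1;
        /* component is ready; the application still enables/maps it */
        return rte_service_component_runstate_set(id, 1);
}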
Based on the capabilities of the services, they are set to run on the - * available cores in a round-robin manner. - * - * @retval 0 Success - * @retval -ENOTSUP No service lcores in use - * @retval -EINVAL Error while iterating over services - * @retval -ENODEV Error in enabling service lcore on a service - * @retval -ENOEXEC Error when starting services - */ -int32_t rte_service_start_with_defaults(void); - -/** - * Set the backend runstate of a component. - * - * This function allows services to be registered at startup, but not yet - * enabled to run by default. When the service has been configured (via the - * usual method; eg rte_eventdev_configure, the service can mark itself as - * ready to run. The differentiation between backend runstate and - * service_runstate is that the backend runstate is set by the service - * component while the service runstate is reserved for application usage. - * - * @retval 0 Success - */ -int32_t rte_service_component_runstate_set(uint32_t id, uint32_t runstate); - -/** - * Initialize the service library. - * - * In order to use the service library, it must be initialized. EAL initializes - * the library at startup. - * - * @retval 0 Success - * @retval -EALREADY Service library is already initialized - */ -int32_t rte_service_init(void); - -/** - * @internal Free up the memory that has been initialized. - * This routine is to be invoked prior to process termination. - * - * @retval None - */ -void rte_service_finalize(void); - -#endif /* _SERVICE_PRIVATE_H_ */ diff --git a/lib/librte_eal/common/include/rte_string_fns.h b/lib/librte_eal/common/include/rte_string_fns.h deleted file mode 100644 index 8bac8243c9..0000000000 --- a/lib/librte_eal/common/include/rte_string_fns.h +++ /dev/null @@ -1,123 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2019 Intel Corporation - */ - -/** - * @file - * - * String-related functions as replacement for libc equivalents - */ - -#ifndef _RTE_STRING_FNS_H_ -#define _RTE_STRING_FNS_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#include - -/** - * Takes string "string" parameter and splits it at character "delim" - * up to maxtokens-1 times - to give "maxtokens" resulting tokens. Like - * strtok or strsep functions, this modifies its input string, by replacing - * instances of "delim" with '\\0'. All resultant tokens are returned in the - * "tokens" array which must have enough entries to hold "maxtokens". - * - * @param string - * The input string to be split into tokens - * - * @param stringlen - * The max length of the input buffer - * - * @param tokens - * The array to hold the pointers to the tokens in the string - * - * @param maxtokens - * The number of elements in the tokens array. At most, maxtokens-1 splits - * of the string will be done. - * - * @param delim - * The character on which the split of the data will be done - * - * @return - * The number of tokens in the tokens array. 
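A small example of the splitter documented above (illustrative; note it modifies the input buffer in place):

#include <stdio.h>
#include <rte_common.h>
#include <rte_string_fns.h>

static void
split_example(void)
{
        char line[] = "eth0,eth1,eth2";     /* modified in place */
        char *tokens[4];
        int i, n;

        n = rte_strsplit(line, sizeof(line), tokens, RTE_DIM(tokens), ',');
        for (i = 0; i < n; i++)
                printf("token %d: %s\n", i, tokens[i]);
}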
- */ -int -rte_strsplit(char *string, int stringlen, - char **tokens, int maxtokens, char delim); - -/** - * @internal - * DPDK-specific version of strlcpy for systems without - * libc or libbsd copies of the function - */ -static inline size_t -rte_strlcpy(char *dst, const char *src, size_t size) -{ - return (size_t)snprintf(dst, size, "%s", src); -} - -/** - * @internal - * DPDK-specific version of strlcat for systems without - * libc or libbsd copies of the function - */ -static inline size_t -rte_strlcat(char *dst, const char *src, size_t size) -{ - size_t l = strnlen(dst, size); - if (l < size) - return l + rte_strlcpy(&dst[l], src, size - l); - return l + strlen(src); -} - -/* pull in a strlcpy function */ -#ifdef RTE_EXEC_ENV_FREEBSD -#ifndef __BSD_VISIBLE /* non-standard functions are hidden */ -#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size) -#define strlcat(dst, src, size) rte_strlcat(dst, src, size) -#endif - -#else /* non-BSD platforms */ -#ifdef RTE_USE_LIBBSD -#include - -#else /* no BSD header files, create own */ -#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size) -#define strlcat(dst, src, size) rte_strlcat(dst, src, size) - -#endif /* RTE_USE_LIBBSD */ -#endif /* FREEBSD */ - -/** - * Copy string src to buffer dst of size dsize. - * At most dsize-1 chars will be copied. - * Always NUL-terminates, unless (dsize == 0). - * Returns number of bytes copied (terminating NUL-byte excluded) on success ; - * negative errno on error. - * - * @param dst - * The destination string. - * - * @param src - * The input string to be copied. - * - * @param dsize - * Length in bytes of the destination buffer. - * - * @return - * The number of bytes copied on success - * -E2BIG if the destination buffer is too small. - */ -ssize_t -rte_strscpy(char *dst, const char *src, size_t dsize); - -#ifdef __cplusplus -} -#endif - -#endif /* RTE_STRING_FNS_H */ diff --git a/lib/librte_eal/common/include/rte_tailq.h b/lib/librte_eal/common/include/rte_tailq.h deleted file mode 100644 index b6fe4e5f78..0000000000 --- a/lib/librte_eal/common/include/rte_tailq.h +++ /dev/null @@ -1,140 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -#ifndef _RTE_TAILQ_H_ -#define _RTE_TAILQ_H_ - -/** - * @file - * Here defines rte_tailq APIs for only internal use - * - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -/** dummy structure type used by the rte_tailq APIs */ -struct rte_tailq_entry { - TAILQ_ENTRY(rte_tailq_entry) next; /**< Pointer entries for a tailq list */ - void *data; /**< Pointer to the data referenced by this tailq entry */ -}; -/** dummy */ -TAILQ_HEAD(rte_tailq_entry_head, rte_tailq_entry); - -#define RTE_TAILQ_NAMESIZE 32 - -/** - * The structure defining a tailq header entry for storing - * in the rte_config structure in shared memory. Each tailq - * is identified by name. - * Any library storing a set of objects e.g. rings, mempools, hash-tables, - * is recommended to use an entry here, so as to make it easy for - * a multi-process app to find already-created elements in shared memory. 
- */ -struct rte_tailq_head { - struct rte_tailq_entry_head tailq_head; /**< NOTE: must be first element */ - char name[RTE_TAILQ_NAMESIZE]; -}; - -struct rte_tailq_elem { - /** - * Reference to head in shared mem, updated at init time by - * rte_eal_tailqs_init() - */ - struct rte_tailq_head *head; - TAILQ_ENTRY(rte_tailq_elem) next; - const char name[RTE_TAILQ_NAMESIZE]; -}; - -/** - * Return the first tailq entry cast to the right struct. - */ -#define RTE_TAILQ_CAST(tailq_entry, struct_name) \ - (struct struct_name *)&(tailq_entry)->tailq_head - -/** - * Utility macro to make looking up a tailqueue for a particular struct easier. - * - * @param name - * The name of tailq - * - * @param struct_name - * The name of the list type we are using. (Generally this is the same as the - * first parameter passed to TAILQ_HEAD macro) - * - * @return - * The return value from rte_eal_tailq_lookup, typecast to the appropriate - * structure pointer type. - * NULL on error, since the tailq_head is the first - * element in the rte_tailq_head structure. - */ -#define RTE_TAILQ_LOOKUP(name, struct_name) \ - RTE_TAILQ_CAST(rte_eal_tailq_lookup(name), struct_name) - -/** - * Dump tail queues to a file. - * - * @param f - * A pointer to a file for output - */ -void rte_dump_tailq(FILE *f); - -/** - * Lookup for a tail queue. - * - * Get a pointer to a tail queue header of a tail - * queue identified by the name given as an argument. - * Note: this function is not multi-thread safe, and should only be called from - * a single thread at a time - * - * @param name - * The name of the queue. - * @return - * A pointer to the tail queue head structure. - */ -struct rte_tailq_head *rte_eal_tailq_lookup(const char *name); - -/** - * Register a tail queue. - * - * Register a tail queue from shared memory. - * This function is mainly used by EAL_REGISTER_TAILQ macro which is used to - * register tailq from the different dpdk libraries. Since this macro is a - * constructor, the function has no access to dpdk shared memory, so the - * registered tailq can not be used before call to rte_eal_init() which calls - * rte_eal_tailqs_init(). - * - * @param t - * The tailq element which contains the name of the tailq you want to - * create (/retrieve when in secondary process). - * @return - * 0 on success or -1 in case of an error. - */ -int rte_eal_tailq_register(struct rte_tailq_elem *t); - -#define EAL_REGISTER_TAILQ(t) \ -RTE_INIT(tailqinitfn_ ##t) \ -{ \ - if (rte_eal_tailq_register(&t) < 0) \ - rte_panic("Cannot initialize tailq: %s\n", t.name); \ -} - -/* This macro permits both remove and free var within the loop safely.*/ -#ifndef TAILQ_FOREACH_SAFE -#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ - for ((var) = TAILQ_FIRST((head)); \ - (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ - (var) = (tvar)) -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_TAILQ_H_ */ diff --git a/lib/librte_eal/common/include/rte_test.h b/lib/librte_eal/common/include/rte_test.h deleted file mode 100644 index 89e47f47a5..0000000000 --- a/lib/librte_eal/common/include/rte_test.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015 Cavium, Inc - */ - -#ifndef _RTE_TEST_H_ -#define _RTE_TEST_H_ - -#include - -/* Before including rte_test.h file you can define - * RTE_TEST_TRACE_FAILURE(_file, _line, _func) macro to better trace/debug test - * failures. Mostly useful in development phase. 
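Before the test macros below, a sketch of how a library typically uses the tailq registration above (illustrative; the list name is hypothetical):

#include <rte_tailq.h>

/* hypothetical list of objects shared across primary/secondary processes */
static struct rte_tailq_elem example_obj_tailq = {
        .name = "EXAMPLE_OBJ_LIST",
};
EAL_REGISTER_TAILQ(example_obj_tailq)

static struct rte_tailq_entry_head *
example_obj_list(void)
{
        /* valid only after rte_eal_init(); callers should also take the
         * shared tailq lock before walking or modifying the list */
        return RTE_TAILQ_CAST(example_obj_tailq.head, rte_tailq_entry_head);
}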
- */ -#ifndef RTE_TEST_TRACE_FAILURE -#define RTE_TEST_TRACE_FAILURE(_file, _line, _func) -#endif - - -#define RTE_TEST_ASSERT(cond, msg, ...) do { \ - if (!(cond)) { \ - RTE_LOG(DEBUG, EAL, "Test assert %s line %d failed: " \ - msg "\n", __func__, __LINE__, ##__VA_ARGS__); \ - RTE_TEST_TRACE_FAILURE(__FILE__, __LINE__, __func__); \ - return -1; \ - } \ -} while (0) - -#define RTE_TEST_ASSERT_EQUAL(a, b, msg, ...) \ - RTE_TEST_ASSERT(a == b, msg, ##__VA_ARGS__) - -#define RTE_TEST_ASSERT_NOT_EQUAL(a, b, msg, ...) \ - RTE_TEST_ASSERT(a != b, msg, ##__VA_ARGS__) - -#define RTE_TEST_ASSERT_SUCCESS(val, msg, ...) \ - RTE_TEST_ASSERT(val == 0, msg, ##__VA_ARGS__) - -#define RTE_TEST_ASSERT_FAIL(val, msg, ...) \ - RTE_TEST_ASSERT(val != 0, msg, ##__VA_ARGS__) - -#define RTE_TEST_ASSERT_NULL(val, msg, ...) \ - RTE_TEST_ASSERT(val == NULL, msg, ##__VA_ARGS__) - -#define RTE_TEST_ASSERT_NOT_NULL(val, msg, ...) \ - RTE_TEST_ASSERT(val != NULL, msg, ##__VA_ARGS__) - -#endif /* _RTE_TEST_H_ */ diff --git a/lib/librte_eal/common/include/rte_time.h b/lib/librte_eal/common/include/rte_time.h deleted file mode 100644 index 5ad7c8841a..0000000000 --- a/lib/librte_eal/common/include/rte_time.h +++ /dev/null @@ -1,101 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015 Intel Corporation - */ - -#ifndef _RTE_TIME_H_ -#define _RTE_TIME_H_ - -#include -#include - -#define NSEC_PER_SEC 1000000000L - -/** - * Structure to hold the parameters of a running cycle counter to assist - * in converting cycles to nanoseconds. - */ -struct rte_timecounter { - /** Last cycle counter value read. */ - uint64_t cycle_last; - /** Nanoseconds count. */ - uint64_t nsec; - /** Bitmask separating nanosecond and sub-nanoseconds. */ - uint64_t nsec_mask; - /** Sub-nanoseconds count. */ - uint64_t nsec_frac; - /** Bitmask for two's complement subtraction of non-64 bit counters. */ - uint64_t cc_mask; - /** Cycle to nanosecond divisor (power of two). */ - uint32_t cc_shift; -}; - -/** - * Converts cyclecounter cycles to nanoseconds. - */ -static inline uint64_t -rte_cyclecounter_cycles_to_ns(struct rte_timecounter *tc, uint64_t cycles) -{ - uint64_t ns; - - /* Add fractional nanoseconds. */ - ns = cycles + tc->nsec_frac; - tc->nsec_frac = ns & tc->nsec_mask; - - /* Shift to get only nanoseconds. */ - return ns >> tc->cc_shift; -} - -/** - * Update the internal nanosecond count in the structure. - */ -static inline uint64_t -rte_timecounter_update(struct rte_timecounter *tc, uint64_t cycle_now) -{ - uint64_t cycle_delta, ns_offset; - - /* Calculate the delta since the last call. */ - if (tc->cycle_last <= cycle_now) - cycle_delta = (cycle_now - tc->cycle_last) & tc->cc_mask; - else - /* Handle cycle counts that have wrapped around . */ - cycle_delta = (~(tc->cycle_last - cycle_now) & tc->cc_mask) + 1; - - /* Convert to nanoseconds. */ - ns_offset = rte_cyclecounter_cycles_to_ns(tc, cycle_delta); - - /* Store current cycle counter for next call. */ - tc->cycle_last = cycle_now; - - /* Update the nanosecond count. */ - tc->nsec += ns_offset; - - return tc->nsec; -} - -/** - * Convert from timespec structure into nanosecond units. - */ -static inline uint64_t -rte_timespec_to_ns(const struct timespec *ts) -{ - return ((uint64_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; -} - -/** - * Convert from nanosecond units into timespec structure. 
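A sketch of the assertion macros above in a unit-test helper (illustrative; the custom failure tracer is optional, as noted, and must be defined before the include):

#include <stdio.h>

#define RTE_TEST_TRACE_FAILURE(_file, _line, _func) \
        printf("failure at %s:%d (%s)\n", _file, _line, _func)
#include <rte_test.h>

static int
test_example(void)
{
        int ret = 0;
        void *obj = &ret;       /* stand-in for an object under test */

        RTE_TEST_ASSERT_SUCCESS(ret, "setup returned %d", ret);
        RTE_TEST_ASSERT_NOT_NULL(obj, "object was not created");
        RTE_TEST_ASSERT_EQUAL(ret, 0, "unexpected value %d", ret);
        return 0;       /* the macros return -1 on the first failed assertion */
}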
- */ -static inline struct timespec -rte_ns_to_timespec(uint64_t nsec) -{ - struct timespec ts = {0, 0}; - - if (nsec == 0) - return ts; - - ts.tv_sec = nsec / NSEC_PER_SEC; - ts.tv_nsec = nsec % NSEC_PER_SEC; - - return ts; -} - -#endif /* _RTE_TIME_H_ */ diff --git a/lib/librte_eal/common/include/rte_uuid.h b/lib/librte_eal/common/include/rte_uuid.h deleted file mode 100644 index 044afbdfab..0000000000 --- a/lib/librte_eal/common/include/rte_uuid.h +++ /dev/null @@ -1,103 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright (C) 1996, 1997, 1998 Theodore Ts'o. - */ -/** - * @file - * - * UUID related functions originally from libuuid - */ - -#ifndef _RTE_UUID_H_ -#define _RTE_UUID_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/** - * Struct describing a Universal Unique Identifier - */ -typedef unsigned char rte_uuid_t[16]; - -/** - * Helper for defining UUID values for id tables. - */ -#define RTE_UUID_INIT(a, b, c, d, e) { \ - ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, \ - ((a) >> 8) & 0xff, (a) & 0xff, \ - ((b) >> 8) & 0xff, (b) & 0xff, \ - ((c) >> 8) & 0xff, (c) & 0xff, \ - ((d) >> 8) & 0xff, (d) & 0xff, \ - ((e) >> 40) & 0xff, ((e) >> 32) & 0xff, \ - ((e) >> 24) & 0xff, ((e) >> 16) & 0xff, \ - ((e) >> 8) & 0xff, (e) & 0xff \ -} - -/** - * Test if UUID is all zeros. - * - * @param uu - * The uuid to check. - * @return - * true if uuid is NULL value, false otherwise - */ -bool rte_uuid_is_null(const rte_uuid_t uu); - -/** - * Copy uuid. - * - * @param dst - * Destination uuid - * @param src - * Source uuid - */ -static inline void rte_uuid_copy(rte_uuid_t dst, const rte_uuid_t src) -{ - memcpy(dst, src, sizeof(rte_uuid_t)); -} - -/** - * Compare two UUID's - * - * @param a - * A UUID to compare - * @param b - * A UUID to compare - * @return - * returns an integer less than, equal to, or greater than zero if UUID a is - * is less than, equal, or greater than UUID b. - */ -int rte_uuid_compare(const rte_uuid_t a, const rte_uuid_t b); - -/** - * Extract UUID from string - * - * @param in - * Pointer to string of characters to convert - * @param uu - * Destination UUID - * @return - * Returns 0 on success, and -1 if string is not a valid UUID. 
- */ -int rte_uuid_parse(const char *in, rte_uuid_t uu); - -/** - * Convert UUID to string - * - * @param uu - * UUID to format - * @param out - * Resulting string buffer - * @param len - * Sizeof the available string buffer - */ -#define RTE_UUID_STRLEN (36 + 1) -void rte_uuid_unparse(const rte_uuid_t uu, char *out, size_t len); - -#ifdef __cplusplus -} -#endif - -#endif /* RTE_UUID_H */ diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h deleted file mode 100644 index f7a3a1ebcf..0000000000 --- a/lib/librte_eal/common/include/rte_version.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation - */ - -/** - * @file - * Definitions of DPDK version numbers - */ - -#ifndef _RTE_VERSION_H_ -#define _RTE_VERSION_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include -#include - -/** - * Macro to compute a version number usable for comparisons - */ -#define RTE_VERSION_NUM(a,b,c,d) ((a) << 24 | (b) << 16 | (c) << 8 | (d)) - -/** - * All version numbers in one to compare with RTE_VERSION_NUM() - */ -#define RTE_VERSION RTE_VERSION_NUM( \ - RTE_VER_YEAR, \ - RTE_VER_MONTH, \ - RTE_VER_MINOR, \ - RTE_VER_RELEASE) - -/** - * Function returning version string - * @return - * string - */ -static inline const char * -rte_version(void) -{ - static char version[32]; - if (version[0] != 0) - return version; - if (strlen(RTE_VER_SUFFIX) == 0) - snprintf(version, sizeof(version), "%s %d.%02d.%d", - RTE_VER_PREFIX, - RTE_VER_YEAR, - RTE_VER_MONTH, - RTE_VER_MINOR); - else - snprintf(version, sizeof(version), "%s %d.%02d.%d%s%d", - RTE_VER_PREFIX, - RTE_VER_YEAR, - RTE_VER_MONTH, - RTE_VER_MINOR, - RTE_VER_SUFFIX, - RTE_VER_RELEASE); - return version; -} - -#ifdef __cplusplus -} -#endif - -#endif /* RTE_VERSION_H */ diff --git a/lib/librte_eal/common/include/rte_vfio.h b/lib/librte_eal/common/include/rte_vfio.h deleted file mode 100644 index 20ed8c45a9..0000000000 --- a/lib/librte_eal/common/include/rte_vfio.h +++ /dev/null @@ -1,360 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 6WIND S.A. - */ - -#ifndef _RTE_VFIO_H_ -#define _RTE_VFIO_H_ - -/** - * @file - * RTE VFIO. This library provides various VFIO related utility functions. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/* - * determine if VFIO is present on the system - */ -#if !defined(VFIO_PRESENT) && defined(RTE_EAL_VFIO) -#include -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) -#define VFIO_PRESENT -#endif /* kernel version >= 3.6.0 */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) -#define HAVE_VFIO_DEV_REQ_INTERFACE -#endif /* kernel version >= 4.0.0 */ -#endif /* RTE_EAL_VFIO */ - -#ifdef VFIO_PRESENT - -#include - -#define VFIO_DIR "/dev/vfio" -#define VFIO_CONTAINER_PATH "/dev/vfio/vfio" -#define VFIO_GROUP_FMT "/dev/vfio/%u" -#define VFIO_NOIOMMU_GROUP_FMT "/dev/vfio/noiommu-%u" -#define VFIO_GET_REGION_ADDR(x) ((uint64_t) x << 40ULL) -#define VFIO_GET_REGION_IDX(x) (x >> 40) -#define VFIO_NOIOMMU_MODE \ - "/sys/module/vfio/parameters/enable_unsafe_noiommu_mode" - -/* NOIOMMU is defined from kernel version 4.5 onwards */ -#ifdef VFIO_NOIOMMU_IOMMU -#define RTE_VFIO_NOIOMMU VFIO_NOIOMMU_IOMMU -#else -#define RTE_VFIO_NOIOMMU 8 -#endif - -/* - * capabilities are only supported on kernel 4.6+. there were also some API - * changes as well, so add a macro to get cap offset. 
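For readers of the relocated rte_uuid.h, a short round-trip example (not part of the patch; the UUID string is arbitrary):

    #include <stdio.h>
    #include <rte_uuid.h>

    static void
    uuid_sketch(void)
    {
        rte_uuid_t uu;
        char buf[RTE_UUID_STRLEN];

        if (rte_uuid_parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8", uu) != 0)
            return;   /* not a valid UUID string */
        rte_uuid_unparse(uu, buf, sizeof(buf));
        printf("uuid: %s, null: %d\n", buf, rte_uuid_is_null(uu));
    }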
- */ -#ifdef VFIO_REGION_INFO_FLAG_CAPS -#define RTE_VFIO_INFO_FLAG_CAPS VFIO_REGION_INFO_FLAG_CAPS -#define VFIO_CAP_OFFSET(x) (x->cap_offset) -#else -#define RTE_VFIO_INFO_FLAG_CAPS (1 << 3) -#define VFIO_CAP_OFFSET(x) (x->resv) -struct vfio_info_cap_header { - uint16_t id; - uint16_t version; - uint32_t next; -}; -#endif - -/* kernels 4.16+ can map BAR containing MSI-X table */ -#ifdef VFIO_REGION_INFO_CAP_MSIX_MAPPABLE -#define RTE_VFIO_CAP_MSIX_MAPPABLE VFIO_REGION_INFO_CAP_MSIX_MAPPABLE -#else -#define RTE_VFIO_CAP_MSIX_MAPPABLE 3 -#endif - -#else /* not VFIO_PRESENT */ - -/* we don't need an actual definition, only pointer is used */ -struct vfio_device_info; - -#endif /* VFIO_PRESENT */ - -#define RTE_VFIO_DEFAULT_CONTAINER_FD (-1) - -/** - * Setup vfio_cfg for the device identified by its address. - * It discovers the configured I/O MMU groups or sets a new one for the device. - * If a new groups is assigned, the DMA mapping is performed. - * - * This function is only relevant to linux and will return - * an error on BSD. - * - * @param sysfs_base - * sysfs path prefix. - * - * @param dev_addr - * device location. - * - * @param vfio_dev_fd - * VFIO fd. - * - * @param device_info - * Device information. - * - * @return - * 0 on success. - * <0 on failure. - * >1 if the device cannot be managed this way. - */ -int rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr, - int *vfio_dev_fd, struct vfio_device_info *device_info); - -/** - * Release a device mapped to a VFIO-managed I/O MMU group. - * - * This function is only relevant to linux and will return - * an error on BSD. - * - * @param sysfs_base - * sysfs path prefix. - * - * @param dev_addr - * device location. - * - * @param fd - * VFIO fd. - * - * @return - * 0 on success. - * <0 on failure. - */ -int rte_vfio_release_device(const char *sysfs_base, const char *dev_addr, int fd); - -/** - * Enable a VFIO-related kmod. - * - * This function is only relevant to linux and will return - * an error on BSD. - * - * @param modname - * kernel module name. - * - * @return - * 0 on success. - * <0 on failure. - */ -int rte_vfio_enable(const char *modname); - -/** - * Check whether a VFIO-related kmod is enabled. - * - * This function is only relevant to linux and will return - * an error on BSD. - * - * @param modname - * kernel module name. - * - * @return - * !0 if true. - * 0 otherwise. - */ -int rte_vfio_is_enabled(const char *modname); - -/** - * Whether VFIO NOIOMMU mode is enabled. - * - * This function is only relevant to linux and will return - * an error on BSD. - * - * @return - * !0 if true. - * 0 otherwise. - */ -int rte_vfio_noiommu_is_enabled(void); - -/** - * Remove group fd from internal VFIO group fd array/ - * - * This function is only relevant to linux and will return - * an error on BSD. - * - * @param vfio_group_fd - * VFIO Group FD. - * - * @return - * 0 on success. - * <0 on failure. - */ -int -rte_vfio_clear_group(int vfio_group_fd); - -/** - * Parse IOMMU group number for a device - * - * This function is only relevant to linux and will return - * an error on BSD. - * - * @param sysfs_base - * sysfs path prefix. - * - * @param dev_addr - * device location. 
- * - * @param iommu_group_num - * iommu group number - * - * @return - * >0 on success - * 0 for non-existent group or VFIO - * <0 for errors - */ -int -rte_vfio_get_group_num(const char *sysfs_base, - const char *dev_addr, int *iommu_group_num); - -/** - * Open a new VFIO container fd - * - * This function is only relevant to linux and will return - * an error on BSD. - * - * @return - * > 0 container fd - * < 0 for errors - */ -int -rte_vfio_get_container_fd(void); - -/** - * Open VFIO group fd or get an existing one - * - * This function is only relevant to linux and will return - * an error on BSD. - * - * @param iommu_group_num - * iommu group number - * - * @return - * > 0 group fd - * < 0 for errors - */ -int -rte_vfio_get_group_fd(int iommu_group_num); - -/** - * Create a new container for device binding. - * - * @note Any newly allocated DPDK memory will not be mapped into these - * containers by default, user needs to manage DMA mappings for - * any container created by this API. - * - * @note When creating containers using this API, the container will only be - * available in the process that has created it. Sharing containers and - * devices between multiple processes is not supported. - * - * @return - * the container fd if successful - * <0 if failed - */ -int -rte_vfio_container_create(void); - -/** - * Destroy the container, unbind all vfio groups within it. - * - * @param container_fd - * the container fd to destroy - * - * @return - * 0 if successful - * <0 if failed - */ -int -rte_vfio_container_destroy(int container_fd); - -/** - * Bind a IOMMU group to a container. - * - * @param container_fd - * the container's fd - * - * @param iommu_group_num - * the iommu group number to bind to container - * - * @return - * group fd if successful - * <0 if failed - */ -int -rte_vfio_container_group_bind(int container_fd, int iommu_group_num); - -/** - * Unbind a IOMMU group from a container. - * - * @param container_fd - * the container fd of container - * - * @param iommu_group_num - * the iommu group number to delete from container - * - * @return - * 0 if successful - * <0 if failed - */ -int -rte_vfio_container_group_unbind(int container_fd, int iommu_group_num); - -/** - * Perform DMA mapping for devices in a container. - * - * @param container_fd - * the specified container fd. Use RTE_VFIO_DEFAULT_CONTAINER_FD to - * use the default container. - * - * @param vaddr - * Starting virtual address of memory to be mapped. - * - * @param iova - * Starting IOVA address of memory to be mapped. - * - * @param len - * Length of memory segment being mapped. - * - * @return - * 0 if successful - * <0 if failed - */ -int -rte_vfio_container_dma_map(int container_fd, uint64_t vaddr, - uint64_t iova, uint64_t len); - -/** - * Perform DMA unmapping for devices in a container. - * - * @param container_fd - * the specified container fd. Use RTE_VFIO_DEFAULT_CONTAINER_FD to - * use the default container. - * - * @param vaddr - * Starting virtual address of memory to be unmapped. - * - * @param iova - * Starting IOVA address of memory to be unmapped. - * - * @param len - * Length of memory segment being unmapped. 
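The container API documented above composes as follows. This is a hedged sketch (not part of the patch): the group number, addresses and length are supplied by the caller, and all real device setup is omitted:

    #include <stdint.h>
    #include <rte_vfio.h>

    static int
    vfio_container_sketch(int iommu_group_num, uint64_t vaddr,
                          uint64_t iova, uint64_t len)
    {
        int cfd = rte_vfio_container_create();

        if (cfd < 0)
            return -1;
        if (rte_vfio_container_group_bind(cfd, iommu_group_num) < 0 ||
                rte_vfio_container_dma_map(cfd, vaddr, iova, len) != 0) {
            rte_vfio_container_destroy(cfd);
            return -1;
        }
        /* ... device I/O using this container ... */
        rte_vfio_container_dma_unmap(cfd, vaddr, iova, len);
        rte_vfio_container_group_unbind(cfd, iommu_group_num);
        rte_vfio_container_destroy(cfd);
        return 0;
    }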
- * - * @return - * 0 if successful - * <0 if failed - */ -int -rte_vfio_container_dma_unmap(int container_fd, uint64_t vaddr, - uint64_t iova, uint64_t len); - -#ifdef __cplusplus -} -#endif - -#endif /* _RTE_VFIO_H_ */ diff --git a/lib/librte_eal/common/meson.build b/lib/librte_eal/common/meson.build index 5885441b48..02d9280cc3 100644 --- a/lib/librte_eal/common/meson.build +++ b/lib/librte_eal/common/meson.build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -eal_inc += include_directories('.', 'include') +includes += include_directories('.') if is_windows sources += files( @@ -16,7 +16,9 @@ if is_windows 'eal_common_thread.c', 'rte_option.c', ) -else # temporary bad indent + subdir_done() +endif + sources += files( 'eal_common_bus.c', 'eal_common_cpuflags.c', @@ -52,62 +54,3 @@ sources += files( 'rte_reciprocal.c', 'rte_service.c', ) -endif - -common_headers = files( - 'include/rte_alarm.h', - 'include/rte_branch_prediction.h', - 'include/rte_bus.h', - 'include/rte_bitmap.h', - 'include/rte_class.h', - 'include/rte_common.h', - 'include/rte_compat.h', - 'include/rte_debug.h', - 'include/rte_devargs.h', - 'include/rte_dev.h', - 'include/rte_eal.h', - 'include/rte_eal_memconfig.h', - 'include/rte_eal_interrupts.h', - 'include/rte_errno.h', - 'include/rte_fbarray.h', - 'include/rte_hexdump.h', - 'include/rte_hypervisor.h', - 'include/rte_interrupts.h', - 'include/rte_keepalive.h', - 'include/rte_launch.h', - 'include/rte_lcore.h', - 'include/rte_log.h', - 'include/rte_malloc.h', - 'include/rte_memory.h', - 'include/rte_memzone.h', - 'include/rte_option.h', - 'include/rte_pci_dev_feature_defs.h', - 'include/rte_pci_dev_features.h', - 'include/rte_per_lcore.h', - 'include/rte_random.h', - 'include/rte_reciprocal.h', - 'include/rte_service.h', - 'include/rte_service_component.h', - 'include/rte_string_fns.h', - 'include/rte_tailq.h', - 'include/rte_time.h', - 'include/rte_uuid.h', - 'include/rte_version.h', - 'include/rte_vfio.h') - -# special case install the generic headers, since they go in a subdir -generic_headers = files( - 'include/generic/rte_atomic.h', - 'include/generic/rte_byteorder.h', - 'include/generic/rte_cpuflags.h', - 'include/generic/rte_cycles.h', - 'include/generic/rte_io.h', - 'include/generic/rte_mcslock.h', - 'include/generic/rte_memcpy.h', - 'include/generic/rte_pause.h', - 'include/generic/rte_prefetch.h', - 'include/generic/rte_rwlock.h', - 'include/generic/rte_spinlock.h', - 'include/generic/rte_ticketlock.h', - 'include/generic/rte_vect.h') -install_headers(generic_headers, subdir: 'generic') diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c index b0b78baabd..70d17a5d79 100644 --- a/lib/librte_eal/common/rte_service.c +++ b/lib/librte_eal/common/rte_service.c @@ -10,7 +10,7 @@ #include #include -#include "include/rte_service_component.h" +#include #include #include diff --git a/lib/librte_eal/freebsd/eal/Makefile b/lib/librte_eal/freebsd/eal/Makefile index e3023f24fa..0c809d9872 100644 --- a/lib/librte_eal/freebsd/eal/Makefile +++ b/lib/librte_eal/freebsd/eal/Makefile @@ -12,7 +12,7 @@ VPATH += $(RTE_SDK)/lib/librte_eal/$(ARCH_DIR) CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -I$(SRCDIR)/include CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common -CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include CFLAGS += $(WERROR_FLAGS) -O3 LDLIBS += -lexecinfo diff --git a/lib/librte_eal/include/Makefile b/lib/librte_eal/include/Makefile new file mode 
100644 index 0000000000..eb99190d10 --- /dev/null +++ b/lib/librte_eal/include/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := \ + $(sort $(notdir \ + $(wildcard $(RTE_SDK)/lib/librte_eal/include/*.h))) + +SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/generic := \ + $(sort $(addprefix generic/, $(notdir \ + $(wildcard $(RTE_SDK)/lib/librte_eal/include/generic/*.h)))) + +ARCH_DIR ?= $(RTE_ARCH) +SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include += \ + $(sort $(addprefix ../$(ARCH_DIR)/include/, $(notdir \ + $(wildcard $(RTE_SDK)/lib/librte_eal/$(ARCH_DIR)/include/*.h)))) + +include $(RTE_SDK)/mk/rte.install.mk diff --git a/lib/librte_eal/include/generic/rte_atomic.h b/lib/librte_eal/include/generic/rte_atomic.h new file mode 100644 index 0000000000..e6ab15a973 --- /dev/null +++ b/lib/librte_eal/include/generic/rte_atomic.h @@ -0,0 +1,1150 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_ATOMIC_H_ +#define _RTE_ATOMIC_H_ + +/** + * @file + * Atomic Operations + * + * This file defines a generic API for atomic operations. + */ + +#include +#include + +#ifdef __DOXYGEN__ + +/** @name Memory Barrier + */ +///@{ +/** + * General memory barrier. + * + * Guarantees that the LOAD and STORE operations generated before the + * barrier occur before the LOAD and STORE operations generated after. + */ +static inline void rte_mb(void); + +/** + * Write memory barrier. + * + * Guarantees that the STORE operations generated before the barrier + * occur before the STORE operations generated after. + */ +static inline void rte_wmb(void); + +/** + * Read memory barrier. + * + * Guarantees that the LOAD operations generated before the barrier + * occur before the LOAD operations generated after. + */ +static inline void rte_rmb(void); +///@} + +/** @name SMP Memory Barrier + */ +///@{ +/** + * General memory barrier between lcores + * + * Guarantees that the LOAD and STORE operations that precede the + * rte_smp_mb() call are globally visible across the lcores + * before the LOAD and STORE operations that follows it. + */ +static inline void rte_smp_mb(void); + +/** + * Write memory barrier between lcores + * + * Guarantees that the STORE operations that precede the + * rte_smp_wmb() call are globally visible across the lcores + * before the STORE operations that follows it. + */ +static inline void rte_smp_wmb(void); + +/** + * Read memory barrier between lcores + * + * Guarantees that the LOAD operations that precede the + * rte_smp_rmb() call are globally visible across the lcores + * before the LOAD operations that follows it. + */ +static inline void rte_smp_rmb(void); +///@} + +/** @name I/O Memory Barrier + */ +///@{ +/** + * General memory barrier for I/O device + * + * Guarantees that the LOAD and STORE operations that precede the + * rte_io_mb() call are visible to I/O device or CPU before the + * LOAD and STORE operations that follow it. + */ +static inline void rte_io_mb(void); + +/** + * Write memory barrier for I/O device + * + * Guarantees that the STORE operations that precede the + * rte_io_wmb() call are visible to I/O device before the STORE + * operations that follow it. 
+ */ +static inline void rte_io_wmb(void); + +/** + * Read memory barrier for IO device + * + * Guarantees that the LOAD operations on I/O device that precede the + * rte_io_rmb() call are visible to CPU before the LOAD + * operations that follow it. + */ +static inline void rte_io_rmb(void); +///@} + +/** @name Coherent I/O Memory Barrier + * + * Coherent I/O memory barrier is a lightweight version of I/O memory + * barriers which are system-wide data synchronization barriers. This + * is for only coherent memory domain between lcore and I/O device but + * it is same as the I/O memory barriers in most of architectures. + * However, some architecture provides even lighter barriers which are + * somewhere in between I/O memory barriers and SMP memory barriers. + * For example, in case of ARMv8, DMB(data memory barrier) instruction + * can have different shareability domains - inner-shareable and + * outer-shareable. And inner-shareable DMB fits for SMP memory + * barriers and outer-shareable DMB for coherent I/O memory barriers, + * which acts on coherent memory. + * + * In most cases, I/O memory barriers are safer but if operations are + * on coherent memory instead of incoherent MMIO region of a device, + * then coherent I/O memory barriers can be used and this could bring + * performance gain depending on architectures. + */ +///@{ +/** + * Write memory barrier for coherent memory between lcore and I/O device + * + * Guarantees that the STORE operations on coherent memory that + * precede the rte_cio_wmb() call are visible to I/O device before the + * STORE operations that follow it. + */ +static inline void rte_cio_wmb(void); + +/** + * Read memory barrier for coherent memory between lcore and I/O device + * + * Guarantees that the LOAD operations on coherent memory updated by + * I/O device that precede the rte_cio_rmb() call are visible to CPU + * before the LOAD operations that follow it. + */ +static inline void rte_cio_rmb(void); +///@} + +#endif /* __DOXYGEN__ */ + +/** + * Compiler barrier. + * + * Guarantees that operation reordering does not occur at compile time + * for operations directly before and after the barrier. + */ +#define rte_compiler_barrier() do { \ + asm volatile ("" : : : "memory"); \ +} while(0) + +/*------------------------- 16 bit atomic operations -------------------------*/ + +/** + * Atomic compare and set. + * + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 16-bit words) + * + * @param dst + * The destination location into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src); + +#ifdef RTE_FORCE_INTRINSICS +static inline int +rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src) +{ + return __sync_bool_compare_and_swap(dst, exp, src); +} +#endif + +/** + * Atomic exchange. + * + * (atomic) equivalent to: + * ret = *dst + * *dst = val; + * return ret; + * + * @param dst + * The destination location into which the value will be written. + * @param val + * The new value. 
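As a quick illustration of rte_atomic16_cmpset() described above (sketch only, not part of the patch), a one-shot flag that only the first caller wins:

    #include <rte_atomic.h>

    static volatile uint16_t once_flag;   /* 0 = free, 1 = taken */

    /* Returns non-zero only for the single caller that grabbed the flag. */
    static int
    take_once(void)
    {
        return rte_atomic16_cmpset(&once_flag, 0, 1);
    }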
+ * @return + * The original value at that location + */ +static inline uint16_t +rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val); + +#ifdef RTE_FORCE_INTRINSICS +static inline uint16_t +rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val) +{ +#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +#else + return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST); +#endif +} +#endif + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int16_t cnt; /**< An internal counter value. */ +} rte_atomic16_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC16_INIT(val) { (val) } + +/** + * Initialize an atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_init(rte_atomic16_t *v) +{ + v->cnt = 0; +} + +/** + * Atomically read a 16-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int16_t +rte_atomic16_read(const rte_atomic16_t *v) +{ + return v->cnt; +} + +/** + * Atomically set a counter to a 16-bit value. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value for the counter. + */ +static inline void +rte_atomic16_set(rte_atomic16_t *v, int16_t new_value) +{ + v->cnt = new_value; +} + +/** + * Atomically add a 16-bit value to an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic16_add(rte_atomic16_t *v, int16_t inc) +{ + __sync_fetch_and_add(&v->cnt, inc); +} + +/** + * Atomically subtract a 16-bit value from an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic16_sub(rte_atomic16_t *v, int16_t dec) +{ + __sync_fetch_and_sub(&v->cnt, dec); +} + +/** + * Atomically increment a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_inc(rte_atomic16_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic16_inc(rte_atomic16_t *v) +{ + rte_atomic16_add(v, 1); +} +#endif + +/** + * Atomically decrement a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_dec(rte_atomic16_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic16_dec(rte_atomic16_t *v) +{ + rte_atomic16_sub(v, 1); +} +#endif + +/** + * Atomically add a 16-bit value to a counter and return the result. + * + * Atomically adds the 16-bits value (inc) to the atomic counter (v) and + * returns the value of v after addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. + */ +static inline int16_t +rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc) +{ + return __sync_add_and_fetch(&v->cnt, inc); +} + +/** + * Atomically subtract a 16-bit value from a counter and return + * the result. + * + * Atomically subtracts the 16-bit value (inc) from the atomic counter + * (v) and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. 
+ */ +static inline int16_t +rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec) +{ + return __sync_sub_and_fetch(&v->cnt, dec); +} + +/** + * Atomically increment a 16-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the increment operation is 0; false otherwise. + */ +static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v) +{ + return __sync_add_and_fetch(&v->cnt, 1) == 0; +} +#endif + +/** + * Atomically decrement a 16-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the decrement operation is 0; false otherwise. + */ +static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v) +{ + return __sync_sub_and_fetch(&v->cnt, 1) == 0; +} +#endif + +/** + * Atomically test and set a 16-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int rte_atomic16_test_and_set(rte_atomic16_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int rte_atomic16_test_and_set(rte_atomic16_t *v) +{ + return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1); +} +#endif + +/** + * Atomically set a 16-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void rte_atomic16_clear(rte_atomic16_t *v) +{ + v->cnt = 0; +} + +/*------------------------- 32 bit atomic operations -------------------------*/ + +/** + * Atomic compare and set. + * + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 32-bit words) + * + * @param dst + * The destination location into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src); + +#ifdef RTE_FORCE_INTRINSICS +static inline int +rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src) +{ + return __sync_bool_compare_and_swap(dst, exp, src); +} +#endif + +/** + * Atomic exchange. + * + * (atomic) equivalent to: + * ret = *dst + * *dst = val; + * return ret; + * + * @param dst + * The destination location into which the value will be written. + * @param val + * The new value. + * @return + * The original value at that location + */ +static inline uint32_t +rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val); + +#ifdef RTE_FORCE_INTRINSICS +static inline uint32_t +rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val) +{ +#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +#else + return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST); +#endif +} +#endif + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int32_t cnt; /**< An internal counter value. */ +} rte_atomic32_t; + +/** + * Static initializer for an atomic counter. 
+ */ +#define RTE_ATOMIC32_INIT(val) { (val) } + +/** + * Initialize an atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_init(rte_atomic32_t *v) +{ + v->cnt = 0; +} + +/** + * Atomically read a 32-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int32_t +rte_atomic32_read(const rte_atomic32_t *v) +{ + return v->cnt; +} + +/** + * Atomically set a counter to a 32-bit value. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value for the counter. + */ +static inline void +rte_atomic32_set(rte_atomic32_t *v, int32_t new_value) +{ + v->cnt = new_value; +} + +/** + * Atomically add a 32-bit value to an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic32_add(rte_atomic32_t *v, int32_t inc) +{ + __sync_fetch_and_add(&v->cnt, inc); +} + +/** + * Atomically subtract a 32-bit value from an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic32_sub(rte_atomic32_t *v, int32_t dec) +{ + __sync_fetch_and_sub(&v->cnt, dec); +} + +/** + * Atomically increment a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_inc(rte_atomic32_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic32_inc(rte_atomic32_t *v) +{ + rte_atomic32_add(v, 1); +} +#endif + +/** + * Atomically decrement a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_dec(rte_atomic32_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic32_dec(rte_atomic32_t *v) +{ + rte_atomic32_sub(v,1); +} +#endif + +/** + * Atomically add a 32-bit value to a counter and return the result. + * + * Atomically adds the 32-bits value (inc) to the atomic counter (v) and + * returns the value of v after addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. + */ +static inline int32_t +rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc) +{ + return __sync_add_and_fetch(&v->cnt, inc); +} + +/** + * Atomically subtract a 32-bit value from a counter and return + * the result. + * + * Atomically subtracts the 32-bit value (inc) from the atomic counter + * (v) and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int32_t +rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec) +{ + return __sync_sub_and_fetch(&v->cnt, dec); +} + +/** + * Atomically increment a 32-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the increment operation is 0; false otherwise. 
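A typical reference-count pattern built on the 32-bit counter API above, using rte_atomic32_dec_and_test() declared just below (illustrative sketch, not part of the patch):

    #include <rte_atomic.h>

    static rte_atomic32_t refcnt = RTE_ATOMIC32_INIT(1);   /* creator holds one reference */

    static void
    obj_get(void)
    {
        rte_atomic32_inc(&refcnt);
    }

    static void
    obj_put(void)
    {
        if (rte_atomic32_dec_and_test(&refcnt)) {
            /* counter reached zero: last reference dropped, free the object */
        }
    }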
+ */ +static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v) +{ + return __sync_add_and_fetch(&v->cnt, 1) == 0; +} +#endif + +/** + * Atomically decrement a 32-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the decrement operation is 0; false otherwise. + */ +static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v) +{ + return __sync_sub_and_fetch(&v->cnt, 1) == 0; +} +#endif + +/** + * Atomically test and set a 32-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int rte_atomic32_test_and_set(rte_atomic32_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int rte_atomic32_test_and_set(rte_atomic32_t *v) +{ + return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1); +} +#endif + +/** + * Atomically set a 32-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void rte_atomic32_clear(rte_atomic32_t *v) +{ + v->cnt = 0; +} + +/*------------------------- 64 bit atomic operations -------------------------*/ + +/** + * An atomic compare and set function used by the mutex functions. + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 64-bit words) + * + * @param dst + * The destination into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src); + +#ifdef RTE_FORCE_INTRINSICS +static inline int +rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src) +{ + return __sync_bool_compare_and_swap(dst, exp, src); +} +#endif + +/** + * Atomic exchange. + * + * (atomic) equivalent to: + * ret = *dst + * *dst = val; + * return ret; + * + * @param dst + * The destination location into which the value will be written. + * @param val + * The new value. + * @return + * The original value at that location + */ +static inline uint64_t +rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val); + +#ifdef RTE_FORCE_INTRINSICS +static inline uint64_t +rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val) +{ +#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +#else + return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST); +#endif +} +#endif + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int64_t cnt; /**< Internal counter value. */ +} rte_atomic64_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC64_INIT(val) { (val) } + +/** + * Initialize the atomic counter. + * + * @param v + * A pointer to the atomic counter. 
+ */ +static inline void +rte_atomic64_init(rte_atomic64_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic64_init(rte_atomic64_t *v) +{ +#ifdef __LP64__ + v->cnt = 0; +#else + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, 0); + } +#endif +} +#endif + +/** + * Atomically read a 64-bit counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int64_t +rte_atomic64_read(rte_atomic64_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int64_t +rte_atomic64_read(rte_atomic64_t *v) +{ +#ifdef __LP64__ + return v->cnt; +#else + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + /* replace the value by itself */ + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, tmp); + } + return tmp; +#endif +} +#endif + +/** + * Atomically set a 64-bit counter. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value of the counter. + */ +static inline void +rte_atomic64_set(rte_atomic64_t *v, int64_t new_value); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic64_set(rte_atomic64_t *v, int64_t new_value) +{ +#ifdef __LP64__ + v->cnt = new_value; +#else + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, new_value); + } +#endif +} +#endif + +/** + * Atomically add a 64-bit value to a counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic64_add(rte_atomic64_t *v, int64_t inc); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic64_add(rte_atomic64_t *v, int64_t inc) +{ + __sync_fetch_and_add(&v->cnt, inc); +} +#endif + +/** + * Atomically subtract a 64-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic64_sub(rte_atomic64_t *v, int64_t dec); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic64_sub(rte_atomic64_t *v, int64_t dec) +{ + __sync_fetch_and_sub(&v->cnt, dec); +} +#endif + +/** + * Atomically increment a 64-bit counter by one and test. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_inc(rte_atomic64_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic64_inc(rte_atomic64_t *v) +{ + rte_atomic64_add(v, 1); +} +#endif + +/** + * Atomically decrement a 64-bit counter by one and test. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_dec(rte_atomic64_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_atomic64_dec(rte_atomic64_t *v) +{ + rte_atomic64_sub(v, 1); +} +#endif + +/** + * Add a 64-bit value to an atomic counter and return the result. + * + * Atomically adds the 64-bit value (inc) to the atomic counter (v) and + * returns the value of v after the addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. 
+ */ +static inline int64_t +rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc); + +#ifdef RTE_FORCE_INTRINSICS +static inline int64_t +rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc) +{ + return __sync_add_and_fetch(&v->cnt, inc); +} +#endif + +/** + * Subtract a 64-bit value from an atomic counter and return the result. + * + * Atomically subtracts the 64-bit value (dec) from the atomic counter (v) + * and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int64_t +rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec); + +#ifdef RTE_FORCE_INTRINSICS +static inline int64_t +rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec) +{ + return __sync_sub_and_fetch(&v->cnt, dec); +} +#endif + +/** + * Atomically increment a 64-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns + * true if the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the addition is 0; false otherwise. + */ +static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v) +{ + return rte_atomic64_add_return(v, 1) == 0; +} +#endif + +/** + * Atomically decrement a 64-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after subtraction is 0; false otherwise. + */ +static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v) +{ + return rte_atomic64_sub_return(v, 1) == 0; +} +#endif + +/** + * Atomically test and set a 64-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int rte_atomic64_test_and_set(rte_atomic64_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline int rte_atomic64_test_and_set(rte_atomic64_t *v) +{ + return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1); +} +#endif + +/** + * Atomically set a 64-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void rte_atomic64_clear(rte_atomic64_t *v); + +#ifdef RTE_FORCE_INTRINSICS +static inline void rte_atomic64_clear(rte_atomic64_t *v) +{ + rte_atomic64_set(v, 0); +} +#endif + +/*------------------------ 128 bit atomic operations -------------------------*/ + +/** + * 128-bit integer structure. + */ +RTE_STD_C11 +typedef struct { + RTE_STD_C11 + union { + uint64_t val[2]; +#ifdef RTE_ARCH_64 + __extension__ __int128 int128; +#endif + }; +} __rte_aligned(16) rte_int128_t; + +#ifdef __DOXYGEN__ + +/** + * An atomic compare and set function used by the mutex functions. + * (Atomically) Equivalent to: + * @code + * if (*dst == *exp) + * *dst = *src + * else + * *exp = *dst + * @endcode + * + * @note This function is currently available for the x86-64 and aarch64 + * platforms. 
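The 64-bit counters follow the same pattern; a small statistics sketch (not part of the patch):

    #include <stdint.h>
    #include <rte_atomic.h>

    static rte_atomic64_t rx_bytes = RTE_ATOMIC64_INIT(0);

    static void
    account_rx(uint32_t pkt_len)
    {
        rte_atomic64_add(&rx_bytes, pkt_len);
    }

    static int64_t
    read_and_reset_rx(void)
    {
        int64_t total = rte_atomic64_read(&rx_bytes);

        rte_atomic64_clear(&rx_bytes);   /* note: read + clear is not one atomic step */
        return total;
    }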
+ * + * @note The success and failure arguments must be one of the __ATOMIC_* values + * defined in the C++11 standard. For details on their behavior, refer to the + * standard. + * + * @param dst + * The destination into which the value will be written. + * @param exp + * Pointer to the expected value. If the operation fails, this memory is + * updated with the actual value. + * @param src + * Pointer to the new value. + * @param weak + * A value of true allows the comparison to spuriously fail and allows the + * 'exp' update to occur non-atomically (i.e. a torn read may occur). + * Implementations may ignore this argument and only implement the strong + * variant. + * @param success + * If successful, the operation's memory behavior conforms to this (or a + * stronger) model. + * @param failure + * If unsuccessful, the operation's memory behavior conforms to this (or a + * stronger) model. This argument cannot be __ATOMIC_RELEASE, + * __ATOMIC_ACQ_REL, or a stronger model than success. + * @return + * Non-zero on success; 0 on failure. + */ +__rte_experimental +static inline int +rte_atomic128_cmp_exchange(rte_int128_t *dst, + rte_int128_t *exp, + const rte_int128_t *src, + unsigned int weak, + int success, + int failure); + +#endif /* __DOXYGEN__ */ + +#endif /* _RTE_ATOMIC_H_ */ diff --git a/lib/librte_eal/include/generic/rte_byteorder.h b/lib/librte_eal/include/generic/rte_byteorder.h new file mode 100644 index 0000000000..38e8cfd32b --- /dev/null +++ b/lib/librte_eal/include/generic/rte_byteorder.h @@ -0,0 +1,247 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_BYTEORDER_H_ +#define _RTE_BYTEORDER_H_ + +/** + * @file + * + * Byte Swap Operations + * + * This file defines a generic API for byte swap operations. Part of + * the implementation is architecture-specific. + */ + +#include +#ifdef RTE_EXEC_ENV_FREEBSD +#include +#else +#include +#endif + +#include +#include + +/* + * Compile-time endianness detection + */ +#define RTE_BIG_ENDIAN 1 +#define RTE_LITTLE_ENDIAN 2 +#if defined __BYTE_ORDER__ +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define RTE_BYTE_ORDER RTE_BIG_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN +#endif /* __BYTE_ORDER__ */ +#elif defined __BYTE_ORDER +#if __BYTE_ORDER == __BIG_ENDIAN +#define RTE_BYTE_ORDER RTE_BIG_ENDIAN +#elif __BYTE_ORDER == __LITTLE_ENDIAN +#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN +#endif /* __BYTE_ORDER */ +#elif defined __BIG_ENDIAN__ +#define RTE_BYTE_ORDER RTE_BIG_ENDIAN +#elif defined __LITTLE_ENDIAN__ +#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN +#endif +#if !defined(RTE_BYTE_ORDER) +#error Unknown endianness. 
+#endif + +#define RTE_STATIC_BSWAP16(v) \ + ((((uint16_t)(v) & UINT16_C(0x00ff)) << 8) | \ + (((uint16_t)(v) & UINT16_C(0xff00)) >> 8)) + +#define RTE_STATIC_BSWAP32(v) \ + ((((uint32_t)(v) & UINT32_C(0x000000ff)) << 24) | \ + (((uint32_t)(v) & UINT32_C(0x0000ff00)) << 8) | \ + (((uint32_t)(v) & UINT32_C(0x00ff0000)) >> 8) | \ + (((uint32_t)(v) & UINT32_C(0xff000000)) >> 24)) + +#define RTE_STATIC_BSWAP64(v) \ + ((((uint64_t)(v) & UINT64_C(0x00000000000000ff)) << 56) | \ + (((uint64_t)(v) & UINT64_C(0x000000000000ff00)) << 40) | \ + (((uint64_t)(v) & UINT64_C(0x0000000000ff0000)) << 24) | \ + (((uint64_t)(v) & UINT64_C(0x00000000ff000000)) << 8) | \ + (((uint64_t)(v) & UINT64_C(0x000000ff00000000)) >> 8) | \ + (((uint64_t)(v) & UINT64_C(0x0000ff0000000000)) >> 24) | \ + (((uint64_t)(v) & UINT64_C(0x00ff000000000000)) >> 40) | \ + (((uint64_t)(v) & UINT64_C(0xff00000000000000)) >> 56)) + +/* + * These macros are functionally similar to rte_cpu_to_(be|le)(16|32|64)(), + * they take values in host CPU order and return them converted to the + * intended endianness. + * + * They resolve at compilation time to integer constants which can safely be + * used with static initializers, since those cannot involve function calls. + * + * On the other hand, they are not as optimized as their rte_cpu_to_*() + * counterparts, therefore applications should refrain from using them on + * variable values, particularly inside performance-sensitive code. + */ +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN +#define RTE_BE16(v) (rte_be16_t)(v) +#define RTE_BE32(v) (rte_be32_t)(v) +#define RTE_BE64(v) (rte_be64_t)(v) +#define RTE_LE16(v) (rte_le16_t)(RTE_STATIC_BSWAP16(v)) +#define RTE_LE32(v) (rte_le32_t)(RTE_STATIC_BSWAP32(v)) +#define RTE_LE64(v) (rte_le64_t)(RTE_STATIC_BSWAP64(v)) +#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#define RTE_BE16(v) (rte_be16_t)(RTE_STATIC_BSWAP16(v)) +#define RTE_BE32(v) (rte_be32_t)(RTE_STATIC_BSWAP32(v)) +#define RTE_BE64(v) (rte_be64_t)(RTE_STATIC_BSWAP64(v)) +#define RTE_LE16(v) (rte_be16_t)(v) +#define RTE_LE32(v) (rte_be32_t)(v) +#define RTE_LE64(v) (rte_be64_t)(v) +#else +#error Unsupported endianness. +#endif + +/* + * The following types should be used when handling values according to a + * specific byte ordering, which may differ from that of the host CPU. + * + * Libraries, public APIs and applications are encouraged to use them for + * documentation purposes. + */ +typedef uint16_t rte_be16_t; /**< 16-bit big-endian value. */ +typedef uint32_t rte_be32_t; /**< 32-bit big-endian value. */ +typedef uint64_t rte_be64_t; /**< 64-bit big-endian value. */ +typedef uint16_t rte_le16_t; /**< 16-bit little-endian value. */ +typedef uint32_t rte_le32_t; /**< 32-bit little-endian value. */ +typedef uint64_t rte_le64_t; /**< 64-bit little-endian value. */ + +/* + * An internal function to swap bytes in a 16-bit value. + * + * It is used by rte_bswap16() when the value is constant. Do not use + * this function directly; rte_bswap16() is preferred. + */ +static inline uint16_t +rte_constant_bswap16(uint16_t x) +{ + return (uint16_t)RTE_STATIC_BSWAP16(x); +} + +/* + * An internal function to swap bytes in a 32-bit value. + * + * It is used by rte_bswap32() when the value is constant. Do not use + * this function directly; rte_bswap32() is preferred. + */ +static inline uint32_t +rte_constant_bswap32(uint32_t x) +{ + return (uint32_t)RTE_STATIC_BSWAP32(x); +} + +/* + * An internal function to swap bytes of a 64-bit value. + * + * It is used by rte_bswap64() when the value is constant. 
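To make the distinction above concrete (sketch, not part of the patch): the RTE_BE*/RTE_LE* macros resolve at compile time and are therefore safe in static initializers, while rte_cpu_to_be_16() and friends are meant for runtime values:

    #include <stdint.h>
    #include <rte_byteorder.h>

    /* Constant: resolved at compile time, usable in a static initializer. */
    static const rte_be16_t ether_type_ipv4 = RTE_BE16(0x0800);

    /* Runtime value: use the conversion function instead. */
    static rte_be16_t
    port_to_wire(uint16_t host_port)
    {
        return rte_cpu_to_be_16(host_port);
    }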
Do not use + * this function directly; rte_bswap64() is preferred. + */ +static inline uint64_t +rte_constant_bswap64(uint64_t x) +{ + return (uint64_t)RTE_STATIC_BSWAP64(x); +} + + +#ifdef __DOXYGEN__ + +/** + * Swap bytes in a 16-bit value. + */ +static uint16_t rte_bswap16(uint16_t _x); + +/** + * Swap bytes in a 32-bit value. + */ +static uint32_t rte_bswap32(uint32_t x); + +/** + * Swap bytes in a 64-bit value. + */ +static uint64_t rte_bswap64(uint64_t x); + +/** + * Convert a 16-bit value from CPU order to little endian. + */ +static rte_le16_t rte_cpu_to_le_16(uint16_t x); + +/** + * Convert a 32-bit value from CPU order to little endian. + */ +static rte_le32_t rte_cpu_to_le_32(uint32_t x); + +/** + * Convert a 64-bit value from CPU order to little endian. + */ +static rte_le64_t rte_cpu_to_le_64(uint64_t x); + + +/** + * Convert a 16-bit value from CPU order to big endian. + */ +static rte_be16_t rte_cpu_to_be_16(uint16_t x); + +/** + * Convert a 32-bit value from CPU order to big endian. + */ +static rte_be32_t rte_cpu_to_be_32(uint32_t x); + +/** + * Convert a 64-bit value from CPU order to big endian. + */ +static rte_be64_t rte_cpu_to_be_64(uint64_t x); + + +/** + * Convert a 16-bit value from little endian to CPU order. + */ +static uint16_t rte_le_to_cpu_16(rte_le16_t x); + +/** + * Convert a 32-bit value from little endian to CPU order. + */ +static uint32_t rte_le_to_cpu_32(rte_le32_t x); + +/** + * Convert a 64-bit value from little endian to CPU order. + */ +static uint64_t rte_le_to_cpu_64(rte_le64_t x); + + +/** + * Convert a 16-bit value from big endian to CPU order. + */ +static uint16_t rte_be_to_cpu_16(rte_be16_t x); + +/** + * Convert a 32-bit value from big endian to CPU order. + */ +static uint32_t rte_be_to_cpu_32(rte_be32_t x); + +/** + * Convert a 64-bit value from big endian to CPU order. + */ +static uint64_t rte_be_to_cpu_64(rte_be64_t x); + +#endif /* __DOXYGEN__ */ + +#ifdef RTE_FORCE_INTRINSICS +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) +#define rte_bswap16(x) __builtin_bswap16(x) +#endif + +#define rte_bswap32(x) __builtin_bswap32(x) + +#define rte_bswap64(x) __builtin_bswap64(x) + +#endif + +#endif /* _RTE_BYTEORDER_H_ */ diff --git a/lib/librte_eal/include/generic/rte_cpuflags.h b/lib/librte_eal/include/generic/rte_cpuflags.h new file mode 100644 index 0000000000..872f0ebe3e --- /dev/null +++ b/lib/librte_eal/include/generic/rte_cpuflags.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_CPUFLAGS_H_ +#define _RTE_CPUFLAGS_H_ + +/** + * @file + * Architecture specific API to determine available CPU features at runtime. + */ + +#include "rte_common.h" +#include + +/** + * Enumeration of all CPU features supported + */ +__extension__ +enum rte_cpu_flag_t; + +/** + * Get name of CPU flag + * + * @param feature + * CPU flag ID + * @return + * flag name + * NULL if flag ID is invalid + */ +__extension__ +const char * +rte_cpu_get_flag_name(enum rte_cpu_flag_t feature); + +/** + * Function for checking a CPU flag availability + * + * @param feature + * CPU flag to query CPU for + * @return + * 1 if flag is available + * 0 if flag is not available + * -ENOENT if flag is invalid + */ +__extension__ +int +rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature); + +/** + * This function checks that the currently used CPU supports the CPU features + * that were specified at compile time. 
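A runtime feature probe with this API might look as follows (sketch, not part of the patch; RTE_CPUFLAG_AVX2 is the x86 flag name, other architectures expose their own enum rte_cpu_flag_t values):

    #include <stdio.h>
    #include <rte_cpuflags.h>

    static void
    cpuflag_sketch(void)
    {
        int ret = rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2);

        if (ret == 1)
            printf("%s is available\n", rte_cpu_get_flag_name(RTE_CPUFLAG_AVX2));
        else if (ret == 0)
            printf("AVX2 is not available\n");
        else
            printf("invalid CPU flag\n");
    }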
It is called automatically within the + * EAL, so does not need to be used by applications. This version returns a + * result so that decisions may be made (for instance, graceful shutdowns). + */ +int +rte_cpu_is_supported(void); + +/** + * This function attempts to retrieve a value from the auxiliary vector. + * If it is unsuccessful, the result will be 0, and errno will be set. + * + * @return A value from the auxiliary vector. When the value is 0, check + * errno to determine if an error occurred. + */ +unsigned long +rte_cpu_getauxval(unsigned long type); + +/** + * This function retrieves a value from the auxiliary vector, and compares it + * as a string against the value retrieved. + * + * @return The result of calling strcmp() against the value retrieved from + * the auxiliary vector. When the value is 0 (meaning a match is found), + * check errno to determine if an error occurred. + */ +int +rte_cpu_strcmp_auxval(unsigned long type, const char *str); + +#endif /* _RTE_CPUFLAGS_H_ */ diff --git a/lib/librte_eal/include/generic/rte_cycles.h b/lib/librte_eal/include/generic/rte_cycles.h new file mode 100644 index 0000000000..73d1fa7b92 --- /dev/null +++ b/lib/librte_eal/include/generic/rte_cycles.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation. + * Copyright(c) 2013 6WIND S.A. + */ + +#ifndef _RTE_CYCLES_H_ +#define _RTE_CYCLES_H_ + +/** + * @file + * + * Simple Time Reference Functions (Cycles and HPET). + */ + +#include +#include +#include +#include + +#define MS_PER_S 1000 +#define US_PER_S 1000000 +#define NS_PER_S 1000000000 + +enum timer_source { + EAL_TIMER_TSC = 0, + EAL_TIMER_HPET +}; +extern enum timer_source eal_timer_source; + +/** + * Get the measured frequency of the RDTSC counter + * + * @return + * The TSC frequency for this lcore + */ +uint64_t +rte_get_tsc_hz(void); + +/** + * Return the number of TSC cycles since boot + * + * @return + * the number of cycles + */ +static inline uint64_t +rte_get_tsc_cycles(void); + +#ifdef RTE_LIBEAL_USE_HPET +/** + * Return the number of HPET cycles since boot + * + * This counter is global for all execution units. The number of + * cycles in one second can be retrieved using rte_get_hpet_hz(). + * + * @return + * the number of cycles + */ +uint64_t +rte_get_hpet_cycles(void); + +/** + * Get the number of HPET cycles in one second. + * + * @return + * The number of cycles in one second. + */ +uint64_t +rte_get_hpet_hz(void); + +/** + * Initialise the HPET for use. This must be called before the rte_get_hpet_hz + * and rte_get_hpet_cycles APIs are called. If this function does not succeed, + * then the HPET functions are unavailable and should not be called. + * + * @param make_default + * If set, the hpet timer becomes the default timer whose values are + * returned by the rte_get_timer_hz/cycles API calls + * + * @return + * 0 on success, + * -1 on error, and the make_default parameter is ignored. + */ +int rte_eal_hpet_init(int make_default); + +#endif + +/** + * Get the number of cycles since boot from the default timer. + * + * @return + * The number of cycles + */ +static inline uint64_t +rte_get_timer_cycles(void) +{ +#ifdef RTE_LIBEAL_USE_HPET + switch(eal_timer_source) { + case EAL_TIMER_TSC: +#endif + return rte_get_tsc_cycles(); +#ifdef RTE_LIBEAL_USE_HPET + case EAL_TIMER_HPET: + return rte_get_hpet_cycles(); + default: rte_panic("Invalid timer source specified\n"); + } +#endif +} + +/** + * Get the number of cycles in one second for the default timer. 
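A common use of the cycle-counter API above is coarse time measurement (sketch, not part of the patch):

    #include <stdint.h>
    #include <rte_cycles.h>

    /* Returns the elapsed time of work() in seconds, using the TSC. */
    static double
    time_work(void (*work)(void))
    {
        uint64_t hz = rte_get_tsc_hz();
        uint64_t start = rte_get_tsc_cycles();

        work();
        return (double)(rte_get_tsc_cycles() - start) / (double)hz;
    }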
+ * + * @return + * The number of cycles in one second. + */ +static inline uint64_t +rte_get_timer_hz(void) +{ +#ifdef RTE_LIBEAL_USE_HPET + switch(eal_timer_source) { + case EAL_TIMER_TSC: +#endif + return rte_get_tsc_hz(); +#ifdef RTE_LIBEAL_USE_HPET + case EAL_TIMER_HPET: + return rte_get_hpet_hz(); + default: rte_panic("Invalid timer source specified\n"); + } +#endif +} +/** + * Wait at least us microseconds. + * This function can be replaced with user-defined function. + * @see rte_delay_us_callback_register + * + * @param us + * The number of microseconds to wait. + */ +extern void +(*rte_delay_us)(unsigned int us); + +/** + * Wait at least ms milliseconds. + * + * @param ms + * The number of milliseconds to wait. + */ +static inline void +rte_delay_ms(unsigned ms) +{ + rte_delay_us(ms * 1000); +} + +/** + * Blocking delay function. + * + * @param us + * Number of microseconds to wait. + */ +void rte_delay_us_block(unsigned int us); + +/** + * Delay function that uses system sleep. + * Does not block the CPU core. + * + * @param us + * Number of microseconds to wait. + */ +__rte_experimental +void +rte_delay_us_sleep(unsigned int us); + +/** + * Replace rte_delay_us with user defined function. + * + * @param userfunc + * User function which replaces rte_delay_us. rte_delay_us_block restores + * builtin block delay function. + */ +void rte_delay_us_callback_register(void(*userfunc)(unsigned int)); + +#endif /* _RTE_CYCLES_H_ */ diff --git a/lib/librte_eal/include/generic/rte_io.h b/lib/librte_eal/include/generic/rte_io.h new file mode 100644 index 0000000000..da457f7f7e --- /dev/null +++ b/lib/librte_eal/include/generic/rte_io.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Cavium, Inc + */ + +#ifndef _RTE_IO_H_ +#define _RTE_IO_H_ + +/** + * @file + * I/O device memory operations + * + * This file defines the generic API for I/O device memory read/write operations + */ + +#include +#include +#include + +#ifdef __DOXYGEN__ + +/** + * Read a 8-bit value from I/O device memory address *addr*. + * + * The relaxed version does not have additional I/O memory barrier, useful in + * accessing the device registers of integrated controllers which implicitly + * strongly ordered with respect to memory access. + * + * @param addr + * I/O memory address to read the value from + * @return + * read value + */ +static inline uint8_t +rte_read8_relaxed(const volatile void *addr); + +/** + * Read a 16-bit value from I/O device memory address *addr*. + * + * The relaxed version does not have additional I/O memory barrier, useful in + * accessing the device registers of integrated controllers which implicitly + * strongly ordered with respect to memory access. + * + * @param addr + * I/O memory address to read the value from + * @return + * read value + */ +static inline uint16_t +rte_read16_relaxed(const volatile void *addr); + +/** + * Read a 32-bit value from I/O device memory address *addr*. + * + * The relaxed version does not have additional I/O memory barrier, useful in + * accessing the device registers of integrated controllers which implicitly + * strongly ordered with respect to memory access. + * + * @param addr + * I/O memory address to read the value from + * @return + * read value + */ +static inline uint32_t +rte_read32_relaxed(const volatile void *addr); + +/** + * Read a 64-bit value from I/O device memory address *addr*. 
+ * + * The relaxed version does not have additional I/O memory barrier, useful in + * accessing the device registers of integrated controllers which implicitly + * strongly ordered with respect to memory access. + * + * @param addr + * I/O memory address to read the value from + * @return + * read value + */ +static inline uint64_t +rte_read64_relaxed(const volatile void *addr); + +/** + * Write a 8-bit value to I/O device memory address *addr*. + * + * The relaxed version does not have additional I/O memory barrier, useful in + * accessing the device registers of integrated controllers which implicitly + * strongly ordered with respect to memory access. + * + * @param value + * Value to write + * @param addr + * I/O memory address to write the value to + */ + +static inline void +rte_write8_relaxed(uint8_t value, volatile void *addr); + +/** + * Write a 16-bit value to I/O device memory address *addr*. + * + * The relaxed version does not have additional I/O memory barrier, useful in + * accessing the device registers of integrated controllers which implicitly + * strongly ordered with respect to memory access. + * + * @param value + * Value to write + * @param addr + * I/O memory address to write the value to + */ +static inline void +rte_write16_relaxed(uint16_t value, volatile void *addr); + +/** + * Write a 32-bit value to I/O device memory address *addr*. + * + * The relaxed version does not have additional I/O memory barrier, useful in + * accessing the device registers of integrated controllers which implicitly + * strongly ordered with respect to memory access. + * + * @param value + * Value to write + * @param addr + * I/O memory address to write the value to + */ +static inline void +rte_write32_relaxed(uint32_t value, volatile void *addr); + +/** + * Write a 64-bit value to I/O device memory address *addr*. + * + * The relaxed version does not have additional I/O memory barrier, useful in + * accessing the device registers of integrated controllers which implicitly + * strongly ordered with respect to memory access. + * + * @param value + * Value to write + * @param addr + * I/O memory address to write the value to + */ +static inline void +rte_write64_relaxed(uint64_t value, volatile void *addr); + +/** + * Read a 8-bit value from I/O device memory address *addr*. + * + * @param addr + * I/O memory address to read the value from + * @return + * read value + */ +static inline uint8_t +rte_read8(const volatile void *addr); + +/** + * Read a 16-bit value from I/O device memory address *addr*. + * + * + * @param addr + * I/O memory address to read the value from + * @return + * read value + */ +static inline uint16_t +rte_read16(const volatile void *addr); + +/** + * Read a 32-bit value from I/O device memory address *addr*. + * + * @param addr + * I/O memory address to read the value from + * @return + * read value + */ +static inline uint32_t +rte_read32(const volatile void *addr); + +/** + * Read a 64-bit value from I/O device memory address *addr*. + * + * @param addr + * I/O memory address to read the value from + * @return + * read value + */ +static inline uint64_t +rte_read64(const volatile void *addr); + +/** + * Write a 8-bit value to I/O device memory address *addr*. + * + * @param value + * Value to write + * @param addr + * I/O memory address to write the value to + */ + +static inline void +rte_write8(uint8_t value, volatile void *addr); + +/** + * Write a 16-bit value to I/O device memory address *addr*. 
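A hedged sketch of driver-style register access with the ordered accessors defined in this file; the BAR pointer and register offsets are hypothetical and not taken from any real device:

#include <stdint.h>
#include <rte_io.h>
#include <rte_pause.h>

#define HYP_REG_CTRL   0x00     /* hypothetical control register offset */
#define HYP_REG_STATUS 0x04     /* hypothetical status register offset */

static void
hyp_dev_start(volatile uint8_t *bar)    /* bar = mapped device registers */
{
        /* rte_write32() issues an I/O write barrier before the store, so
         * earlier memory writes are visible to the device first. */
        rte_write32(1, bar + HYP_REG_CTRL);

        /* rte_read32() adds an I/O read barrier after the load. */
        while ((rte_read32(bar + HYP_REG_STATUS) & 0x1) == 0)
                rte_pause();
}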
+ * + * @param value + * Value to write + * @param addr + * I/O memory address to write the value to + */ +static inline void +rte_write16(uint16_t value, volatile void *addr); + +/** + * Write a 32-bit value to I/O device memory address *addr*. + * + * @param value + * Value to write + * @param addr + * I/O memory address to write the value to + */ +static inline void +rte_write32(uint32_t value, volatile void *addr); + +/** + * Write a 64-bit value to I/O device memory address *addr*. + * + * @param value + * Value to write + * @param addr + * I/O memory address to write the value to + */ +static inline void +rte_write64(uint64_t value, volatile void *addr); + +#endif /* __DOXYGEN__ */ + +#ifndef RTE_OVERRIDE_IO_H + +static __rte_always_inline uint8_t +rte_read8_relaxed(const volatile void *addr) +{ + return *(const volatile uint8_t *)addr; +} + +static __rte_always_inline uint16_t +rte_read16_relaxed(const volatile void *addr) +{ + return *(const volatile uint16_t *)addr; +} + +static __rte_always_inline uint32_t +rte_read32_relaxed(const volatile void *addr) +{ + return *(const volatile uint32_t *)addr; +} + +static __rte_always_inline uint64_t +rte_read64_relaxed(const volatile void *addr) +{ + return *(const volatile uint64_t *)addr; +} + +static __rte_always_inline void +rte_write8_relaxed(uint8_t value, volatile void *addr) +{ + *(volatile uint8_t *)addr = value; +} + +static __rte_always_inline void +rte_write16_relaxed(uint16_t value, volatile void *addr) +{ + *(volatile uint16_t *)addr = value; +} + +static __rte_always_inline void +rte_write32_relaxed(uint32_t value, volatile void *addr) +{ + *(volatile uint32_t *)addr = value; +} + +static __rte_always_inline void +rte_write64_relaxed(uint64_t value, volatile void *addr) +{ + *(volatile uint64_t *)addr = value; +} + +static __rte_always_inline uint8_t +rte_read8(const volatile void *addr) +{ + uint8_t val; + val = rte_read8_relaxed(addr); + rte_io_rmb(); + return val; +} + +static __rte_always_inline uint16_t +rte_read16(const volatile void *addr) +{ + uint16_t val; + val = rte_read16_relaxed(addr); + rte_io_rmb(); + return val; +} + +static __rte_always_inline uint32_t +rte_read32(const volatile void *addr) +{ + uint32_t val; + val = rte_read32_relaxed(addr); + rte_io_rmb(); + return val; +} + +static __rte_always_inline uint64_t +rte_read64(const volatile void *addr) +{ + uint64_t val; + val = rte_read64_relaxed(addr); + rte_io_rmb(); + return val; +} + +static __rte_always_inline void +rte_write8(uint8_t value, volatile void *addr) +{ + rte_io_wmb(); + rte_write8_relaxed(value, addr); +} + +static __rte_always_inline void +rte_write16(uint16_t value, volatile void *addr) +{ + rte_io_wmb(); + rte_write16_relaxed(value, addr); +} + +static __rte_always_inline void +rte_write32(uint32_t value, volatile void *addr) +{ + rte_io_wmb(); + rte_write32_relaxed(value, addr); +} + +static __rte_always_inline void +rte_write64(uint64_t value, volatile void *addr) +{ + rte_io_wmb(); + rte_write64_relaxed(value, addr); +} + +#endif /* RTE_OVERRIDE_IO_H */ + +#endif /* _RTE_IO_H_ */ diff --git a/lib/librte_eal/include/generic/rte_mcslock.h b/lib/librte_eal/include/generic/rte_mcslock.h new file mode 100644 index 0000000000..2bef28351c --- /dev/null +++ b/lib/librte_eal/include/generic/rte_mcslock.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Arm Limited + */ + +#ifndef _RTE_MCSLOCK_H_ +#define _RTE_MCSLOCK_H_ + +/** + * @file + * + * RTE MCS lock + * + * This file defines the main data structure and 
APIs for MCS queued lock. + * + * The MCS lock (proposed by John M. Mellor-Crummey and Michael L. Scott) + * provides scalability by spinning on a CPU/thread local variable which + * avoids expensive cache bouncings. It provides fairness by maintaining + * a list of acquirers and passing the lock to each CPU/thread in the order + * they acquired the lock. + */ + +#include +#include +#include + +/** + * The rte_mcslock_t type. + */ +typedef struct rte_mcslock { + struct rte_mcslock *next; + int locked; /* 1 if the queue locked, 0 otherwise */ +} rte_mcslock_t; + +/** + * @warning + * @b EXPERIMENTAL: This API may change without prior notice + * + * Take the MCS lock. + * + * @param msl + * A pointer to the pointer of a MCS lock. + * When the lock is initialized or declared, the msl pointer should be + * set to NULL. + * @param me + * A pointer to a new node of MCS lock. Each CPU/thread acquiring the + * lock should use its 'own node'. + */ +__rte_experimental +static inline void +rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me) +{ + rte_mcslock_t *prev; + + /* Init me node */ + __atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED); + __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED); + + /* If the queue is empty, the exchange operation is enough to acquire + * the lock. Hence, the exchange operation requires acquire semantics. + * The store to me->next above should complete before the node is + * visible to other CPUs/threads. Hence, the exchange operation requires + * release semantics as well. + */ + prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL); + if (likely(prev == NULL)) { + /* Queue was empty, no further action required, + * proceed with lock taken. + */ + return; + } + __atomic_store_n(&prev->next, me, __ATOMIC_RELAXED); + + /* The while-load of me->locked should not move above the previous + * store to prev->next. Otherwise it will cause a deadlock. Need a + * store-load barrier. + */ + __atomic_thread_fence(__ATOMIC_ACQ_REL); + /* If the lock has already been acquired, it first atomically + * places the node at the end of the queue and then proceeds + * to spin on me->locked until the previous lock holder resets + * the me->locked using mcslock_unlock(). + */ + while (__atomic_load_n(&me->locked, __ATOMIC_ACQUIRE)) + rte_pause(); +} + +/** + * @warning + * @b EXPERIMENTAL: This API may change without prior notice + * + * Release the MCS lock. + * + * @param msl + * A pointer to the pointer of a MCS lock. + * @param me + * A pointer to the node of MCS lock passed in rte_mcslock_lock. + */ +__rte_experimental +static inline void +rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me) +{ + /* Check if there are more nodes in the queue. */ + if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) { + /* No, last member in the queue. */ + rte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED); + + /* Release the lock by setting it to NULL */ + if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0, + __ATOMIC_RELEASE, __ATOMIC_RELAXED))) + return; + + /* Speculative execution would be allowed to read in the + * while-loop first. This has the potential to cause a + * deadlock. Need a load barrier. + */ + __atomic_thread_fence(__ATOMIC_ACQUIRE); + /* More nodes added to the queue by other CPUs. + * Wait until the next pointer is set. + */ + while (__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL) + rte_pause(); + } + + /* Pass lock to next waiter. 
*/ + __atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE); +} + +/** + * @warning + * @b EXPERIMENTAL: This API may change without prior notice + * + * Try to take the lock. + * + * @param msl + * A pointer to the pointer of a MCS lock. + * @param me + * A pointer to a new node of MCS lock. + * @return + * 1 if the lock is successfully taken; 0 otherwise. + */ +__rte_experimental +static inline int +rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me) +{ + /* Init me node */ + __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED); + + /* Try to lock */ + rte_mcslock_t *expected = NULL; + + /* The lock can be taken only when the queue is empty. Hence, + * the compare-exchange operation requires acquire semantics. + * The store to me->next above should complete before the node + * is visible to other CPUs/threads. Hence, the compare-exchange + * operation requires release semantics as well. + */ + return __atomic_compare_exchange_n(msl, &expected, me, 0, + __ATOMIC_ACQ_REL, __ATOMIC_RELAXED); +} + +/** + * @warning + * @b EXPERIMENTAL: This API may change without prior notice + * + * Test if the lock is taken. + * + * @param msl + * A pointer to a MCS lock node. + * @return + * 1 if the lock is currently taken; 0 otherwise. + */ +__rte_experimental +static inline int +rte_mcslock_is_locked(rte_mcslock_t *msl) +{ + return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL); +} + +#endif /* _RTE_MCSLOCK_H_ */ diff --git a/lib/librte_eal/include/generic/rte_memcpy.h b/lib/librte_eal/include/generic/rte_memcpy.h new file mode 100644 index 0000000000..701e550c31 --- /dev/null +++ b/lib/librte_eal/include/generic/rte_memcpy.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_MEMCPY_H_ +#define _RTE_MEMCPY_H_ + +/** + * @file + * + * Functions for vectorised implementation of memcpy(). + */ + +/** + * Copy 16 bytes from one location to another using optimised + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov16(uint8_t *dst, const uint8_t *src); + +/** + * Copy 32 bytes from one location to another using optimised + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov32(uint8_t *dst, const uint8_t *src); + +#ifdef __DOXYGEN__ + +/** + * Copy 48 bytes from one location to another using optimised + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov48(uint8_t *dst, const uint8_t *src); + +#endif /* __DOXYGEN__ */ + +/** + * Copy 64 bytes from one location to another using optimised + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov64(uint8_t *dst, const uint8_t *src); + +/** + * Copy 128 bytes from one location to another using optimised + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov128(uint8_t *dst, const uint8_t *src); + +/** + * Copy 256 bytes from one location to another using optimised + * instructions. 
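Stepping back to the MCS lock added above, a hedged usage sketch: each lcore supplies its own queue node, the protected counter is illustrative only, and the experimental API assumes ALLOW_EXPERIMENTAL_API at build time:

#include <stdint.h>
#include <rte_mcslock.h>
#include <rte_per_lcore.h>

static rte_mcslock_t *counter_lock;     /* NULL means the queue is empty */
static uint64_t counter;

/* One queue node per lcore; it must stay valid until unlock returns. */
RTE_DEFINE_PER_LCORE(rte_mcslock_t, mcs_node);

static void
counter_inc(void)
{
        rte_mcslock_t *me = &RTE_PER_LCORE(mcs_node);

        rte_mcslock_lock(&counter_lock, me);
        counter++;
        rte_mcslock_unlock(&counter_lock, me);
}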
The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov256(uint8_t *dst, const uint8_t *src); + +#ifdef __DOXYGEN__ + +/** + * Copy bytes from one location to another. The locations must not overlap. + * + * @note This is implemented as a macro, so it's address should not be taken + * and care is needed as parameter expressions may be evaluated multiple times. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + * @param n + * Number of bytes to copy. + * @return + * Pointer to the destination data. + */ +static void * +rte_memcpy(void *dst, const void *src, size_t n); + +#endif /* __DOXYGEN__ */ + +#endif /* _RTE_MEMCPY_H_ */ diff --git a/lib/librte_eal/include/generic/rte_pause.h b/lib/librte_eal/include/generic/rte_pause.h new file mode 100644 index 0000000000..7422785f1a --- /dev/null +++ b/lib/librte_eal/include/generic/rte_pause.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + * Copyright(c) 2019 Arm Limited + */ + +#ifndef _RTE_PAUSE_H_ +#define _RTE_PAUSE_H_ + +/** + * @file + * + * CPU pause operation. + * + */ + +#include +#include +#include +#include +#include + +/** + * Pause CPU execution for a short while + * + * This call is intended for tight loops which poll a shared resource or wait + * for an event. A short pause within the loop may reduce the power consumption. + */ +static inline void rte_pause(void); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Wait for *addr to be updated with a 16-bit expected value, with a relaxed + * memory ordering model meaning the loads around this API can be reordered. + * + * @param addr + * A pointer to the memory location. + * @param expected + * A 16-bit expected value to be in the memory location. + * @param memorder + * Two different memory orders that can be specified: + * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to + * C++11 memory orders with the same names, see the C++11 standard or + * the GCC wiki on atomic synchronization for detailed definition. + */ +__rte_experimental +static __rte_always_inline void +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected, + int memorder); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Wait for *addr to be updated with a 32-bit expected value, with a relaxed + * memory ordering model meaning the loads around this API can be reordered. + * + * @param addr + * A pointer to the memory location. + * @param expected + * A 32-bit expected value to be in the memory location. + * @param memorder + * Two different memory orders that can be specified: + * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to + * C++11 memory orders with the same names, see the C++11 standard or + * the GCC wiki on atomic synchronization for detailed definition. + */ +__rte_experimental +static __rte_always_inline void +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, + int memorder); + +/** + * @warning + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + * Wait for *addr to be updated with a 64-bit expected value, with a relaxed + * memory ordering model meaning the loads around this API can be reordered. + * + * @param addr + * A pointer to the memory location. 
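A hedged sketch of the copy helpers documented above; the 64-byte header type is hypothetical:

#include <stddef.h>
#include <stdint.h>
#include <rte_memcpy.h>

struct hyp_hdr {                        /* hypothetical cache-line-sized header */
        uint8_t bytes[64];
};

static void
copy_hdr(struct hyp_hdr *dst, const struct hyp_hdr *src)
{
        /* Fixed 64-byte copy using the vectorised helper. */
        rte_mov64((uint8_t *)dst, (const uint8_t *)src);
}

static void
copy_payload(void *dst, const void *src, size_t len)
{
        /* Variable-size copy; rte_memcpy() selects an optimised path. */
        rte_memcpy(dst, src, len);
}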
+ * @param expected + * A 64-bit expected value to be in the memory location. + * @param memorder + * Two different memory orders that can be specified: + * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to + * C++11 memory orders with the same names, see the C++11 standard or + * the GCC wiki on atomic synchronization for detailed definition. + */ +__rte_experimental +static __rte_always_inline void +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected, + int memorder); + +#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED +static __rte_always_inline void +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected, + int memorder) +{ + assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED); + + while (__atomic_load_n(addr, memorder) != expected) + rte_pause(); +} + +static __rte_always_inline void +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, + int memorder) +{ + assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED); + + while (__atomic_load_n(addr, memorder) != expected) + rte_pause(); +} + +static __rte_always_inline void +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected, + int memorder) +{ + assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED); + + while (__atomic_load_n(addr, memorder) != expected) + rte_pause(); +} +#endif + +#endif /* _RTE_PAUSE_H_ */ diff --git a/lib/librte_eal/include/generic/rte_prefetch.h b/lib/librte_eal/include/generic/rte_prefetch.h new file mode 100644 index 0000000000..6e47bdfbad --- /dev/null +++ b/lib/librte_eal/include/generic/rte_prefetch.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + +#ifndef _RTE_PREFETCH_H_ +#define _RTE_PREFETCH_H_ + +/** + * @file + * + * Prefetch operations. + * + * This file defines an API for prefetch macros / inline-functions, + * which are architecture-dependent. Prefetching occurs when a + * processor requests an instruction or data from memory to cache + * before it is actually needed, potentially speeding up the execution of the + * program. + */ + +/** + * Prefetch a cache line into all cache levels. + * @param p + * Address to prefetch + */ +static inline void rte_prefetch0(const volatile void *p); + +/** + * Prefetch a cache line into all cache levels except the 0th cache level. + * @param p + * Address to prefetch + */ +static inline void rte_prefetch1(const volatile void *p); + +/** + * Prefetch a cache line into all cache levels except the 0th and 1th cache + * levels. + * @param p + * Address to prefetch + */ +static inline void rte_prefetch2(const volatile void *p); + +/** + * Prefetch a cache line into all cache levels (non-temporal/transient version) + * + * The non-temporal prefetch is intended as a prefetch hint that processor will + * use the prefetched data only once or short period, unlike the + * rte_prefetch0() function which imply that prefetched data to use repeatedly. 
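A hedged sketch of waiting on a shared flag with the helpers above; the flag and its producer are illustrative, and the experimental API assumes ALLOW_EXPERIMENTAL_API at build time:

#include <stdint.h>
#include <rte_pause.h>

static volatile uint32_t ready;         /* 0 = not ready, 1 = ready */

static void
consumer_wait(void)
{
        /* Spin politely (rte_pause() inside) until 'ready' becomes 1. */
        rte_wait_until_equal_32(&ready, 1, __ATOMIC_ACQUIRE);
}

static void
producer_publish(void)
{
        __atomic_store_n(&ready, 1, __ATOMIC_RELEASE);
}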
+ * + * @param p + * Address to prefetch + */ +static inline void rte_prefetch_non_temporal(const volatile void *p); + +#endif /* _RTE_PREFETCH_H_ */ diff --git a/lib/librte_eal/include/generic/rte_rwlock.h b/lib/librte_eal/include/generic/rte_rwlock.h new file mode 100644 index 0000000000..da9bc3e9c0 --- /dev/null +++ b/lib/librte_eal/include/generic/rte_rwlock.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_RWLOCK_H_ +#define _RTE_RWLOCK_H_ + +/** + * @file + * + * RTE Read-Write Locks + * + * This file defines an API for read-write locks. The lock is used to + * protect data that allows multiple readers in parallel, but only + * one writer. All readers are blocked until the writer is finished + * writing. + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +/** + * The rte_rwlock_t type. + * + * cnt is -1 when write lock is held, and > 0 when read locks are held. + */ +typedef struct { + volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. */ +} rte_rwlock_t; + +/** + * A static rwlock initializer. + */ +#define RTE_RWLOCK_INITIALIZER { 0 } + +/** + * Initialize the rwlock to an unlocked state. + * + * @param rwl + * A pointer to the rwlock structure. + */ +static inline void +rte_rwlock_init(rte_rwlock_t *rwl) +{ + rwl->cnt = 0; +} + +/** + * Take a read lock. Loop until the lock is held. + * + * @param rwl + * A pointer to a rwlock structure. + */ +static inline void +rte_rwlock_read_lock(rte_rwlock_t *rwl) +{ + int32_t x; + int success = 0; + + while (success == 0) { + x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); + /* write lock is held */ + if (x < 0) { + rte_pause(); + continue; + } + success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1, + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); + } +} + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * try to take a read lock. + * + * @param rwl + * A pointer to a rwlock structure. + * @return + * - zero if the lock is successfully taken + * - -EBUSY if lock could not be acquired for reading because a + * writer holds the lock + */ +__rte_experimental +static inline int +rte_rwlock_read_trylock(rte_rwlock_t *rwl) +{ + int32_t x; + int success = 0; + + while (success == 0) { + x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); + /* write lock is held */ + if (x < 0) + return -EBUSY; + success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1, + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); + } + + return 0; +} + +/** + * Release a read lock. + * + * @param rwl + * A pointer to the rwlock structure. + */ +static inline void +rte_rwlock_read_unlock(rte_rwlock_t *rwl) +{ + __atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE); +} + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * try to take a write lock. + * + * @param rwl + * A pointer to a rwlock structure. + * @return + * - zero if the lock is successfully taken + * - -EBUSY if lock could not be acquired for writing because + * it was already locked for reading or writing + */ +__rte_experimental +static inline int +rte_rwlock_write_trylock(rte_rwlock_t *rwl) +{ + int32_t x; + + x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); + if (x != 0 || __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1, + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) == 0) + return -EBUSY; + + return 0; +} + +/** + * Take a write lock. Loop until the lock is held. + * + * @param rwl + * A pointer to a rwlock structure. 
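A hedged sketch of the prefetch hints above in a processing loop; the array of buffer pointers is illustrative:

#include <stdint.h>
#include <rte_prefetch.h>

static uint64_t
sum_first_words(void * const bufs[], unsigned int n)
{
        uint64_t sum = 0;
        unsigned int i;

        for (i = 0; i < n; i++) {
                /* Hint the next buffer into cache while using this one. */
                if (i + 1 < n)
                        rte_prefetch0(bufs[i + 1]);
                sum += *(const uint64_t *)bufs[i];
        }
        return sum;
}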
+ */ +static inline void +rte_rwlock_write_lock(rte_rwlock_t *rwl) +{ + int32_t x; + int success = 0; + + while (success == 0) { + x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); + /* a lock is held */ + if (x != 0) { + rte_pause(); + continue; + } + success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1, + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); + } +} + +/** + * Release a write lock. + * + * @param rwl + * A pointer to a rwlock structure. + */ +static inline void +rte_rwlock_write_unlock(rte_rwlock_t *rwl) +{ + __atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE); +} + +/** + * Try to execute critical section in a hardware memory transaction, if it + * fails or not available take a read lock + * + * NOTE: An attempt to perform a HW I/O operation inside a hardware memory + * transaction always aborts the transaction since the CPU is not able to + * roll-back should the transaction fail. Therefore, hardware transactional + * locks are not advised to be used around rte_eth_rx_burst() and + * rte_eth_tx_burst() calls. + * + * @param rwl + * A pointer to a rwlock structure. + */ +static inline void +rte_rwlock_read_lock_tm(rte_rwlock_t *rwl); + +/** + * Commit hardware memory transaction or release the read lock if the lock is used as a fall-back + * + * @param rwl + * A pointer to the rwlock structure. + */ +static inline void +rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl); + +/** + * Try to execute critical section in a hardware memory transaction, if it + * fails or not available take a write lock + * + * NOTE: An attempt to perform a HW I/O operation inside a hardware memory + * transaction always aborts the transaction since the CPU is not able to + * roll-back should the transaction fail. Therefore, hardware transactional + * locks are not advised to be used around rte_eth_rx_burst() and + * rte_eth_tx_burst() calls. + * + * @param rwl + * A pointer to a rwlock structure. + */ +static inline void +rte_rwlock_write_lock_tm(rte_rwlock_t *rwl); + +/** + * Commit hardware memory transaction or release the write lock if the lock is used as a fall-back + * + * @param rwl + * A pointer to a rwlock structure. + */ +static inline void +rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_RWLOCK_H_ */ diff --git a/lib/librte_eal/include/generic/rte_spinlock.h b/lib/librte_eal/include/generic/rte_spinlock.h new file mode 100644 index 0000000000..87ae7a4f18 --- /dev/null +++ b/lib/librte_eal/include/generic/rte_spinlock.h @@ -0,0 +1,305 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_SPINLOCK_H_ +#define _RTE_SPINLOCK_H_ + +/** + * @file + * + * RTE Spinlocks + * + * This file defines an API for read-write locks, which are implemented + * in an architecture-specific way. This kind of lock simply waits in + * a loop repeatedly checking until the lock becomes available. + * + * All locks must be initialised before use, and only initialised once. + * + */ + +#include +#ifdef RTE_FORCE_INTRINSICS +#include +#endif +#include + +/** + * The rte_spinlock_t type. + */ +typedef struct { + volatile int locked; /**< lock status 0 = unlocked, 1 = locked */ +} rte_spinlock_t; + +/** + * A static spinlock initializer. + */ +#define RTE_SPINLOCK_INITIALIZER { 0 } + +/** + * Initialize the spinlock to an unlocked state. + * + * @param sl + * A pointer to the spinlock. + */ +static inline void +rte_spinlock_init(rte_spinlock_t *sl) +{ + sl->locked = 0; +} + +/** + * Take the spinlock. 
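A hedged sketch of protecting a small lookup table with the read-write lock above; the table itself is hypothetical:

#include <stdint.h>
#include <rte_rwlock.h>

static rte_rwlock_t tbl_lock = RTE_RWLOCK_INITIALIZER;
static uint32_t tbl[64];

static uint32_t
tbl_lookup(unsigned int idx)             /* many readers may run in parallel */
{
        uint32_t v;

        rte_rwlock_read_lock(&tbl_lock);
        v = tbl[idx];
        rte_rwlock_read_unlock(&tbl_lock);
        return v;
}

static void
tbl_update(unsigned int idx, uint32_t v) /* writers are exclusive */
{
        rte_rwlock_write_lock(&tbl_lock);
        tbl[idx] = v;
        rte_rwlock_write_unlock(&tbl_lock);
}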
+ * + * @param sl + * A pointer to the spinlock. + */ +static inline void +rte_spinlock_lock(rte_spinlock_t *sl); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_spinlock_lock(rte_spinlock_t *sl) +{ + int exp = 0; + + while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0, + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) { + while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED)) + rte_pause(); + exp = 0; + } +} +#endif + +/** + * Release the spinlock. + * + * @param sl + * A pointer to the spinlock. + */ +static inline void +rte_spinlock_unlock (rte_spinlock_t *sl); + +#ifdef RTE_FORCE_INTRINSICS +static inline void +rte_spinlock_unlock (rte_spinlock_t *sl) +{ + __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE); +} +#endif + +/** + * Try to take the lock. + * + * @param sl + * A pointer to the spinlock. + * @return + * 1 if the lock is successfully taken; 0 otherwise. + */ +static inline int +rte_spinlock_trylock (rte_spinlock_t *sl); + +#ifdef RTE_FORCE_INTRINSICS +static inline int +rte_spinlock_trylock (rte_spinlock_t *sl) +{ + int exp = 0; + return __atomic_compare_exchange_n(&sl->locked, &exp, 1, + 0, /* disallow spurious failure */ + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); +} +#endif + +/** + * Test if the lock is taken. + * + * @param sl + * A pointer to the spinlock. + * @return + * 1 if the lock is currently taken; 0 otherwise. + */ +static inline int rte_spinlock_is_locked (rte_spinlock_t *sl) +{ + return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE); +} + +/** + * Test if hardware transactional memory (lock elision) is supported + * + * @return + * 1 if the hardware transactional memory is supported; 0 otherwise. + */ +static inline int rte_tm_supported(void); + +/** + * Try to execute critical section in a hardware memory transaction, + * if it fails or not available take the spinlock. + * + * NOTE: An attempt to perform a HW I/O operation inside a hardware memory + * transaction always aborts the transaction since the CPU is not able to + * roll-back should the transaction fail. Therefore, hardware transactional + * locks are not advised to be used around rte_eth_rx_burst() and + * rte_eth_tx_burst() calls. + * + * @param sl + * A pointer to the spinlock. + */ +static inline void +rte_spinlock_lock_tm(rte_spinlock_t *sl); + +/** + * Commit hardware memory transaction or release the spinlock if + * the spinlock is used as a fall-back + * + * @param sl + * A pointer to the spinlock. + */ +static inline void +rte_spinlock_unlock_tm(rte_spinlock_t *sl); + +/** + * Try to execute critical section in a hardware memory transaction, + * if it fails or not available try to take the lock. + * + * NOTE: An attempt to perform a HW I/O operation inside a hardware memory + * transaction always aborts the transaction since the CPU is not able to + * roll-back should the transaction fail. Therefore, hardware transactional + * locks are not advised to be used around rte_eth_rx_burst() and + * rte_eth_tx_burst() calls. + * + * @param sl + * A pointer to the spinlock. + * @return + * 1 if the hardware memory transaction is successfully started + * or lock is successfully taken; 0 otherwise. + */ +static inline int +rte_spinlock_trylock_tm(rte_spinlock_t *sl); + +/** + * The rte_spinlock_recursive_t type. + */ +typedef struct { + rte_spinlock_t sl; /**< the actual spinlock */ + volatile int user; /**< core id using lock, -1 for unused */ + volatile int count; /**< count of time this lock has been called */ +} rte_spinlock_recursive_t; + +/** + * A static recursive spinlock initializer. 
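A hedged sketch of the plain spinlock above guarding a singly linked free list; the node type is hypothetical:

#include <stddef.h>
#include <rte_spinlock.h>

struct hyp_node {
        struct hyp_node *next;
};

static rte_spinlock_t list_lock = RTE_SPINLOCK_INITIALIZER;
static struct hyp_node *list_head;

static void
list_push(struct hyp_node *n)
{
        rte_spinlock_lock(&list_lock);
        n->next = list_head;
        list_head = n;
        rte_spinlock_unlock(&list_lock);
}

static struct hyp_node *
list_pop(void)
{
        struct hyp_node *n;

        rte_spinlock_lock(&list_lock);
        n = list_head;
        if (n != NULL)
                list_head = n->next;
        rte_spinlock_unlock(&list_lock);
        return n;
}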
+ */ +#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0} + +/** + * Initialize the recursive spinlock to an unlocked state. + * + * @param slr + * A pointer to the recursive spinlock. + */ +static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr) +{ + rte_spinlock_init(&slr->sl); + slr->user = -1; + slr->count = 0; +} + +/** + * Take the recursive spinlock. + * + * @param slr + * A pointer to the recursive spinlock. + */ +static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr) +{ + int id = rte_gettid(); + + if (slr->user != id) { + rte_spinlock_lock(&slr->sl); + slr->user = id; + } + slr->count++; +} +/** + * Release the recursive spinlock. + * + * @param slr + * A pointer to the recursive spinlock. + */ +static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr) +{ + if (--(slr->count) == 0) { + slr->user = -1; + rte_spinlock_unlock(&slr->sl); + } + +} + +/** + * Try to take the recursive lock. + * + * @param slr + * A pointer to the recursive spinlock. + * @return + * 1 if the lock is successfully taken; 0 otherwise. + */ +static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr) +{ + int id = rte_gettid(); + + if (slr->user != id) { + if (rte_spinlock_trylock(&slr->sl) == 0) + return 0; + slr->user = id; + } + slr->count++; + return 1; +} + + +/** + * Try to execute critical section in a hardware memory transaction, + * if it fails or not available take the recursive spinlocks + * + * NOTE: An attempt to perform a HW I/O operation inside a hardware memory + * transaction always aborts the transaction since the CPU is not able to + * roll-back should the transaction fail. Therefore, hardware transactional + * locks are not advised to be used around rte_eth_rx_burst() and + * rte_eth_tx_burst() calls. + * + * @param slr + * A pointer to the recursive spinlock. + */ +static inline void rte_spinlock_recursive_lock_tm( + rte_spinlock_recursive_t *slr); + +/** + * Commit hardware memory transaction or release the recursive spinlock + * if the recursive spinlock is used as a fall-back + * + * @param slr + * A pointer to the recursive spinlock. + */ +static inline void rte_spinlock_recursive_unlock_tm( + rte_spinlock_recursive_t *slr); + +/** + * Try to execute critical section in a hardware memory transaction, + * if it fails or not available try to take the recursive lock + * + * NOTE: An attempt to perform a HW I/O operation inside a hardware memory + * transaction always aborts the transaction since the CPU is not able to + * roll-back should the transaction fail. Therefore, hardware transactional + * locks are not advised to be used around rte_eth_rx_burst() and + * rte_eth_tx_burst() calls. + * + * @param slr + * A pointer to the recursive spinlock. + * @return + * 1 if the hardware memory transaction is successfully started + * or lock is successfully taken; 0 otherwise. 
+ */ +static inline int rte_spinlock_recursive_trylock_tm( + rte_spinlock_recursive_t *slr); + +#endif /* _RTE_SPINLOCK_H_ */ diff --git a/lib/librte_eal/include/generic/rte_ticketlock.h b/lib/librte_eal/include/generic/rte_ticketlock.h new file mode 100644 index 0000000000..c295ae7f7e --- /dev/null +++ b/lib/librte_eal/include/generic/rte_ticketlock.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Arm Limited + */ + +#ifndef _RTE_TICKETLOCK_H_ +#define _RTE_TICKETLOCK_H_ + +/** + * @file + * + * RTE ticket locks + * + * This file defines an API for ticket locks, which give each waiting + * thread a ticket and take the lock one by one, first come, first + * serviced. + * + * All locks must be initialised before use, and only initialised once. + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +/** + * The rte_ticketlock_t type. + */ +typedef union { + uint32_t tickets; + struct { + uint16_t current; + uint16_t next; + } s; +} rte_ticketlock_t; + +/** + * A static ticketlock initializer. + */ +#define RTE_TICKETLOCK_INITIALIZER { 0 } + +/** + * Initialize the ticketlock to an unlocked state. + * + * @param tl + * A pointer to the ticketlock. + */ +__rte_experimental +static inline void +rte_ticketlock_init(rte_ticketlock_t *tl) +{ + __atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED); +} + +/** + * Take the ticketlock. + * + * @param tl + * A pointer to the ticketlock. + */ +__rte_experimental +static inline void +rte_ticketlock_lock(rte_ticketlock_t *tl) +{ + uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED); + rte_wait_until_equal_16(&tl->s.current, me, __ATOMIC_ACQUIRE); +} + +/** + * Release the ticketlock. + * + * @param tl + * A pointer to the ticketlock. + */ +__rte_experimental +static inline void +rte_ticketlock_unlock(rte_ticketlock_t *tl) +{ + uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED); + __atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE); +} + +/** + * Try to take the lock. + * + * @param tl + * A pointer to the ticketlock. + * @return + * 1 if the lock is successfully taken; 0 otherwise. + */ +__rte_experimental +static inline int +rte_ticketlock_trylock(rte_ticketlock_t *tl) +{ + rte_ticketlock_t old, new; + old.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED); + new.tickets = old.tickets; + new.s.next++; + if (old.s.next == old.s.current) { + if (__atomic_compare_exchange_n(&tl->tickets, &old.tickets, + new.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) + return 1; + } + + return 0; +} + +/** + * Test if the lock is taken. + * + * @param tl + * A pointer to the ticketlock. + * @return + * 1 if the lock is currently taken; 0 otherwise. + */ +__rte_experimental +static inline int +rte_ticketlock_is_locked(rte_ticketlock_t *tl) +{ + rte_ticketlock_t tic; + tic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE); + return (tic.s.current != tic.s.next); +} + +/** + * The rte_ticketlock_recursive_t type. + */ +#define TICKET_LOCK_INVALID_ID -1 + +typedef struct { + rte_ticketlock_t tl; /**< the actual ticketlock */ + int user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */ + unsigned int count; /**< count of time this lock has been called */ +} rte_ticketlock_recursive_t; + +/** + * A static recursive ticketlock initializer. + */ +#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, \ + TICKET_LOCK_INVALID_ID, 0} + +/** + * Initialize the recursive ticketlock to an unlocked state. 
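A hedged sketch of the basic ticketlock above; the stats counter is illustrative and the experimental API assumes ALLOW_EXPERIMENTAL_API at build time:

#include <stdint.h>
#include <rte_ticketlock.h>

static rte_ticketlock_t stats_lock = RTE_TICKETLOCK_INITIALIZER;
static uint64_t rx_packets;

static void
stats_add(uint64_t n)
{
        rte_ticketlock_lock(&stats_lock);       /* FIFO: waiters served in order */
        rx_packets += n;
        rte_ticketlock_unlock(&stats_lock);
}

static int
stats_try_add(uint64_t n)
{
        if (rte_ticketlock_trylock(&stats_lock) == 0)
                return 0;                       /* busy, caller may retry */
        rx_packets += n;
        rte_ticketlock_unlock(&stats_lock);
        return 1;
}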
+ * + * @param tlr + * A pointer to the recursive ticketlock. + */ +__rte_experimental +static inline void +rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr) +{ + rte_ticketlock_init(&tlr->tl); + __atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED); + tlr->count = 0; +} + +/** + * Take the recursive ticketlock. + * + * @param tlr + * A pointer to the recursive ticketlock. + */ +__rte_experimental +static inline void +rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr) +{ + int id = rte_gettid(); + + if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) { + rte_ticketlock_lock(&tlr->tl); + __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED); + } + tlr->count++; +} + +/** + * Release the recursive ticketlock. + * + * @param tlr + * A pointer to the recursive ticketlock. + */ +__rte_experimental +static inline void +rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr) +{ + if (--(tlr->count) == 0) { + __atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, + __ATOMIC_RELAXED); + rte_ticketlock_unlock(&tlr->tl); + } +} + +/** + * Try to take the recursive lock. + * + * @param tlr + * A pointer to the recursive ticketlock. + * @return + * 1 if the lock is successfully taken; 0 otherwise. + */ +__rte_experimental +static inline int +rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr) +{ + int id = rte_gettid(); + + if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) { + if (rte_ticketlock_trylock(&tlr->tl) == 0) + return 0; + __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED); + } + tlr->count++; + return 1; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_TICKETLOCK_H_ */ diff --git a/lib/librte_eal/include/generic/rte_vect.h b/lib/librte_eal/include/generic/rte_vect.h new file mode 100644 index 0000000000..3fc47979f8 --- /dev/null +++ b/lib/librte_eal/include/generic/rte_vect.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2016 6WIND S.A. + */ + +#ifndef _RTE_VECT_H_ +#define _RTE_VECT_H_ + +/** + * @file + * SIMD vector types + * + * This file defines types to use vector instructions with generic C code. + */ + +#include + +/* Unsigned vector types */ + +/** + * 64 bits vector size to use with unsigned 8 bits elements. + * + * a = (rte_v64u8_t){ a0, a1, a2, a3, a4, a5, a6, a7 } + */ +typedef uint8_t rte_v64u8_t __attribute__((vector_size(8), aligned(8))); + +/** + * 64 bits vector size to use with unsigned 16 bits elements. + * + * a = (rte_v64u16_t){ a0, a1, a2, a3 } + */ +typedef uint16_t rte_v64u16_t __attribute__((vector_size(8), aligned(8))); + +/** + * 64 bits vector size to use with unsigned 32 bits elements. + * + * a = (rte_v64u32_t){ a0, a1 } + */ +typedef uint32_t rte_v64u32_t __attribute__((vector_size(8), aligned(8))); + +/** + * 128 bits vector size to use with unsigned 8 bits elements. + * + * a = (rte_v128u8_t){ a00, a01, a02, a03, a04, a05, a06, a07, + * a08, a09, a10, a11, a12, a13, a14, a15 } + */ +typedef uint8_t rte_v128u8_t __attribute__((vector_size(16), aligned(16))); + +/** + * 128 bits vector size to use with unsigned 16 bits elements. + * + * a = (rte_v128u16_t){ a0, a1, a2, a3, a4, a5, a6, a7 } + */ +typedef uint16_t rte_v128u16_t __attribute__((vector_size(16), aligned(16))); + +/** + * 128 bits vector size to use with unsigned 32 bits elements. + * + * a = (rte_v128u32_t){ a0, a1, a2, a3 } + */ +typedef uint32_t rte_v128u32_t __attribute__((vector_size(16), aligned(16))); + +/** + * 128 bits vector size to use with unsigned 64 bits elements. 
+ * + * a = (rte_v128u64_t){ a0, a1 } + */ +typedef uint64_t rte_v128u64_t __attribute__((vector_size(16), aligned(16))); + +/** + * 256 bits vector size to use with unsigned 8 bits elements. + * + * a = (rte_v256u8_t){ a00, a01, a02, a03, a04, a05, a06, a07, + * a08, a09, a10, a11, a12, a13, a14, a15, + * a16, a17, a18, a19, a20, a21, a22, a23, + * a24, a25, a26, a27, a28, a29, a30, a31 } + */ +typedef uint8_t rte_v256u8_t __attribute__((vector_size(32), aligned(32))); + +/** + * 256 bits vector size to use with unsigned 16 bits elements. + * + * a = (rte_v256u16_t){ a00, a01, a02, a03, a04, a05, a06, a07, + * a08, a09, a10, a11, a12, a13, a14, a15 } + */ +typedef uint16_t rte_v256u16_t __attribute__((vector_size(32), aligned(32))); + +/** + * 256 bits vector size to use with unsigned 32 bits elements. + * + * a = (rte_v256u32_t){ a0, a1, a2, a3, a4, a5, a6, a7 } + */ +typedef uint32_t rte_v256u32_t __attribute__((vector_size(32), aligned(32))); + +/** + * 256 bits vector size to use with unsigned 64 bits elements. + * + * a = (rte_v256u64_t){ a0, a1, a2, a3 } + */ +typedef uint64_t rte_v256u64_t __attribute__((vector_size(32), aligned(32))); + + +/* Signed vector types */ + +/** + * 64 bits vector size to use with 8 bits elements. + * + * a = (rte_v64s8_t){ a0, a1, a2, a3, a4, a5, a6, a7 } + */ +typedef int8_t rte_v64s8_t __attribute__((vector_size(8), aligned(8))); + +/** + * 64 bits vector size to use with 16 bits elements. + * + * a = (rte_v64s16_t){ a0, a1, a2, a3 } + */ +typedef int16_t rte_v64s16_t __attribute__((vector_size(8), aligned(8))); + +/** + * 64 bits vector size to use with 32 bits elements. + * + * a = (rte_v64s32_t){ a0, a1 } + */ +typedef int32_t rte_v64s32_t __attribute__((vector_size(8), aligned(8))); + +/** + * 128 bits vector size to use with 8 bits elements. + * + * a = (rte_v128s8_t){ a00, a01, a02, a03, a04, a05, a06, a07, + * a08, a09, a10, a11, a12, a13, a14, a15 } + */ +typedef int8_t rte_v128s8_t __attribute__((vector_size(16), aligned(16))); + +/** + * 128 bits vector size to use with 16 bits elements. + * + * a = (rte_v128s16_t){ a0, a1, a2, a3, a4, a5, a6, a7 } + */ +typedef int16_t rte_v128s16_t __attribute__((vector_size(16), aligned(16))); + +/** + * 128 bits vector size to use with 32 bits elements. + * + * a = (rte_v128s32_t){ a0, a1, a2, a3 } + */ +typedef int32_t rte_v128s32_t __attribute__((vector_size(16), aligned(16))); + +/** + * 128 bits vector size to use with 64 bits elements. + * + * a = (rte_v128s64_t){ a1, a2 } + */ +typedef int64_t rte_v128s64_t __attribute__((vector_size(16), aligned(16))); + +/** + * 256 bits vector size to use with 8 bits elements. + * + * a = (rte_v256s8_t){ a00, a01, a02, a03, a04, a05, a06, a07, + * a08, a09, a10, a11, a12, a13, a14, a15, + * a16, a17, a18, a19, a20, a21, a22, a23, + * a24, a25, a26, a27, a28, a29, a30, a31 } + */ +typedef int8_t rte_v256s8_t __attribute__((vector_size(32), aligned(32))); + +/** + * 256 bits vector size to use with 16 bits elements. + * + * a = (rte_v256s16_t){ a00, a01, a02, a03, a04, a05, a06, a07, + * a08, a09, a10, a11, a12, a13, a14, a15 } + */ +typedef int16_t rte_v256s16_t __attribute__((vector_size(32), aligned(32))); + +/** + * 256 bits vector size to use with 32 bits elements. + * + * a = (rte_v256s32_t){ a0, a1, a2, a3, a4, a5, a6, a7 } + */ +typedef int32_t rte_v256s32_t __attribute__((vector_size(32), aligned(32))); + +/** + * 256 bits vector size to use with 64 bits elements. 
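A hedged sketch of the generic vector types above using GCC/clang vector extensions; whether this compiles to SIMD instructions depends on the target, so it is illustrative only:

#include <rte_vect.h>

/* Element-wise addition of four 32-bit counters at once. */
static rte_v128u32_t
add4(rte_v128u32_t a, rte_v128u32_t b)
{
        return a + b;
}

static void
vect_example(void)
{
        rte_v128u32_t a = (rte_v128u32_t){ 1, 2, 3, 4 };
        rte_v128u32_t b = (rte_v128u32_t){ 10, 20, 30, 40 };
        rte_v128u32_t c = add4(a, b);   /* { 11, 22, 33, 44 } */

        (void)c;
}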
+ * + * a = (rte_v256s64_t){ a0, a1, a2, a3 } + */ +typedef int64_t rte_v256s64_t __attribute__((vector_size(32), aligned(32))); + +#endif /* _RTE_VECT_H_ */ diff --git a/lib/librte_eal/include/meson.build b/lib/librte_eal/include/meson.build new file mode 100644 index 0000000000..6fd4274941 --- /dev/null +++ b/lib/librte_eal/include/meson.build @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +includes += include_directories('.') + +headers += files( + 'rte_alarm.h', + 'rte_bitmap.h', + 'rte_branch_prediction.h', + 'rte_bus.h', + 'rte_class.h', + 'rte_common.h', + 'rte_compat.h', + 'rte_debug.h', + 'rte_dev.h', + 'rte_devargs.h', + 'rte_eal.h', + 'rte_eal_interrupts.h', + 'rte_eal_memconfig.h', + 'rte_errno.h', + 'rte_fbarray.h', + 'rte_hexdump.h', + 'rte_hypervisor.h', + 'rte_interrupts.h', + 'rte_keepalive.h', + 'rte_launch.h', + 'rte_lcore.h', + 'rte_log.h', + 'rte_malloc.h', + 'rte_memory.h', + 'rte_memzone.h', + 'rte_option.h', + 'rte_pci_dev_feature_defs.h', + 'rte_pci_dev_features.h', + 'rte_per_lcore.h', + 'rte_random.h', + 'rte_reciprocal.h', + 'rte_service.h', + 'rte_service_component.h', + 'rte_string_fns.h', + 'rte_tailq.h', + 'rte_time.h', + 'rte_uuid.h', + 'rte_version.h', + 'rte_vfio.h', +) + +# special case install the generic headers, since they go in a subdir +generic_headers = files( + 'generic/rte_atomic.h', + 'generic/rte_byteorder.h', + 'generic/rte_cpuflags.h', + 'generic/rte_cycles.h', + 'generic/rte_io.h', + 'generic/rte_mcslock.h', + 'generic/rte_memcpy.h', + 'generic/rte_pause.h', + 'generic/rte_prefetch.h', + 'generic/rte_rwlock.h', + 'generic/rte_spinlock.h', + 'generic/rte_ticketlock.h', + 'generic/rte_vect.h', +) +install_headers(generic_headers, subdir: 'generic') diff --git a/lib/librte_eal/include/rte_alarm.h b/lib/librte_eal/include/rte_alarm.h new file mode 100644 index 0000000000..7e4d0b2407 --- /dev/null +++ b/lib/librte_eal/include/rte_alarm.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_ALARM_H_ +#define _RTE_ALARM_H_ + +/** + * @file + * + * Alarm functions + * + * Simple alarm-clock functionality supplied by eal. + * Does not require hpet support. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * Signature of callback back function called when an alarm goes off. + */ +typedef void (*rte_eal_alarm_callback)(void *arg); + +/** + * Function to set a callback to be triggered when us microseconds + * have expired. Accuracy of timing to the microsecond is not guaranteed. The + * alarm function will not be called *before* the requested time, but may + * be called a short period of time afterwards. + * The alarm handler will be called only once. There is no need to call + * "rte_eal_alarm_cancel" from within the callback function. + * + * @param us + * The time in microseconds before the callback is called + * @param cb + * The function to be called when the alarm expires + * @param cb_arg + * Pointer parameter to be passed to the callback function + * + * @return + * On success, zero. + * On failure, a negative error number + */ +int rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb, void *cb_arg); + +/** + * Function to cancel an alarm callback which has been registered before. If + * used outside alarm callback it wait for all callbacks to finish execution. + * + * @param cb_fn + * alarm callback + * @param cb_arg + * Pointer parameter to be passed to the callback function. 
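A hedged sketch of the alarm API above; the one-second period and the stats callback are hypothetical, and re-arming from inside the callback relies on the handler firing only once per rte_eal_alarm_set() call:

#include <stdio.h>
#include <rte_alarm.h>

#define HYP_STATS_PERIOD_US (1000 * 1000)       /* hypothetical: one second */

static void
stats_cb(void *arg)
{
        printf("stats tick\n");
        /* The handler runs once; re-arm it for periodic behaviour. */
        rte_eal_alarm_set(HYP_STATS_PERIOD_US, stats_cb, arg);
}

static int
stats_timer_start(void)
{
        return rte_eal_alarm_set(HYP_STATS_PERIOD_US, stats_cb, NULL);
}

static void
stats_timer_stop(void)
{
        /* (void *)-1 cancels every pending instance of stats_cb. */
        rte_eal_alarm_cancel(stats_cb, (void *)-1);
}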
To remove all + * copies of a given callback function, irrespective of parameter, (void *)-1 + * can be used here. + * + * @return + * - value greater than 0 and rte_errno not changed - returned value is + * the number of canceled alarm callback functions + * - value greater or equal 0 and rte_errno set to EINPROGRESS, at least one + * alarm could not be canceled because cancellation was requested from alarm + * callback context. Returned value is the number of successfully canceled + * alarm callbacks + * - 0 and rte_errno set to ENOENT - no alarm found + * - -1 and rte_errno set to EINVAL - invalid parameter (NULL callback) + */ +int rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg); + +#ifdef __cplusplus +} +#endif + + +#endif /* _RTE_ALARM_H_ */ diff --git a/lib/librte_eal/include/rte_bitmap.h b/lib/librte_eal/include/rte_bitmap.h new file mode 100644 index 0000000000..6b846f251b --- /dev/null +++ b/lib/librte_eal/include/rte_bitmap.h @@ -0,0 +1,490 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef __INCLUDE_RTE_BITMAP_H__ +#define __INCLUDE_RTE_BITMAP_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file + * RTE Bitmap + * + * The bitmap component provides a mechanism to manage large arrays of bits + * through bit get/set/clear and bit array scan operations. + * + * The bitmap scan operation is optimized for 64-bit CPUs using 64/128 byte cache + * lines. The bitmap is hierarchically organized using two arrays (array1 and + * array2), with each bit in array1 being associated with a full cache line + * (512/1024 bits) of bitmap bits, which are stored in array2: the bit in array1 + * is set only when there is at least one bit set within its associated array2 + * bits, otherwise the bit in array1 is cleared. The read and write operations + * for array1 and array2 are always done in slabs of 64 bits. + * + * This bitmap is not thread safe. For lock free operation on a specific bitmap + * instance, a single writer thread performing bit set/clear operations is + * allowed, only the writer thread can do bitmap scan operations, while there + * can be several reader threads performing bit get operations in parallel with + * the writer thread. When the use of locking primitives is acceptable, the + * serialization of the bit set/clear and bitmap scan operations needs to be + * enforced by the caller, while the bit get operation does not require locking + * the bitmap. 
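A hedged sketch of allocating, populating and scanning a bitmap with the API defined in this file; the allocation uses rte_zmalloc() and the bit positions are arbitrary:

#include <stdint.h>
#include <rte_bitmap.h>
#include <rte_malloc.h>

static struct rte_bitmap *
hyp_bitmap_create(uint32_t n_bits)
{
        uint32_t sz = rte_bitmap_get_memory_footprint(n_bits);
        void *mem = rte_zmalloc("hyp_bmp", sz, RTE_CACHE_LINE_SIZE);

        if (mem == NULL)
                return NULL;
        return rte_bitmap_init(n_bits, mem, sz);
}

static void
hyp_bitmap_use(struct rte_bitmap *bmp)
{
        uint32_t pos;
        uint64_t slab;

        rte_bitmap_set(bmp, 42);
        if (rte_bitmap_scan(bmp, &pos, &slab)) {
                /* pos is the first bit of the returned slab; find the set bit. */
                rte_bitmap_clear(bmp, pos + __builtin_ctzll(slab));
        }
}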
+ * + ***/ + +#include +#include +#include +#include +#include +#include +#include + +/* Slab */ +#define RTE_BITMAP_SLAB_BIT_SIZE 64 +#define RTE_BITMAP_SLAB_BIT_SIZE_LOG2 6 +#define RTE_BITMAP_SLAB_BIT_MASK (RTE_BITMAP_SLAB_BIT_SIZE - 1) + +/* Cache line (CL) */ +#define RTE_BITMAP_CL_BIT_SIZE (RTE_CACHE_LINE_SIZE * 8) +#define RTE_BITMAP_CL_BIT_SIZE_LOG2 (RTE_CACHE_LINE_SIZE_LOG2 + 3) +#define RTE_BITMAP_CL_BIT_MASK (RTE_BITMAP_CL_BIT_SIZE - 1) + +#define RTE_BITMAP_CL_SLAB_SIZE (RTE_BITMAP_CL_BIT_SIZE / RTE_BITMAP_SLAB_BIT_SIZE) +#define RTE_BITMAP_CL_SLAB_SIZE_LOG2 (RTE_BITMAP_CL_BIT_SIZE_LOG2 - RTE_BITMAP_SLAB_BIT_SIZE_LOG2) +#define RTE_BITMAP_CL_SLAB_MASK (RTE_BITMAP_CL_SLAB_SIZE - 1) + +/** Bitmap data structure */ +struct rte_bitmap { + /* Context for array1 and array2 */ + uint64_t *array1; /**< Bitmap array1 */ + uint64_t *array2; /**< Bitmap array2 */ + uint32_t array1_size; /**< Number of 64-bit slabs in array1 that are actually used */ + uint32_t array2_size; /**< Number of 64-bit slabs in array2 */ + + /* Context for the "scan next" operation */ + uint32_t index1; /**< Bitmap scan: Index of current array1 slab */ + uint32_t offset1; /**< Bitmap scan: Offset of current bit within current array1 slab */ + uint32_t index2; /**< Bitmap scan: Index of current array2 slab */ + uint32_t go2; /**< Bitmap scan: Go/stop condition for current array2 cache line */ + + /* Storage space for array1 and array2 */ + uint8_t memory[]; +}; + +static inline void +__rte_bitmap_index1_inc(struct rte_bitmap *bmp) +{ + bmp->index1 = (bmp->index1 + 1) & (bmp->array1_size - 1); +} + +static inline uint64_t +__rte_bitmap_mask1_get(struct rte_bitmap *bmp) +{ + return (~1llu) << bmp->offset1; +} + +static inline void +__rte_bitmap_index2_set(struct rte_bitmap *bmp) +{ + bmp->index2 = (((bmp->index1 << RTE_BITMAP_SLAB_BIT_SIZE_LOG2) + bmp->offset1) << RTE_BITMAP_CL_SLAB_SIZE_LOG2); +} + +static inline uint32_t +__rte_bitmap_get_memory_footprint(uint32_t n_bits, + uint32_t *array1_byte_offset, uint32_t *array1_slabs, + uint32_t *array2_byte_offset, uint32_t *array2_slabs) +{ + uint32_t n_slabs_context, n_slabs_array1, n_cache_lines_context_and_array1; + uint32_t n_cache_lines_array2; + uint32_t n_bytes_total; + + n_cache_lines_array2 = (n_bits + RTE_BITMAP_CL_BIT_SIZE - 1) / RTE_BITMAP_CL_BIT_SIZE; + n_slabs_array1 = (n_cache_lines_array2 + RTE_BITMAP_SLAB_BIT_SIZE - 1) / RTE_BITMAP_SLAB_BIT_SIZE; + n_slabs_array1 = rte_align32pow2(n_slabs_array1); + n_slabs_context = (sizeof(struct rte_bitmap) + (RTE_BITMAP_SLAB_BIT_SIZE / 8) - 1) / (RTE_BITMAP_SLAB_BIT_SIZE / 8); + n_cache_lines_context_and_array1 = (n_slabs_context + n_slabs_array1 + RTE_BITMAP_CL_SLAB_SIZE - 1) / RTE_BITMAP_CL_SLAB_SIZE; + n_bytes_total = (n_cache_lines_context_and_array1 + n_cache_lines_array2) * RTE_CACHE_LINE_SIZE; + + if (array1_byte_offset) { + *array1_byte_offset = n_slabs_context * (RTE_BITMAP_SLAB_BIT_SIZE / 8); + } + if (array1_slabs) { + *array1_slabs = n_slabs_array1; + } + if (array2_byte_offset) { + *array2_byte_offset = n_cache_lines_context_and_array1 * RTE_CACHE_LINE_SIZE; + } + if (array2_slabs) { + *array2_slabs = n_cache_lines_array2 * RTE_BITMAP_CL_SLAB_SIZE; + } + + return n_bytes_total; +} + +static inline void +__rte_bitmap_scan_init(struct rte_bitmap *bmp) +{ + bmp->index1 = bmp->array1_size - 1; + bmp->offset1 = RTE_BITMAP_SLAB_BIT_SIZE - 1; + __rte_bitmap_index2_set(bmp); + bmp->index2 += RTE_BITMAP_CL_SLAB_SIZE; + + bmp->go2 = 0; +} + +/** + * Bitmap memory footprint calculation + * + * @param n_bits + * 
Number of bits in the bitmap + * @return + * Bitmap memory footprint measured in bytes on success, 0 on error + */ +static inline uint32_t +rte_bitmap_get_memory_footprint(uint32_t n_bits) { + /* Check input arguments */ + if (n_bits == 0) { + return 0; + } + + return __rte_bitmap_get_memory_footprint(n_bits, NULL, NULL, NULL, NULL); +} + +/** + * Bitmap initialization + * + * @param n_bits + * Number of pre-allocated bits in array2. + * @param mem + * Base address of array1 and array2. + * @param mem_size + * Minimum expected size of bitmap. + * @return + * Handle to bitmap instance. + */ +static inline struct rte_bitmap * +rte_bitmap_init(uint32_t n_bits, uint8_t *mem, uint32_t mem_size) +{ + struct rte_bitmap *bmp; + uint32_t array1_byte_offset, array1_slabs, array2_byte_offset, array2_slabs; + uint32_t size; + + /* Check input arguments */ + if (n_bits == 0) { + return NULL; + } + + if ((mem == NULL) || (((uintptr_t) mem) & RTE_CACHE_LINE_MASK)) { + return NULL; + } + + size = __rte_bitmap_get_memory_footprint(n_bits, + &array1_byte_offset, &array1_slabs, + &array2_byte_offset, &array2_slabs); + if (size < mem_size) { + return NULL; + } + + /* Setup bitmap */ + memset(mem, 0, size); + bmp = (struct rte_bitmap *) mem; + + bmp->array1 = (uint64_t *) &mem[array1_byte_offset]; + bmp->array1_size = array1_slabs; + bmp->array2 = (uint64_t *) &mem[array2_byte_offset]; + bmp->array2_size = array2_slabs; + + __rte_bitmap_scan_init(bmp); + + return bmp; +} + +/** + * Bitmap free + * + * @param bmp + * Handle to bitmap instance + * @return + * 0 upon success, error code otherwise + */ +static inline int +rte_bitmap_free(struct rte_bitmap *bmp) +{ + /* Check input arguments */ + if (bmp == NULL) { + return -1; + } + + return 0; +} + +/** + * Bitmap reset + * + * @param bmp + * Handle to bitmap instance + */ +static inline void +rte_bitmap_reset(struct rte_bitmap *bmp) +{ + memset(bmp->array1, 0, bmp->array1_size * sizeof(uint64_t)); + memset(bmp->array2, 0, bmp->array2_size * sizeof(uint64_t)); + __rte_bitmap_scan_init(bmp); +} + +/** + * Bitmap location prefetch into CPU L1 cache + * + * @param bmp + * Handle to bitmap instance + * @param pos + * Bit position + * @return + * 0 upon success, error code otherwise + */ +static inline void +rte_bitmap_prefetch0(struct rte_bitmap *bmp, uint32_t pos) +{ + uint64_t *slab2; + uint32_t index2; + + index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; + slab2 = bmp->array2 + index2; + rte_prefetch0((void *) slab2); +} + +/** + * Bitmap bit get + * + * @param bmp + * Handle to bitmap instance + * @param pos + * Bit position + * @return + * 0 when bit is cleared, non-zero when bit is set + */ +static inline uint64_t +rte_bitmap_get(struct rte_bitmap *bmp, uint32_t pos) +{ + uint64_t *slab2; + uint32_t index2, offset2; + + index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; + offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK; + slab2 = bmp->array2 + index2; + return (*slab2) & (1llu << offset2); +} + +/** + * Bitmap bit set + * + * @param bmp + * Handle to bitmap instance + * @param pos + * Bit position + */ +static inline void +rte_bitmap_set(struct rte_bitmap *bmp, uint32_t pos) +{ + uint64_t *slab1, *slab2; + uint32_t index1, index2, offset1, offset2; + + /* Set bit in array2 slab and set bit in array1 slab */ + index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; + offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK; + index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2); + offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK; + slab2 = 
bmp->array2 + index2; + slab1 = bmp->array1 + index1; + + *slab2 |= 1llu << offset2; + *slab1 |= 1llu << offset1; +} + +/** + * Bitmap slab set + * + * @param bmp + * Handle to bitmap instance + * @param pos + * Bit position identifying the array2 slab + * @param slab + * Value to be assigned to the 64-bit slab in array2 + */ +static inline void +rte_bitmap_set_slab(struct rte_bitmap *bmp, uint32_t pos, uint64_t slab) +{ + uint64_t *slab1, *slab2; + uint32_t index1, index2, offset1; + + /* Set bits in array2 slab and set bit in array1 slab */ + index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; + index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2); + offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK; + slab2 = bmp->array2 + index2; + slab1 = bmp->array1 + index1; + + *slab2 |= slab; + *slab1 |= 1llu << offset1; +} + +static inline uint64_t +__rte_bitmap_line_not_empty(uint64_t *slab2) +{ + uint64_t v1, v2, v3, v4; + + v1 = slab2[0] | slab2[1]; + v2 = slab2[2] | slab2[3]; + v3 = slab2[4] | slab2[5]; + v4 = slab2[6] | slab2[7]; + v1 |= v2; + v3 |= v4; + + return v1 | v3; +} + +/** + * Bitmap bit clear + * + * @param bmp + * Handle to bitmap instance + * @param pos + * Bit position + */ +static inline void +rte_bitmap_clear(struct rte_bitmap *bmp, uint32_t pos) +{ + uint64_t *slab1, *slab2; + uint32_t index1, index2, offset1, offset2; + + /* Clear bit in array2 slab */ + index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2; + offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK; + slab2 = bmp->array2 + index2; + + /* Return if array2 slab is not all-zeros */ + *slab2 &= ~(1llu << offset2); + if (*slab2){ + return; + } + + /* Check the entire cache line of array2 for all-zeros */ + index2 &= ~ RTE_BITMAP_CL_SLAB_MASK; + slab2 = bmp->array2 + index2; + if (__rte_bitmap_line_not_empty(slab2)) { + return; + } + + /* The array2 cache line is all-zeros, so clear bit in array1 slab */ + index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2); + offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK; + slab1 = bmp->array1 + index1; + *slab1 &= ~(1llu << offset1); + + return; +} + +static inline int +__rte_bitmap_scan_search(struct rte_bitmap *bmp) +{ + uint64_t value1; + uint32_t i; + + /* Check current array1 slab */ + value1 = bmp->array1[bmp->index1]; + value1 &= __rte_bitmap_mask1_get(bmp); + + if (rte_bsf64_safe(value1, &bmp->offset1)) + return 1; + + __rte_bitmap_index1_inc(bmp); + bmp->offset1 = 0; + + /* Look for another array1 slab */ + for (i = 0; i < bmp->array1_size; i ++, __rte_bitmap_index1_inc(bmp)) { + value1 = bmp->array1[bmp->index1]; + + if (rte_bsf64_safe(value1, &bmp->offset1)) + return 1; + } + + return 0; +} + +static inline void +__rte_bitmap_scan_read_init(struct rte_bitmap *bmp) +{ + __rte_bitmap_index2_set(bmp); + bmp->go2 = 1; + rte_prefetch1((void *)(bmp->array2 + bmp->index2 + 8)); +} + +static inline int +__rte_bitmap_scan_read(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) +{ + uint64_t *slab2; + + slab2 = bmp->array2 + bmp->index2; + for ( ; bmp->go2 ; bmp->index2 ++, slab2 ++, bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK) { + if (*slab2) { + *pos = bmp->index2 << RTE_BITMAP_SLAB_BIT_SIZE_LOG2; + *slab = *slab2; + + bmp->index2 ++; + slab2 ++; + bmp->go2 = bmp->index2 & RTE_BITMAP_CL_SLAB_MASK; + return 1; + } + } + + return 0; +} + +/** + * Bitmap scan (with automatic wrap-around) + * + * @param bmp + * Handle to bitmap instance + * @param pos + * When function call returns 1, pos 
contains the position of the next set + * bit, otherwise not modified + * @param slab + * When function call returns 1, slab contains the value of the entire 64-bit + * slab where the bit indicated by pos is located. Slabs are always 64-bit + * aligned, so the position of the first bit of the slab (this bit is not + * necessarily set) is pos / 64. Once a slab has been returned by the bitmap + * scan operation, the internal pointers of the bitmap are updated to point + * after this slab, so the same slab will not be returned again if it + * contains more than one bit which is set. When function call returns 0, + * slab is not modified. + * @return + * 0 if there is no bit set in the bitmap, 1 otherwise + */ +static inline int +rte_bitmap_scan(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab) +{ + /* Return data from current array2 line if available */ + if (__rte_bitmap_scan_read(bmp, pos, slab)) { + return 1; + } + + /* Look for non-empty array2 line */ + if (__rte_bitmap_scan_search(bmp)) { + __rte_bitmap_scan_read_init(bmp); + __rte_bitmap_scan_read(bmp, pos, slab); + return 1; + } + + /* Empty bitmap */ + return 0; +} + +#ifdef __cplusplus +} +#endif + +#endif /* __INCLUDE_RTE_BITMAP_H__ */ diff --git a/lib/librte_eal/include/rte_branch_prediction.h b/lib/librte_eal/include/rte_branch_prediction.h new file mode 100644 index 0000000000..854ef9e5dd --- /dev/null +++ b/lib/librte_eal/include/rte_branch_prediction.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +/** + * @file + * Branch Prediction Helpers in RTE + */ + +#ifndef _RTE_BRANCH_PREDICTION_H_ +#define _RTE_BRANCH_PREDICTION_H_ + +/** + * Check if a branch is likely to be taken. + * + * This compiler builtin allows the developer to indicate if a branch is + * likely to be taken. Example: + * + * if (likely(x > 1)) + * do_stuff(); + * + */ +#ifndef likely +#define likely(x) __builtin_expect(!!(x), 1) +#endif /* likely */ + +/** + * Check if a branch is unlikely to be taken. + * + * This compiler builtin allows the developer to indicate if a branch is + * unlikely to be taken. Example: + * + * if (unlikely(x < 1)) + * do_stuff(); + * + */ +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif /* unlikely */ + +#endif /* _RTE_BRANCH_PREDICTION_H_ */ diff --git a/lib/librte_eal/include/rte_bus.h b/lib/librte_eal/include/rte_bus.h new file mode 100644 index 0000000000..d3034d0edf --- /dev/null +++ b/lib/librte_eal/include/rte_bus.h @@ -0,0 +1,389 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2016 NXP + */ + +#ifndef _RTE_BUS_H_ +#define _RTE_BUS_H_ + +/** + * @file + * + * DPDK device bus interface + * + * This file exposes API and interfaces for bus abstraction + * over the devices and drivers in EAL. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#include +#include + +/** Double linked list of buses */ +TAILQ_HEAD(rte_bus_list, rte_bus); + + +/** + * IOVA mapping mode. + * + * IOVA mapping mode is iommu programming mode of a device. + * That device (for example: IOMMU backed DMA device) based + * on rte_iova_mode will generate physical or virtual address. + * + */ +enum rte_iova_mode { + RTE_IOVA_DC = 0, /* Don't care mode */ + RTE_IOVA_PA = (1 << 0), /* DMA using physical address */ + RTE_IOVA_VA = (1 << 1) /* DMA using virtual address */ +}; + +/** + * Bus specific scan for devices attached on the bus. 
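A short consumption sketch for the set/scan API above, assuming a bitmap created as in the earlier sketch; the bit positions and the helper name are arbitrary. Note that the returned pos is the first bit of the slab, so the offsets of the set bits are recovered from the slab value itself, and unlikely() from rte_branch_prediction.h is used on the error path:

#include <stdio.h>
#include <rte_bitmap.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>

static void
example_bitmap_drain(struct rte_bitmap *bmp)
{
    uint32_t pos, offset;
    uint64_t slab;

    if (unlikely(bmp == NULL))
        return;

    rte_bitmap_set(bmp, 7);
    rte_bitmap_set(bmp, 1001);

    while (rte_bitmap_scan(bmp, &pos, &slab)) {
        while (slab != 0) {
            offset = rte_bsf64(slab);
            printf("bit %u is set\n", pos + offset);
            rte_bitmap_clear(bmp, pos + offset);
            slab &= slab - 1; /* drop the lowest set bit */
        }
    }
}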
+ * For each bus object, the scan would be responsible for finding devices and + * adding them to its private device list. + * + * A bus should mandatorily implement this method. + * + * @return + * 0 for successful scan + * <0 for unsuccessful scan with error value + */ +typedef int (*rte_bus_scan_t)(void); + +/** + * Implementation specific probe function which is responsible for linking + * devices on that bus with applicable drivers. + * + * This is called while iterating over each registered bus. + * + * @return + * 0 for successful probe + * !0 for any error while probing + */ +typedef int (*rte_bus_probe_t)(void); + +/** + * Device iterator to find a device on a bus. + * + * This function returns an rte_device if one of those held by the bus + * matches the data passed as parameter. + * + * If the comparison function returns zero this function should stop iterating + * over any more devices. To continue a search the device of a previous search + * can be passed via the start parameter. + * + * @param cmp + * Comparison function. + * + * @param data + * Data to compare each device against. + * + * @param start + * starting point for the iteration + * + * @return + * The first device matching the data, NULL if none exists. + */ +typedef struct rte_device * +(*rte_bus_find_device_t)(const struct rte_device *start, rte_dev_cmp_t cmp, + const void *data); + +/** + * Implementation specific probe function which is responsible for linking + * devices on that bus with applicable drivers. + * + * @param dev + * Device pointer that was returned by a previous call to find_device. + * + * @return + * 0 on success. + * !0 on error. + */ +typedef int (*rte_bus_plug_t)(struct rte_device *dev); + +/** + * Implementation specific remove function which is responsible for unlinking + * devices on that bus from assigned driver. + * + * @param dev + * Device pointer that was returned by a previous call to find_device. + * + * @return + * 0 on success. + * !0 on error. + */ +typedef int (*rte_bus_unplug_t)(struct rte_device *dev); + +/** + * Bus specific parsing function. + * Validates the syntax used in the textual representation of a device, + * If the syntax is valid and ``addr`` is not NULL, writes the bus-specific + * device representation to ``addr``. + * + * @param[in] name + * device textual description + * + * @param[out] addr + * device information location address, into which parsed info + * should be written. If NULL, nothing should be written, which + * is not an error. + * + * @return + * 0 if parsing was successful. + * !0 for any error. + */ +typedef int (*rte_bus_parse_t)(const char *name, void *addr); + +/** + * Device level DMA map function. + * After a successful call, the memory segment will be mapped to the + * given device. + * + * @param dev + * Device pointer. + * @param addr + * Virtual address to map. + * @param iova + * IOVA address to map. + * @param len + * Length of the memory segment being mapped. + * + * @return + * 0 if mapping was successful. + * Negative value and rte_errno is set otherwise. + */ +typedef int (*rte_dev_dma_map_t)(struct rte_device *dev, void *addr, + uint64_t iova, size_t len); + +/** + * Device level DMA unmap function. + * After a successful call, the memory segment will no longer be + * accessible by the given device. + * + * @param dev + * Device pointer. + * @param addr + * Virtual address to unmap. + * @param iova + * IOVA address to unmap. + * @param len + * Length of the memory segment being mapped. 
+ * + * @return + * 0 if un-mapping was successful. + * Negative value and rte_errno is set otherwise. + */ +typedef int (*rte_dev_dma_unmap_t)(struct rte_device *dev, void *addr, + uint64_t iova, size_t len); + +/** + * Implement a specific hot-unplug handler, which is responsible for + * handle the failure when device be hot-unplugged. When the event of + * hot-unplug be detected, it could call this function to handle + * the hot-unplug failure and avoid app crash. + * @param dev + * Pointer of the device structure. + * + * @return + * 0 on success. + * !0 on error. + */ +typedef int (*rte_bus_hot_unplug_handler_t)(struct rte_device *dev); + +/** + * Implement a specific sigbus handler, which is responsible for handling + * the sigbus error which is either original memory error, or specific memory + * error that caused of device be hot-unplugged. When sigbus error be captured, + * it could call this function to handle sigbus error. + * @param failure_addr + * Pointer of the fault address of the sigbus error. + * + * @return + * 0 for success handle the sigbus for hot-unplug. + * 1 for not process it, because it is a generic sigbus error. + * -1 for failed to handle the sigbus for hot-unplug. + */ +typedef int (*rte_bus_sigbus_handler_t)(const void *failure_addr); + +/** + * Bus scan policies + */ +enum rte_bus_scan_mode { + RTE_BUS_SCAN_UNDEFINED, + RTE_BUS_SCAN_WHITELIST, + RTE_BUS_SCAN_BLACKLIST, +}; + +/** + * A structure used to configure bus operations. + */ +struct rte_bus_conf { + enum rte_bus_scan_mode scan_mode; /**< Scan policy. */ +}; + + +/** + * Get common iommu class of the all the devices on the bus. The bus may + * check that those devices are attached to iommu driver. + * If no devices are attached to the bus. The bus may return with don't care + * (_DC) value. + * Otherwise, The bus will return appropriate _pa or _va iova mode. + * + * @return + * enum rte_iova_mode value. + */ +typedef enum rte_iova_mode (*rte_bus_get_iommu_class_t)(void); + + +/** + * A structure describing a generic bus. + */ +struct rte_bus { + TAILQ_ENTRY(rte_bus) next; /**< Next bus object in linked list */ + const char *name; /**< Name of the bus */ + rte_bus_scan_t scan; /**< Scan for devices attached to bus */ + rte_bus_probe_t probe; /**< Probe devices on bus */ + rte_bus_find_device_t find_device; /**< Find a device on the bus */ + rte_bus_plug_t plug; /**< Probe single device for drivers */ + rte_bus_unplug_t unplug; /**< Remove single device from driver */ + rte_bus_parse_t parse; /**< Parse a device name */ + rte_dev_dma_map_t dma_map; /**< DMA map for device in the bus */ + rte_dev_dma_unmap_t dma_unmap; /**< DMA unmap for device in the bus */ + struct rte_bus_conf conf; /**< Bus configuration */ + rte_bus_get_iommu_class_t get_iommu_class; /**< Get iommu class */ + rte_dev_iterate_t dev_iterate; /**< Device iterator. */ + rte_bus_hot_unplug_handler_t hot_unplug_handler; + /**< handle hot-unplug failure on the bus */ + rte_bus_sigbus_handler_t sigbus_handler; + /**< handle sigbus error on the bus */ + +}; + +/** + * Register a Bus handler. + * + * @param bus + * A pointer to a rte_bus structure describing the bus + * to be registered. + */ +void rte_bus_register(struct rte_bus *bus); + +/** + * Unregister a Bus handler. + * + * @param bus + * A pointer to a rte_bus structure describing the bus + * to be unregistered. + */ +void rte_bus_unregister(struct rte_bus *bus); + +/** + * Scan all the buses. 
+ * + * @return + * 0 in case of success in scanning all buses + * !0 in case of failure to scan + */ +int rte_bus_scan(void); + +/** + * For each device on the buses, perform a driver 'match' and call the + * driver-specific probe for device initialization. + * + * @return + * 0 for successful match/probe + * !0 otherwise + */ +int rte_bus_probe(void); + +/** + * Dump information of all the buses registered with EAL. + * + * @param f + * A valid and open output stream handle + */ +void rte_bus_dump(FILE *f); + +/** + * Bus comparison function. + * + * @param bus + * Bus under test. + * + * @param data + * Data to compare against. + * + * @return + * 0 if the bus matches the data. + * !0 if the bus does not match. + * <0 if ordering is possible and the bus is lower than the data. + * >0 if ordering is possible and the bus is greater than the data. + */ +typedef int (*rte_bus_cmp_t)(const struct rte_bus *bus, const void *data); + +/** + * Bus iterator to find a particular bus. + * + * This function compares each registered bus to find one that matches + * the data passed as parameter. + * + * If the comparison function returns zero this function will stop iterating + * over any more buses. To continue a search the bus of a previous search can + * be passed via the start parameter. + * + * @param start + * Starting point for the iteration. + * + * @param cmp + * Comparison function. + * + * @param data + * Data to pass to comparison function. + * + * @return + * A pointer to a rte_bus structure or NULL in case no bus matches + */ +struct rte_bus *rte_bus_find(const struct rte_bus *start, rte_bus_cmp_t cmp, + const void *data); + +/** + * Find the registered bus for a particular device. + */ +struct rte_bus *rte_bus_find_by_device(const struct rte_device *dev); + +/** + * Find the registered bus for a given name. + */ +struct rte_bus *rte_bus_find_by_name(const char *busname); + + +/** + * Get the common iommu class of devices bound on to buses available in the + * system. RTE_IOVA_DC means that no preference has been expressed. + * + * @return + * enum rte_iova_mode value. + */ +enum rte_iova_mode rte_bus_get_iommu_class(void); + +/** + * Helper for Bus registration. + * The constructor has higher priority than PMD constructors. + */ +#define RTE_REGISTER_BUS(nm, bus) \ +RTE_INIT_PRIO(businitfn_ ##nm, BUS) \ +{\ + (bus).name = RTE_STR(nm);\ + rte_bus_register(&bus); \ +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_BUS_H */ diff --git a/lib/librte_eal/include/rte_class.h b/lib/librte_eal/include/rte_class.h new file mode 100644 index 0000000000..856d09b22d --- /dev/null +++ b/lib/librte_eal/include/rte_class.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 Gaëtan Rivet + */ + +#ifndef _RTE_CLASS_H_ +#define _RTE_CLASS_H_ + +/** + * @file + * + * DPDK device class interface. + * + * This file describes the interface of the device class + * abstraction layer. + * + * A device class defines the type of function a device + * will be used for e.g.: Ethernet adapter (eth), + * cryptographic co-processor (crypto), etc. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#include + +/** Double linked list of classes */ +TAILQ_HEAD(rte_class_list, rte_class); + +/** + * A structure describing a generic device class. + */ +struct rte_class { + TAILQ_ENTRY(rte_class) next; /**< Next device class in linked list */ + const char *name; /**< Name of the class */ + rte_dev_iterate_t dev_iterate; /**< Device iterator. 
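A hypothetical skeleton showing how a bus implementation hooks into the structure and registration macro above; "dummy" is not a real DPDK bus, and the scan/probe/find_device callbacks are stubs with the signatures defined in this header:

#include <rte_bus.h>

static int
dummy_scan(void)
{
    return 0; /* nothing to discover */
}

static int
dummy_probe(void)
{
    return 0; /* no devices, nothing to match against drivers */
}

static struct rte_device *
dummy_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
                  const void *data)
{
    (void)start;
    (void)cmp;
    (void)data;
    return NULL; /* this bus holds no devices */
}

static struct rte_bus dummy_bus = {
    .scan = dummy_scan,
    .probe = dummy_probe,
    .find_device = dummy_find_device,
};

/* sets dummy_bus.name to "dummy" and registers it before main() */
RTE_REGISTER_BUS(dummy, dummy_bus);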
*/ +}; + +/** + * Class comparison function. + * + * @param cls + * Class under test. + * + * @param data + * Data to compare against. + * + * @return + * 0 if the class matches the data. + * !0 if the class does not match. + * <0 if ordering is possible and the class is lower than the data. + * >0 if ordering is possible and the class is greater than the data. + */ +typedef int (*rte_class_cmp_t)(const struct rte_class *cls, const void *data); + +/** + * Class iterator to find a particular class. + * + * This function compares each registered class to find one that matches + * the data passed as parameter. + * + * If the comparison function returns zero this function will stop iterating + * over any more classes. To continue a search the class of a previous search + * can be passed via the start parameter. + * + * @param start + * Starting point for the iteration. + * + * @param cmp + * Comparison function. + * + * @param data + * Data to pass to comparison function. + * + * @return + * A pointer to a rte_class structure or NULL in case no class matches + */ +__rte_experimental +struct rte_class * +rte_class_find(const struct rte_class *start, rte_class_cmp_t cmp, + const void *data); + +/** + * Find the registered class for a given name. + */ +__rte_experimental +struct rte_class * +rte_class_find_by_name(const char *name); + +/** + * Register a Class handle. + * + * @param cls + * A pointer to a rte_class structure describing the class + * to be registered. + */ +__rte_experimental +void rte_class_register(struct rte_class *cls); + +/** + * Unregister a Class handle. + * + * @param cls + * A pointer to a rte_class structure describing the class + * to be unregistered. + */ +__rte_experimental +void rte_class_unregister(struct rte_class *cls); + +/** + * Helper for Class registration. + * The constructor has lower priority than Bus constructors. + * The constructor has higher priority than PMD constructors. + */ +#define RTE_REGISTER_CLASS(nm, cls) \ +RTE_INIT_PRIO(classinitfn_ ##nm, CLASS) \ +{\ + (cls).name = RTE_STR(nm); \ + rte_class_register(&cls); \ +} + +#define RTE_UNREGISTER_CLASS(nm, cls) \ +RTE_FINI_PRIO(classfinifn_ ##nm, CLASS) \ +{ \ + rte_class_unregister(&cls); \ +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_CLASS_H_ */ diff --git a/lib/librte_eal/include/rte_common.h b/lib/librte_eal/include/rte_common.h new file mode 100644 index 0000000000..f820c2eae2 --- /dev/null +++ b/lib/librte_eal/include/rte_common.h @@ -0,0 +1,823 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2019 Intel Corporation + */ + +#ifndef _RTE_COMMON_H_ +#define _RTE_COMMON_H_ + +/** + * @file + * + * Generic, commonly-used macro and inline function definitions + * for DPDK. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include + +#include + +/* OS specific include */ +#include + +#ifndef typeof +#define typeof __typeof__ +#endif + +#ifndef asm +#define asm __asm__ +#endif + +/** C extension macro for environments lacking C11 features. */ +#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L +#define RTE_STD_C11 __extension__ +#else +#define RTE_STD_C11 +#endif + +/* + * RTE_TOOLCHAIN_GCC is defined if the target is built with GCC, + * while a host application (like pmdinfogen) may have another compiler. + * RTE_CC_IS_GNU is true if the file is compiled with GCC, + * no matter it is a target or host application. 
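A similar hypothetical skeleton for class registration; "widget" is a made-up class name, the iterator is a stub, and since rte_class_register() is experimental at this point the build is assumed to define ALLOW_EXPERIMENTAL_API:

#include <rte_class.h>

static void *
widget_dev_iterate(const void *start, const char *str,
                   const struct rte_dev_iterator *it)
{
    (void)start;
    (void)str;
    (void)it;
    return NULL; /* no devices of this class yet */
}

static struct rte_class rte_class_widget = {
    .dev_iterate = widget_dev_iterate,
};

/* sets the class name to "widget" and registers it after bus constructors */
RTE_REGISTER_CLASS(widget, rte_class_widget);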
+ */ +#define RTE_CC_IS_GNU 0 +#if defined __clang__ +#define RTE_CC_CLANG +#elif defined __INTEL_COMPILER +#define RTE_CC_ICC +#elif defined __GNUC__ +#define RTE_CC_GCC +#undef RTE_CC_IS_GNU +#define RTE_CC_IS_GNU 1 +#endif +#if RTE_CC_IS_GNU +#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + \ + __GNUC_PATCHLEVEL__) +#endif + +#ifdef RTE_ARCH_STRICT_ALIGN +typedef uint64_t unaligned_uint64_t __attribute__ ((aligned(1))); +typedef uint32_t unaligned_uint32_t __attribute__ ((aligned(1))); +typedef uint16_t unaligned_uint16_t __attribute__ ((aligned(1))); +#else +typedef uint64_t unaligned_uint64_t; +typedef uint32_t unaligned_uint32_t; +typedef uint16_t unaligned_uint16_t; +#endif + +/** + * Force alignment + */ +#define __rte_aligned(a) __attribute__((__aligned__(a))) + +/** + * Force a structure to be packed + */ +#define __rte_packed __attribute__((__packed__)) + +/******* Macro to mark functions and fields scheduled for removal *****/ +#define __rte_deprecated __attribute__((__deprecated__)) + +/** + * Mark a function or variable to a weak reference. + */ +#define __rte_weak __attribute__((__weak__)) + +/*********** Macros to eliminate unused variable warnings ********/ + +/** + * short definition to mark a function parameter unused + */ +#define __rte_unused __attribute__((__unused__)) + +/** + * definition to mark a variable or function parameter as used so + * as to avoid a compiler warning + */ +#define RTE_SET_USED(x) (void)(x) + +/** + * Check format string and its arguments at compile-time. + * + * GCC on Windows assumes MS-specific format string by default, + * even if the underlying stdio implementation is ANSI-compliant, + * so this must be overridden. + */ +#if RTE_CC_IS_GNU +#define __rte_format_printf(format_index, first_arg) \ + __attribute__((format(gnu_printf, format_index, first_arg))) +#else +#define __rte_format_printf(format_index, first_arg) \ + __attribute__((format(printf, format_index, first_arg))) +#endif + +#define RTE_PRIORITY_LOG 101 +#define RTE_PRIORITY_BUS 110 +#define RTE_PRIORITY_CLASS 120 +#define RTE_PRIORITY_LAST 65535 + +#define RTE_PRIO(prio) \ + RTE_PRIORITY_ ## prio + +/** + * Run function before main() with high priority. + * + * @param func + * Constructor function. + * @param prio + * Priority number must be above 100. + * Lowest number is the first to run. + */ +#ifndef RTE_INIT_PRIO /* Allow to override from EAL */ +#define RTE_INIT_PRIO(func, prio) \ +static void __attribute__((constructor(RTE_PRIO(prio)), used)) func(void) +#endif + +/** + * Run function before main() with low priority. + * + * The constructor will be run after prioritized constructors. + * + * @param func + * Constructor function. + */ +#define RTE_INIT(func) \ + RTE_INIT_PRIO(func, LAST) + +/** + * Run after main() with low priority. + * + * @param func + * Destructor function name. + * @param prio + * Priority number must be above 100. + * Lowest number is the last to run. + */ +#ifndef RTE_FINI_PRIO /* Allow to override from EAL */ +#define RTE_FINI_PRIO(func, prio) \ +static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) +#endif + +/** + * Run after main() with high priority. + * + * The destructor will be run *before* prioritized destructors. + * + * @param func + * Destructor function name. 
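A small sketch of the constructor macros above: the lower the priority number, the earlier the constructor runs, so the BUS-priority function below executes before the default RTE_INIT one. Function names are illustrative:

#include <stdio.h>
#include <rte_common.h>

RTE_INIT_PRIO(example_early_init, BUS)
{
    printf("early: runs with BUS priority (110)\n");
}

RTE_INIT(example_late_init)
{
    printf("late: runs with the default, lowest priority\n");
}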
+ */ +#define RTE_FINI(func) \ + RTE_FINI_PRIO(func, LAST) + +/** + * Force a function to be inlined + */ +#define __rte_always_inline inline __attribute__((always_inline)) + +/** + * Force a function to be noinlined + */ +#define __rte_noinline __attribute__((noinline)) + +/*********** Macros for pointer arithmetic ********/ + +/** + * add a byte-value offset to a pointer + */ +#define RTE_PTR_ADD(ptr, x) ((void*)((uintptr_t)(ptr) + (x))) + +/** + * subtract a byte-value offset from a pointer + */ +#define RTE_PTR_SUB(ptr, x) ((void*)((uintptr_t)ptr - (x))) + +/** + * get the difference between two pointer values, i.e. how far apart + * in bytes are the locations they point two. It is assumed that + * ptr1 is greater than ptr2. + */ +#define RTE_PTR_DIFF(ptr1, ptr2) ((uintptr_t)(ptr1) - (uintptr_t)(ptr2)) + +/** + * Workaround to cast a const field of a structure to non-const type. + */ +#define RTE_CAST_FIELD(var, field, type) \ + (*(type *)((uintptr_t)(var) + offsetof(typeof(*(var)), field))) + +/*********** Macros/static functions for doing alignment ********/ + + +/** + * Macro to align a pointer to a given power-of-two. The resultant + * pointer will be a pointer of the same type as the first parameter, and + * point to an address no higher than the first parameter. Second parameter + * must be a power-of-two value. + */ +#define RTE_PTR_ALIGN_FLOOR(ptr, align) \ + ((typeof(ptr))RTE_ALIGN_FLOOR((uintptr_t)ptr, align)) + +/** + * Macro to align a value to a given power-of-two. The resultant value + * will be of the same type as the first parameter, and will be no + * bigger than the first parameter. Second parameter must be a + * power-of-two value. + */ +#define RTE_ALIGN_FLOOR(val, align) \ + (typeof(val))((val) & (~((typeof(val))((align) - 1)))) + +/** + * Macro to align a pointer to a given power-of-two. The resultant + * pointer will be a pointer of the same type as the first parameter, and + * point to an address no lower than the first parameter. Second parameter + * must be a power-of-two value. + */ +#define RTE_PTR_ALIGN_CEIL(ptr, align) \ + RTE_PTR_ALIGN_FLOOR((typeof(ptr))RTE_PTR_ADD(ptr, (align) - 1), align) + +/** + * Macro to align a value to a given power-of-two. The resultant value + * will be of the same type as the first parameter, and will be no lower + * than the first parameter. Second parameter must be a power-of-two + * value. + */ +#define RTE_ALIGN_CEIL(val, align) \ + RTE_ALIGN_FLOOR(((val) + ((typeof(val)) (align) - 1)), align) + +/** + * Macro to align a pointer to a given power-of-two. The resultant + * pointer will be a pointer of the same type as the first parameter, and + * point to an address no lower than the first parameter. Second parameter + * must be a power-of-two value. + * This function is the same as RTE_PTR_ALIGN_CEIL + */ +#define RTE_PTR_ALIGN(ptr, align) RTE_PTR_ALIGN_CEIL(ptr, align) + +/** + * Macro to align a value to a given power-of-two. The resultant + * value will be of the same type as the first parameter, and + * will be no lower than the first parameter. Second parameter + * must be a power-of-two value. + * This function is the same as RTE_ALIGN_CEIL + */ +#define RTE_ALIGN(val, align) RTE_ALIGN_CEIL(val, align) + +/** + * Macro to align a value to the multiple of given value. The resultant + * value will be of the same type as the first parameter and will be no lower + * than the first parameter. 
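A worked sketch of the pointer and value alignment helpers above; the buffer, offsets and alignment values are arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <rte_common.h>

static void __rte_unused
example_align(void)
{
    static uint8_t buf[256] __rte_aligned(64);
    void *p = &buf[3];

    void *up = RTE_PTR_ALIGN_CEIL(p, 64);    /* next 64-byte boundary */
    void *down = RTE_PTR_ALIGN_FLOOR(p, 64); /* previous 64-byte boundary */
    uint32_t v = RTE_ALIGN_CEIL(100u, 32u);  /* 128 */

    printf("%p -> ceil %p, floor %p, gap %zu bytes, RTE_ALIGN_CEIL(100,32)=%u\n",
           p, up, down, (size_t)RTE_PTR_DIFF(up, down), v);
}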
+ */ +#define RTE_ALIGN_MUL_CEIL(v, mul) \ + (((v + (typeof(v))(mul) - 1) / ((typeof(v))(mul))) * (typeof(v))(mul)) + +/** + * Macro to align a value to the multiple of given value. The resultant + * value will be of the same type as the first parameter and will be no higher + * than the first parameter. + */ +#define RTE_ALIGN_MUL_FLOOR(v, mul) \ + ((v / ((typeof(v))(mul))) * (typeof(v))(mul)) + +/** + * Macro to align value to the nearest multiple of the given value. + * The resultant value might be greater than or less than the first parameter + * whichever difference is the lowest. + */ +#define RTE_ALIGN_MUL_NEAR(v, mul) \ + ({ \ + typeof(v) ceil = RTE_ALIGN_MUL_CEIL(v, mul); \ + typeof(v) floor = RTE_ALIGN_MUL_FLOOR(v, mul); \ + (ceil - v) > (v - floor) ? floor : ceil; \ + }) + +/** + * Checks if a pointer is aligned to a given power-of-two value + * + * @param ptr + * The pointer whose alignment is to be checked + * @param align + * The power-of-two value to which the ptr should be aligned + * + * @return + * True(1) where the pointer is correctly aligned, false(0) otherwise + */ +static inline int +rte_is_aligned(void *ptr, unsigned align) +{ + return RTE_PTR_ALIGN(ptr, align) == ptr; +} + +/*********** Macros for compile type checks ********/ + +/** + * Triggers an error at compilation time if the condition is true. + */ +#define RTE_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) + +/*********** Cache line related macros ********/ + +/** Cache line mask. */ +#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) + +/** Return the first cache-aligned value greater or equal to size. */ +#define RTE_CACHE_LINE_ROUNDUP(size) \ + (RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / \ + RTE_CACHE_LINE_SIZE)) + +/** Cache line size in terms of log2 */ +#if RTE_CACHE_LINE_SIZE == 64 +#define RTE_CACHE_LINE_SIZE_LOG2 6 +#elif RTE_CACHE_LINE_SIZE == 128 +#define RTE_CACHE_LINE_SIZE_LOG2 7 +#else +#error "Unsupported cache line size" +#endif + +/** Minimum Cache line size. */ +#define RTE_CACHE_LINE_MIN_SIZE 64 + +/** Force alignment to cache line. */ +#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE) + +/** Force minimum cache line alignment. */ +#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE) + +/*********** PA/IOVA type definitions ********/ + +/** Physical address */ +typedef uint64_t phys_addr_t; +#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1) + +/** + * IO virtual address type. + * When the physical addressing mode (IOVA as PA) is in use, + * the translation from an IO virtual address (IOVA) to a physical address + * is a direct mapping, i.e. the same value. + * Otherwise, in virtual mode (IOVA as VA), an IOMMU may do the translation. + */ +typedef uint64_t rte_iova_t; +#define RTE_BAD_IOVA ((rte_iova_t)-1) + +/*********** Structure alignment markers ********/ + +/** Generic marker for any place in a structure. */ +__extension__ typedef void *RTE_MARKER[0]; +/** Marker for 1B alignment in a structure. */ +__extension__ typedef uint8_t RTE_MARKER8[0]; +/** Marker for 2B alignment in a structure. */ +__extension__ typedef uint16_t RTE_MARKER16[0]; +/** Marker for 4B alignment in a structure. */ +__extension__ typedef uint32_t RTE_MARKER32[0]; +/** Marker for 8B alignment in a structure. */ +__extension__ typedef uint64_t RTE_MARKER64[0]; + +/** + * Combines 32b inputs most significant set bits into the least + * significant bits to construct a value with the same MSBs as x + * but all 1's under it. 
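A sketch combining the cache-line helpers with the compile-time check above; the per-lcore stats structure is made up:

#include <stdint.h>
#include <rte_common.h>

struct example_stats {
    uint64_t packets;
    uint64_t bytes;
    uint64_t drops;
} __rte_cache_aligned;

static void __rte_unused
example_stats_check(void)
{
    /* compilation fails if the structure ever outgrows one cache line */
    RTE_BUILD_BUG_ON(sizeof(struct example_stats) > RTE_CACHE_LINE_SIZE);
}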
+ * + * @param x + * The integer whose MSBs need to be combined with its LSBs + * @return + * The combined value. + */ +static inline uint32_t +rte_combine32ms1b(register uint32_t x) +{ + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + + return x; +} + +/** + * Combines 64b inputs most significant set bits into the least + * significant bits to construct a value with the same MSBs as x + * but all 1's under it. + * + * @param v + * The integer whose MSBs need to be combined with its LSBs + * @return + * The combined value. + */ +static inline uint64_t +rte_combine64ms1b(register uint64_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + + return v; +} + +/*********** Macros to work with powers of 2 ********/ + +/** + * Macro to return 1 if n is a power of 2, 0 otherwise + */ +#define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n))) + +/** + * Returns true if n is a power of 2 + * @param n + * Number to check + * @return 1 if true, 0 otherwise + */ +static inline int +rte_is_power_of_2(uint32_t n) +{ + return n && !(n & (n - 1)); +} + +/** + * Aligns input parameter to the next power of 2 + * + * @param x + * The integer value to align + * + * @return + * Input parameter aligned to the next power of 2 + */ +static inline uint32_t +rte_align32pow2(uint32_t x) +{ + x--; + x = rte_combine32ms1b(x); + + return x + 1; +} + +/** + * Aligns input parameter to the previous power of 2 + * + * @param x + * The integer value to align + * + * @return + * Input parameter aligned to the previous power of 2 + */ +static inline uint32_t +rte_align32prevpow2(uint32_t x) +{ + x = rte_combine32ms1b(x); + + return x - (x >> 1); +} + +/** + * Aligns 64b input parameter to the next power of 2 + * + * @param v + * The 64b value to align + * + * @return + * Input parameter aligned to the next power of 2 + */ +static inline uint64_t +rte_align64pow2(uint64_t v) +{ + v--; + v = rte_combine64ms1b(v); + + return v + 1; +} + +/** + * Aligns 64b input parameter to the previous power of 2 + * + * @param v + * The 64b value to align + * + * @return + * Input parameter aligned to the previous power of 2 + */ +static inline uint64_t +rte_align64prevpow2(uint64_t v) +{ + v = rte_combine64ms1b(v); + + return v - (v >> 1); +} + +/*********** Macros for calculating min and max **********/ + +/** + * Macro to return the minimum of two numbers + */ +#define RTE_MIN(a, b) \ + __extension__ ({ \ + typeof (a) _a = (a); \ + typeof (b) _b = (b); \ + _a < _b ? _a : _b; \ + }) + +/** + * Macro to return the maximum of two numbers + */ +#define RTE_MAX(a, b) \ + __extension__ ({ \ + typeof (a) _a = (a); \ + typeof (b) _b = (b); \ + _a > _b ? _a : _b; \ + }) + +/*********** Other general functions / macros ********/ + +/** + * Searches the input parameter for the least significant set bit + * (starting from zero). + * If a least significant 1 bit is found, its bit index is returned. + * If the content of the input parameter is zero, then the content of the return + * value is undefined. + * @param v + * input parameter, should not be zero. + * @return + * least significant set bit in the input parameter. + */ +static inline uint32_t +rte_bsf32(uint32_t v) +{ + return (uint32_t)__builtin_ctz(v); +} + +/** + * Searches the input parameter for the least significant set bit + * (starting from zero). Safe version (checks for input parameter being zero). + * + * @warning ``pos`` must be a valid pointer. It is not checked! + * + * @param v + * The input parameter. 
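A few worked values for the power-of-two helpers and RTE_MIN/RTE_MAX above, written as assertions (plain assert() is used here only to keep the sketch self-contained):

#include <assert.h>
#include <rte_common.h>

static void __rte_unused
example_pow2(void)
{
    assert(rte_align32pow2(33) == 64);      /* next power of 2 */
    assert(rte_align32prevpow2(33) == 32);  /* previous power of 2 */
    assert(rte_is_power_of_2(64) == 1);
    assert(rte_align64pow2(1ULL << 40) == (1ULL << 40)); /* already aligned */
    assert(RTE_MIN(3, 7) == 3 && RTE_MAX(3, 7) == 7);
}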
+ * @param pos + * If ``v`` was not 0, this value will contain position of least significant + * bit within the input parameter. + * @return + * Returns 0 if ``v`` was 0, otherwise returns 1. + */ +static inline int +rte_bsf32_safe(uint64_t v, uint32_t *pos) +{ + if (v == 0) + return 0; + + *pos = rte_bsf32(v); + return 1; +} + +/** + * Return the rounded-up log2 of a integer. + * + * @note Contrary to the logarithm mathematical operation, + * rte_log2_u32(0) == 0 and not -inf. + * + * @param v + * The input parameter. + * @return + * The rounded-up log2 of the input, or 0 if the input is 0. + */ +static inline uint32_t +rte_log2_u32(uint32_t v) +{ + if (v == 0) + return 0; + v = rte_align32pow2(v); + return rte_bsf32(v); +} + + +/** + * Return the last (most-significant) bit set. + * + * @note The last (most significant) bit is at position 32. + * @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32 + * + * @param x + * The input parameter. + * @return + * The last (most-significant) bit set, or 0 if the input is 0. + */ +static inline int +rte_fls_u32(uint32_t x) +{ + return (x == 0) ? 0 : 32 - __builtin_clz(x); +} + +/** + * Searches the input parameter for the least significant set bit + * (starting from zero). + * If a least significant 1 bit is found, its bit index is returned. + * If the content of the input parameter is zero, then the content of the return + * value is undefined. + * @param v + * input parameter, should not be zero. + * @return + * least significant set bit in the input parameter. + */ +static inline int +rte_bsf64(uint64_t v) +{ + return (uint32_t)__builtin_ctzll(v); +} + +/** + * Searches the input parameter for the least significant set bit + * (starting from zero). Safe version (checks for input parameter being zero). + * + * @warning ``pos`` must be a valid pointer. It is not checked! + * + * @param v + * The input parameter. + * @param pos + * If ``v`` was not 0, this value will contain position of least significant + * bit within the input parameter. + * @return + * Returns 0 if ``v`` was 0, otherwise returns 1. + */ +static inline int +rte_bsf64_safe(uint64_t v, uint32_t *pos) +{ + if (v == 0) + return 0; + + *pos = rte_bsf64(v); + return 1; +} + +/** + * Return the last (most-significant) bit set. + * + * @note The last (most significant) bit is at position 64. + * @note rte_fls_u64(0) = 0, rte_fls_u64(1) = 1, + * rte_fls_u64(0x8000000000000000) = 64 + * + * @param x + * The input parameter. + * @return + * The last (most-significant) bit set, or 0 if the input is 0. + */ +static inline int +rte_fls_u64(uint64_t x) +{ + return (x == 0) ? 0 : 64 - __builtin_clzll(x); +} + +/** + * Return the rounded-up log2 of a 64-bit integer. + * + * @note Contrary to the logarithm mathematical operation, + * rte_log2_u64(0) == 0 and not -inf. + * + * @param v + * The input parameter. + * @return + * The rounded-up log2 of the input, or 0 if the input is 0. + */ +static inline uint32_t +rte_log2_u64(uint64_t v) +{ + if (v == 0) + return 0; + v = rte_align64pow2(v); + /* we checked for v being 0 already, so no undefined behavior */ + return rte_bsf64(v); +} + +#ifndef offsetof +/** Return the offset of a field in a structure. */ +#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER) +#endif + +/** + * Return pointer to the wrapping struct instance. + * + * Example: + * + * struct wrapper { + * ... + * struct child c; + * ... 
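Worked values for the bit-scan helpers above, again as plain assertions; note that rte_fls_u32()/rte_fls_u64() are 1-based while rte_bsf32()/rte_bsf64() count from zero:

#include <assert.h>
#include <stdint.h>
#include <rte_common.h>

static void __rte_unused
example_bitscan(void)
{
    uint32_t pos = 0;

    assert(rte_bsf32(0x80) == 7);    /* least significant set bit */
    assert(rte_fls_u32(0x80) == 8);  /* most significant set bit, 1-based */
    assert(rte_log2_u32(33) == 6);   /* rounded-up log2 */
    assert(rte_bsf64_safe(0, &pos) == 0);              /* zero input rejected */
    assert(rte_bsf64_safe(1ULL << 40, &pos) == 1 && pos == 40);
}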
+ * }; + * + * struct child *x = obtain(...); + * struct wrapper *w = container_of(x, struct wrapper, c); + */ +#ifndef container_of +#define container_of(ptr, type, member) __extension__ ({ \ + const typeof(((type *)0)->member) *_ptr = (ptr); \ + __attribute__((unused)) type *_target_ptr = \ + (type *)(ptr); \ + (type *)(((uintptr_t)_ptr) - offsetof(type, member)); \ + }) +#endif + +/** + * Get the size of a field in a structure. + * + * @param type + * The type of the structure. + * @param field + * The field in the structure. + * @return + * The size of the field in the structure, in bytes. + */ +#define RTE_SIZEOF_FIELD(type, field) (sizeof(((type *)0)->field)) + +#define _RTE_STR(x) #x +/** Take a macro value and get a string version of it */ +#define RTE_STR(x) _RTE_STR(x) + +/** + * ISO C helpers to modify format strings using variadic macros. + * This is a replacement for the ", ## __VA_ARGS__" GNU extension. + * An empty %s argument is appended to avoid a dangling comma. + */ +#define RTE_FMT(fmt, ...) fmt "%.0s", __VA_ARGS__ "" +#define RTE_FMT_HEAD(fmt, ...) fmt +#define RTE_FMT_TAIL(fmt, ...) __VA_ARGS__ + +/** Mask value of type "tp" for the first "ln" bit set. */ +#define RTE_LEN2MASK(ln, tp) \ + ((tp)((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln)))) + +/** Number of elements in the array. */ +#define RTE_DIM(a) (sizeof (a) / sizeof ((a)[0])) + +/** + * Converts a numeric string to the equivalent uint64_t value. + * As well as straight number conversion, also recognises the suffixes + * k, m and g for kilobytes, megabytes and gigabytes respectively. + * + * If a negative number is passed in i.e. a string with the first non-black + * character being "-", zero is returned. Zero is also returned in the case of + * an error with the strtoull call in the function. + * + * @param str + * String containing number to convert. + * @return + * Number. + */ +static inline uint64_t +rte_str_to_size(const char *str) +{ + char *endptr; + unsigned long long size; + + while (isspace((int)*str)) + str++; + if (*str == '-') + return 0; + + errno = 0; + size = strtoull(str, &endptr, 0); + if (errno) + return 0; + + if (*endptr == ' ') + endptr++; /* allow 1 space gap */ + + switch (*endptr){ + case 'G': case 'g': size *= 1024; /* fall-through */ + case 'M': case 'm': size *= 1024; /* fall-through */ + case 'K': case 'k': size *= 1024; /* fall-through */ + default: + break; + } + return size; +} + +/** + * Function to terminate the application immediately, printing an error + * message and returning the exit_code back to the shell. + * + * This function never returns + * + * @param exit_code + * The exit code to be returned by the application + * @param format + * The format string to be used for printing the message. This can include + * printf format characters which will be expanded using any further parameters + * to the function. + */ +void +rte_exit(int exit_code, const char *format, ...) + __attribute__((noreturn)) + __rte_format_printf(2, 3); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/librte_eal/include/rte_compat.h b/lib/librte_eal/include/rte_compat.h new file mode 100644 index 0000000000..3eb33784b3 --- /dev/null +++ b/lib/librte_eal/include/rte_compat.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015 Neil Horman . + * All rights reserved. 
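A sketch of container_of(), RTE_DIM() and rte_str_to_size(); the wrapper/child structures mirror the example in the comment above and are otherwise made up:

#include <assert.h>
#include <rte_common.h>

struct child {
    int id;
};

struct wrapper {
    char name[32];
    struct child c;
};

static void __rte_unused
example_common(void)
{
    struct wrapper w = { .name = "w0", .c = { .id = 5 } };
    struct child *cp = &w.c;
    int table[8];

    /* recover the enclosing structure from a pointer to its member */
    struct wrapper *back = container_of(cp, struct wrapper, c);
    assert(back == &w);

    assert(RTE_DIM(table) == 8);
    RTE_SET_USED(table);

    /* "16k" -> 16384; the k/m/g suffixes are parsed case-insensitively */
    assert(rte_str_to_size("16k") == 16384);
}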
+ */ + +#ifndef _RTE_COMPAT_H_ +#define _RTE_COMPAT_H_ + +#ifndef ALLOW_EXPERIMENTAL_API + +#define __rte_experimental \ +__attribute__((deprecated("Symbol is not yet part of stable ABI"), \ +section(".text.experimental"))) + +#else + +#define __rte_experimental \ +__attribute__((section(".text.experimental"))) + +#endif + +#endif /* _RTE_COMPAT_H_ */ diff --git a/lib/librte_eal/include/rte_debug.h b/lib/librte_eal/include/rte_debug.h new file mode 100644 index 0000000000..7edd4b89cc --- /dev/null +++ b/lib/librte_eal/include/rte_debug.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_DEBUG_H_ +#define _RTE_DEBUG_H_ + +/** + * @file + * + * Debug Functions in RTE + * + * This file defines a generic API for debug operations. Part of + * the implementation is architecture-specific. + */ + +#include "rte_log.h" +#include "rte_branch_prediction.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Dump the stack of the calling core to the console. + */ +void rte_dump_stack(void); + +/** + * Dump the registers of the calling core to the console. + * + * Note: Not implemented in a userapp environment; use gdb instead. + */ +void rte_dump_registers(void); + +/** + * Provide notification of a critical non-recoverable error and terminate + * execution abnormally. + * + * Display the format string and its expanded arguments (printf-like). + * + * In a linux environment, this function dumps the stack and calls + * abort() resulting in a core dump if enabled. + * + * The function never returns. + * + * @param ... + * The format string, followed by the variable list of arguments. + */ +#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy") +#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__) + +#ifdef RTE_ENABLE_ASSERT +#define RTE_ASSERT(exp) RTE_VERIFY(exp) +#else +#define RTE_ASSERT(exp) do {} while (0) +#endif +#define RTE_VERIFY(exp) do { \ + if (unlikely(!(exp))) \ + rte_panic("line %d\tassert \"%s\" failed\n", __LINE__, #exp); \ +} while (0) + +/* + * Provide notification of a critical non-recoverable error and stop. + * + * This function should not be called directly. Refer to rte_panic() macro + * documentation. + */ +void __rte_panic(const char *funcname , const char *format, ...) +#ifdef __GNUC__ +#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2)) + __attribute__((cold)) +#endif +#endif + __attribute__((noreturn)) + __rte_format_printf(2, 3); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_DEBUG_H_ */ diff --git a/lib/librte_eal/include/rte_dev.h b/lib/librte_eal/include/rte_dev.h new file mode 100644 index 0000000000..a5c35f00c0 --- /dev/null +++ b/lib/librte_eal/include/rte_dev.h @@ -0,0 +1,518 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014 6WIND S.A. + */ + +#ifndef _RTE_DEV_H_ +#define _RTE_DEV_H_ + +/** + * @file + * + * RTE PMD Driver Registration Interface + * + * This file manages the list of device drivers. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#include +#include +#include + +/** + * The device event type. 
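A sketch of the debug helpers above: RTE_VERIFY() is always compiled in and panics with the failing line and expression, RTE_ASSERT() is active only when RTE_ENABLE_ASSERT is defined, and rte_panic() is reserved for unrecoverable failures. The function and the limits are illustrative:

#include <rte_debug.h>

static void
example_check_config(int n_queues)
{
    /* always-on runtime check */
    RTE_VERIFY(n_queues > 0);

    /* compiled out unless RTE_ENABLE_ASSERT is defined */
    RTE_ASSERT(n_queues <= 1024);

    if (n_queues > 4096)
        rte_panic("unsupported queue count %d\n", n_queues);
}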
+ */ +enum rte_dev_event_type { + RTE_DEV_EVENT_ADD, /**< device being added */ + RTE_DEV_EVENT_REMOVE, /**< device being removed */ + RTE_DEV_EVENT_MAX /**< max value of this enum */ +}; + +struct rte_dev_event { + enum rte_dev_event_type type; /**< device event type */ + int subsystem; /**< subsystem id */ + char *devname; /**< device name */ +}; + +typedef void (*rte_dev_event_cb_fn)(const char *device_name, + enum rte_dev_event_type event, + void *cb_arg); + +/* Macros to check for invalid function pointers */ +#define RTE_FUNC_PTR_OR_ERR_RET(func, retval) do { \ + if ((func) == NULL) \ + return retval; \ +} while (0) + +#define RTE_FUNC_PTR_OR_RET(func) do { \ + if ((func) == NULL) \ + return; \ +} while (0) + +/** + * Device driver. + */ +enum rte_kernel_driver { + RTE_KDRV_UNKNOWN = 0, + RTE_KDRV_IGB_UIO, + RTE_KDRV_VFIO, + RTE_KDRV_UIO_GENERIC, + RTE_KDRV_NIC_UIO, + RTE_KDRV_NONE, +}; + +/** + * Device policies. + */ +enum rte_dev_policy { + RTE_DEV_WHITELISTED, + RTE_DEV_BLACKLISTED, +}; + +/** + * A generic memory resource representation. + */ +struct rte_mem_resource { + uint64_t phys_addr; /**< Physical address, 0 if not resource. */ + uint64_t len; /**< Length of the resource. */ + void *addr; /**< Virtual address, NULL when not mapped. */ +}; + +/** + * A structure describing a device driver. + */ +struct rte_driver { + TAILQ_ENTRY(rte_driver) next; /**< Next in list. */ + const char *name; /**< Driver name. */ + const char *alias; /**< Driver alias. */ +}; + +/* + * Internal identifier length + * Sufficiently large to allow for UUID or PCI address + */ +#define RTE_DEV_NAME_MAX_LEN 64 + +/** + * A structure describing a generic device. + */ +struct rte_device { + TAILQ_ENTRY(rte_device) next; /**< Next device */ + const char *name; /**< Device name */ + const struct rte_driver *driver; /**< Driver assigned after probing */ + const struct rte_bus *bus; /**< Bus handle assigned on scan */ + int numa_node; /**< NUMA node connection */ + struct rte_devargs *devargs; /**< Arguments for latest probing */ +}; + +/** + * Query status of a device. + * + * @param dev + * Generic device pointer. + * @return + * (int)true if already probed successfully, 0 otherwise. + */ +int rte_dev_is_probed(const struct rte_device *dev); + +/** + * Hotplug add a given device to a specific bus. + * + * In multi-process, it will request other processes to add the same device. + * A failure, in any process, will rollback the action + * + * @param busname + * The bus name the device is added to. + * @param devname + * The device name. Based on this device name, eal will identify a driver + * capable of handling it and pass it to the driver probing function. + * @param drvargs + * Device arguments to be passed to the driver. + * @return + * 0 on success, negative on error. + */ +int rte_eal_hotplug_add(const char *busname, const char *devname, + const char *drvargs); + +/** + * Add matching devices. + * + * In multi-process, it will request other processes to add the same device. + * A failure, in any process, will rollback the action + * + * @param devargs + * Device arguments including bus, class and driver properties. + * @return + * 0 on success, negative on error. + */ +int rte_dev_probe(const char *devargs); + +/** + * Hotplug remove a given device from a specific bus. + * + * In multi-process, it will request other processes to remove the same device. + * A failure, in any process, will rollback the action + * + * @param busname + * The bus name the device is removed from. 
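A sketch of the two hotplug entry points above; "net_null0" and the PCI address are illustrative, and both calls are assumed to run after EAL initialization:

#include <stdio.h>
#include <rte_dev.h>

static int
example_hotplug(void)
{
    /* full devargs string: bus prefix plus device name */
    if (rte_dev_probe("vdev:net_null0") < 0) {
        printf("probe failed\n");
        return -1;
    }

    /* equivalent per-bus form, with empty driver arguments */
    if (rte_eal_hotplug_add("pci", "0000:05:00.0", "") < 0) {
        printf("hotplug add failed\n");
        return -1;
    }

    return 0;
}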
+ * @param devname + * The device name being removed. + * @return + * 0 on success, negative on error. + */ +int rte_eal_hotplug_remove(const char *busname, const char *devname); + +/** + * Remove one device. + * + * In multi-process, it will request other processes to remove the same device. + * A failure, in any process, will rollback the action + * + * @param dev + * Data structure of the device to remove. + * @return + * 0 on success, negative on error. + */ +int rte_dev_remove(struct rte_device *dev); + +/** + * Device comparison function. + * + * This type of function is used to compare an rte_device with arbitrary + * data. + * + * @param dev + * Device handle. + * + * @param data + * Data to compare against. The type of this parameter is determined by + * the kind of comparison performed by the function. + * + * @return + * 0 if the device matches the data. + * !0 if the device does not match. + * <0 if ordering is possible and the device is lower than the data. + * >0 if ordering is possible and the device is greater than the data. + */ +typedef int (*rte_dev_cmp_t)(const struct rte_device *dev, const void *data); + +#define RTE_PMD_EXPORT_NAME_ARRAY(n, idx) n##idx[] + +#define RTE_PMD_EXPORT_NAME(name, idx) \ +static const char RTE_PMD_EXPORT_NAME_ARRAY(this_pmd_name, idx) \ +__attribute__((used)) = RTE_STR(name) + +#define DRV_EXP_TAG(name, tag) __##name##_##tag + +#define RTE_PMD_REGISTER_PCI_TABLE(name, table) \ +static const char DRV_EXP_TAG(name, pci_tbl_export)[] __attribute__((used)) = \ +RTE_STR(table) + +#define RTE_PMD_REGISTER_PARAM_STRING(name, str) \ +static const char DRV_EXP_TAG(name, param_string_export)[] \ +__attribute__((used)) = str + +/** + * Advertise the list of kernel modules required to run this driver + * + * This string lists the kernel modules required for the devices + * associated to a PMD. The format of each line of the string is: + * " ". + * + * The possible formats for the device pattern are: + * "*" all devices supported by this driver + * "pci:*" all PCI devices supported by this driver + * "pci:v8086:d*:sv*:sd*" all PCI devices supported by this driver + * whose vendor id is 0x8086. + * + * The format of the kernel modules list is a parenthesized expression + * containing logical-and (&) and logical-or (|). + * + * The device pattern and the kmod expression are separated by a space. + * + * Example: + * - "* igb_uio | uio_pci_generic | vfio" + */ +#define RTE_PMD_REGISTER_KMOD_DEP(name, str) \ +static const char DRV_EXP_TAG(name, kmod_dep_export)[] \ +__attribute__((used)) = str + +/** + * Iteration context. + * + * This context carries over the current iteration state. + */ +struct rte_dev_iterator { + const char *dev_str; /**< device string. */ + const char *bus_str; /**< bus-related part of device string. */ + const char *cls_str; /**< class-related part of device string. */ + struct rte_bus *bus; /**< bus handle. */ + struct rte_class *cls; /**< class handle. */ + struct rte_device *device; /**< current position. */ + void *class_device; /**< additional specialized context. */ +}; + +/** + * Device iteration function. + * + * Find the next device matching properties passed in parameters. + * The function takes an additional ``start`` parameter, that is + * used as starting context when relevant. + * + * The function returns the current element in the iteration. + * This return value will potentially be used as a start parameter + * in subsequent calls to the function. 
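A sketch of the PMD export macros above for a made-up "net_example" driver; the kmod expression reuses the example given in the RTE_PMD_REGISTER_KMOD_DEP comment and the parameter names are invented:

#include <rte_dev.h>

RTE_PMD_REGISTER_PARAM_STRING(net_example, "iface=<string> ring_size=<int>");
RTE_PMD_REGISTER_KMOD_DEP(net_example, "* igb_uio | uio_pci_generic | vfio");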
+ * + * The additional iterator parameter is only there if a specific + * implementation needs additional context. It must not be modified by + * the iteration function itself. + * + * @param start + * Starting iteration context. + * + * @param devstr + * Device description string. + * + * @param it + * Device iterator. + * + * @return + * The address of the current element matching the device description + * string. + */ +typedef void *(*rte_dev_iterate_t)(const void *start, + const char *devstr, + const struct rte_dev_iterator *it); + +/** + * Initializes a device iterator. + * + * This iterator allows accessing a list of devices matching a criteria. + * The device matching is made among all buses and classes currently registered, + * filtered by the device description given as parameter. + * + * This function will not allocate any memory. It is safe to stop the + * iteration at any moment and let the iterator go out of context. + * + * @param it + * Device iterator handle. + * + * @param str + * Device description string. + * + * @return + * 0 on successful initialization. + * <0 on error. + */ +__rte_experimental +int +rte_dev_iterator_init(struct rte_dev_iterator *it, const char *str); + +/** + * Iterates on a device iterator. + * + * Generates a new rte_device handle corresponding to the next element + * in the list described in comprehension by the iterator. + * + * The next object is returned, and the iterator is updated. + * + * @param it + * Device iterator handle. + * + * @return + * An rte_device handle if found. + * NULL if an error occurred (rte_errno is set). + * NULL if no device could be found (rte_errno is not set). + */ +__rte_experimental +struct rte_device * +rte_dev_iterator_next(struct rte_dev_iterator *it); + +#define RTE_DEV_FOREACH(dev, devstr, it) \ + for (rte_dev_iterator_init(it, devstr), \ + dev = rte_dev_iterator_next(it); \ + dev != NULL; \ + dev = rte_dev_iterator_next(it)) + +#ifdef __cplusplus +} +#endif + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * It registers the callback for the specific device. + * Multiple callbacks can be registered at the same time. + * + * @param device_name + * The device name, that is the param name of the struct rte_device, + * null value means for all devices. + * @param cb_fn + * callback address. + * @param cb_arg + * address of parameter for callback. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +__rte_experimental +int +rte_dev_event_callback_register(const char *device_name, + rte_dev_event_cb_fn cb_fn, + void *cb_arg); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * It unregisters the callback according to the specified device. + * + * @param device_name + * The device name, that is the param name of the struct rte_device, + * null value means for all devices and their callbacks. + * @param cb_fn + * callback address. + * @param cb_arg + * address of parameter for callback, (void *)-1 means to remove all + * registered which has the same callback address. + * + * @return + * - On success, return the number of callback entities removed. + * - On failure, a negative value. + */ +__rte_experimental +int +rte_dev_event_callback_unregister(const char *device_name, + rte_dev_event_cb_fn cb_fn, + void *cb_arg); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Executes all the user application registered callbacks for + * the specific device. 
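A sketch of the iterator API above, walking every device attached to the PCI bus; these calls are experimental at this point, so ALLOW_EXPERIMENTAL_API is assumed:

#include <stdio.h>
#include <rte_dev.h>

static void
example_list_pci_devices(void)
{
    struct rte_dev_iterator it;
    struct rte_device *dev;

    /* "bus=pci" restricts the iteration to the PCI bus layer */
    RTE_DEV_FOREACH(dev, "bus=pci", &it)
        printf("found device %s\n", dev->name);
}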
+ * + * @param device_name + * The device name. + * @param event + * the device event type. + */ +__rte_experimental +void +rte_dev_event_callback_process(const char *device_name, + enum rte_dev_event_type event); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Start the device event monitoring. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +__rte_experimental +int +rte_dev_event_monitor_start(void); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Stop the device event monitoring. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +__rte_experimental +int +rte_dev_event_monitor_stop(void); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Enable hotplug handling for devices. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +__rte_experimental +int +rte_dev_hotplug_handle_enable(void); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Disable hotplug handling for devices. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +__rte_experimental +int +rte_dev_hotplug_handle_disable(void); + +/** + * Device level DMA map function. + * After a successful call, the memory segment will be mapped to the + * given device. + * + * @note: Memory must be registered in advance using rte_extmem_* APIs. + * + * @param dev + * Device pointer. + * @param addr + * Virtual address to map. + * @param iova + * IOVA address to map. + * @param len + * Length of the memory segment being mapped. + * + * @return + * 0 if mapping was successful. + * Negative value and rte_errno is set otherwise. + */ +__rte_experimental +int +rte_dev_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len); + +/** + * Device level DMA unmap function. + * After a successful call, the memory segment will no longer be + * accessible by the given device. + * + * @note: Memory must be registered in advance using rte_extmem_* APIs. + * + * @param dev + * Device pointer. + * @param addr + * Virtual address to unmap. + * @param iova + * IOVA address to unmap. + * @param len + * Length of the memory segment being mapped. + * + * @return + * 0 if un-mapping was successful. + * Negative value and rte_errno is set otherwise. + */ +__rte_experimental +int +rte_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, + size_t len); + +#endif /* _RTE_DEV_H_ */ diff --git a/lib/librte_eal/include/rte_devargs.h b/lib/librte_eal/include/rte_devargs.h new file mode 100644 index 0000000000..898efa0d66 --- /dev/null +++ b/lib/librte_eal/include/rte_devargs.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2014 6WIND S.A. + */ + +#ifndef _RTE_DEVARGS_H_ +#define _RTE_DEVARGS_H_ + +/** + * @file + * + * RTE devargs: list of devices and their user arguments + * + * This file stores a list of devices and their arguments given by + * the user when a DPDK application is started. These devices can be PCI + * devices or virtual devices. These devices are stored at startup in a + * list of rte_devargs structures. 
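A sketch wiring together the experimental device event APIs above: register a catch-all callback, enable hotplug handling and start the monitor. The ordering of the last two steps is an assumption of this sketch:

#include <stdio.h>
#include <rte_dev.h>

static void
example_dev_event_cb(const char *device_name, enum rte_dev_event_type event,
                     void *cb_arg)
{
    (void)cb_arg;
    printf("device %s: %s\n", device_name,
           event == RTE_DEV_EVENT_ADD ? "added" : "removed");
}

static int
example_start_monitoring(void)
{
    int ret;

    /* NULL device name: receive events for all devices */
    ret = rte_dev_event_callback_register(NULL, example_dev_event_cb, NULL);
    if (ret < 0)
        return ret;

    ret = rte_dev_hotplug_handle_enable();
    if (ret < 0)
        return ret;

    return rte_dev_event_monitor_start();
}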
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +/** + * Type of generic device + */ +enum rte_devtype { + RTE_DEVTYPE_WHITELISTED_PCI, + RTE_DEVTYPE_BLACKLISTED_PCI, + RTE_DEVTYPE_VIRTUAL, +}; + +/** + * Structure that stores a device given by the user with its arguments + * + * A user device is a physical or a virtual device given by the user to + * the DPDK application at startup through command line arguments. + * + * The structure stores the configuration of the device, its PCI + * identifier if it's a PCI device or the driver name if it's a virtual + * device. + */ +struct rte_devargs { + /** Next in list. */ + TAILQ_ENTRY(rte_devargs) next; + /** Type of device. */ + enum rte_devtype type; + /** Device policy. */ + enum rte_dev_policy policy; + /** Name of the device. */ + char name[RTE_DEV_NAME_MAX_LEN]; + RTE_STD_C11 + union { + /** Arguments string as given by user or "" for no argument. */ + char *args; + const char *drv_str; + }; + struct rte_bus *bus; /**< bus handle. */ + struct rte_class *cls; /**< class handle. */ + const char *bus_str; /**< bus-related part of device string. */ + const char *cls_str; /**< class-related part of device string. */ + const char *data; /**< Device string storage. */ +}; + +/** + * Parse a device string. + * + * Verify that a bus is capable of handling the device passed + * in argument. Store which bus will handle the device, its name + * and the eventual device parameters. + * + * The syntax is: + * + * bus:device_identifier,arg1=val1,arg2=val2 + * + * where "bus:" is the bus name followed by any character separator. + * The bus name is optional. If no bus name is specified, each bus + * will attempt to recognize the device identifier. The first one + * to succeed will be used. + * + * Examples: + * + * pci:0000:05.00.0,arg=val + * 05.00.0,arg=val + * vdev:net_ring0 + * + * @param da + * The devargs structure holding the device information. + * + * @param dev + * String describing a device. + * + * @return + * - 0 on success. + * - Negative errno on error. + */ +int +rte_devargs_parse(struct rte_devargs *da, const char *dev); + +/** + * Parse a device string. + * + * Verify that a bus is capable of handling the device passed + * in argument. Store which bus will handle the device, its name + * and the eventual device parameters. + * + * The device string is built with a printf-like syntax. + * + * The syntax is: + * + * bus:device_identifier,arg1=val1,arg2=val2 + * + * where "bus:" is the bus name followed by any character separator. + * The bus name is optional. If no bus name is specified, each bus + * will attempt to recognize the device identifier. The first one + * to succeed will be used. + * + * Examples: + * + * pci:0000:05.00.0,arg=val + * 05.00.0,arg=val + * vdev:net_ring0 + * + * @param da + * The devargs structure holding the device information. + * @param format + * Format string describing a device. + * + * @return + * - 0 on success. + * - Negative errno on error. + */ +int +rte_devargs_parsef(struct rte_devargs *da, + const char *format, ...) +__rte_format_printf(2, 0); + +/** + * Insert an rte_devargs in the global list. + * + * @param da + * The devargs structure to insert. + * If a devargs for the same device is already inserted, + * it will be updated and returned. It means *da pointer can change. + * + * @return + * - 0 on success + * - Negative on error. 
+ */ +int +rte_devargs_insert(struct rte_devargs **da); + +/** + * Add a device to the user device list + * See rte_devargs_parse() for details. + * + * @param devtype + * The type of the device. + * @param devargs_str + * The arguments as given by the user. + * + * @return + * - 0 on success + * - A negative value on error + */ +int rte_devargs_add(enum rte_devtype devtype, const char *devargs_str); + +/** + * Remove a device from the user device list. + * Its resources are freed. + * If the devargs cannot be found, nothing happens. + * + * @param devargs + * The instance or a copy of devargs to remove. + * + * @return + * 0 on success. + * <0 on error. + * >0 if the devargs was not within the user device list. + */ +int rte_devargs_remove(struct rte_devargs *devargs); + +/** + * Count the number of user devices of a specified type + * + * @param devtype + * The type of the devices to counted. + * + * @return + * The number of devices. + */ +unsigned int +rte_devargs_type_count(enum rte_devtype devtype); + +/** + * This function dumps the list of user device and their arguments. + * + * @param f + * A pointer to a file for output + */ +void rte_devargs_dump(FILE *f); + +/** + * Find next rte_devargs matching the provided bus name. + * + * @param busname + * Limit the iteration to devargs related to buses + * matching this name. + * Will return any next rte_devargs if NULL. + * + * @param start + * Starting iteration point. The iteration will start at + * the first rte_devargs if NULL. + * + * @return + * Next rte_devargs entry matching the requested bus, + * NULL if there is none. + */ +struct rte_devargs * +rte_devargs_next(const char *busname, const struct rte_devargs *start); + +/** + * Iterate over all rte_devargs for a specific bus. + */ +#define RTE_EAL_DEVARGS_FOREACH(busname, da) \ + for (da = rte_devargs_next(busname, NULL); \ + da != NULL; \ + da = rte_devargs_next(busname, da)) \ + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_DEVARGS_H_ */ diff --git a/lib/librte_eal/include/rte_eal.h b/lib/librte_eal/include/rte_eal.h new file mode 100644 index 0000000000..2f9ed298de --- /dev/null +++ b/lib/librte_eal/include/rte_eal.h @@ -0,0 +1,495 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#ifndef _RTE_EAL_H_ +#define _RTE_EAL_H_ + +/** + * @file + * + * EAL Configuration API + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTE_MAGIC 19820526 /**< Magic number written by the main partition when ready. */ + +/* Maximum thread_name length. */ +#define RTE_MAX_THREAD_NAME_LEN 16 + +/** + * The lcore role (used in RTE or not). + */ +enum rte_lcore_role_t { + ROLE_RTE, + ROLE_OFF, + ROLE_SERVICE, +}; + +/** + * The type of process in a linux, multi-process setup + */ +enum rte_proc_type_t { + RTE_PROC_AUTO = -1, /* allow auto-detection of primary/secondary */ + RTE_PROC_PRIMARY = 0, /* set to zero, so primary is the default */ + RTE_PROC_SECONDARY, + + RTE_PROC_INVALID +}; + +/** + * Get the process type in a multi-process setup + * + * @return + * The process type + */ +enum rte_proc_type_t rte_eal_process_type(void); + +/** + * Request iopl privilege for all RPL. + * + * This function should be called by pmds which need access to ioports. + + * @return + * - On success, returns 0. + * - On failure, returns -1. + */ +int rte_eal_iopl_init(void); + +/** + * Initialize the Environment Abstraction Layer (EAL). 
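A small sketch of the parsing and iteration helpers documented above. The device string is taken from the examples in the comment; error handling and cleanup of any internal allocations made by the parser are omitted.

#include <stdio.h>
#include <string.h>
#include <rte_devargs.h>

static void
show_pci_devargs(void)
{
	struct rte_devargs da;
	struct rte_devargs *it;

	memset(&da, 0, sizeof(da));
	/* Device string format taken from the examples above. */
	if (rte_devargs_parse(&da, "pci:0000:05.00.0,arg=val") == 0)
		printf("parsed device: %s\n", da.name);

	/* Walk every devargs already attached to the "pci" bus. */
	RTE_EAL_DEVARGS_FOREACH("pci", it)
		printf("known PCI devargs: %s\n", it->name);
}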
+ * + * This function is to be executed on the MASTER lcore only, as soon + * as possible in the application's main() function. + * + * The function finishes the initialization process before main() is called. + * It puts the SLAVE lcores in the WAIT state. + * + * When the multi-partition feature is supported, depending on the + * configuration (if CONFIG_RTE_EAL_MAIN_PARTITION is disabled), this + * function waits to ensure that the magic number is set before + * returning. See also the rte_eal_get_configuration() function. Note: + * This behavior may change in the future. + * + * @param argc + * A non-negative value. If it is greater than 0, the array members + * for argv[0] through argv[argc] (non-inclusive) shall contain pointers + * to strings. + * @param argv + * An array of strings. The contents of the array, as well as the strings + * which are pointed to by the array, may be modified by this function. + * @return + * - On success, the number of parsed arguments, which is greater or + * equal to zero. After the call to rte_eal_init(), + * all arguments argv[x] with x < ret may have been modified by this + * function call and should not be further interpreted by the + * application. The EAL does not take any ownership of the memory used + * for either the argv array, or its members. + * - On failure, -1 and rte_errno is set to a value indicating the cause + * for failure. In some instances, the application will need to be + * restarted as part of clearing the issue. + * + * Error codes returned via rte_errno: + * EACCES indicates a permissions issue. + * + * EAGAIN indicates either a bus or system resource was not available, + * setup may be attempted again. + * + * EALREADY indicates that the rte_eal_init function has already been + * called, and cannot be called again. + * + * EFAULT indicates the tailq configuration name was not found in + * memory configuration. + * + * EINVAL indicates invalid parameters were passed as argv/argc. + * + * ENOMEM indicates failure likely caused by an out-of-memory condition. + * + * ENODEV indicates memory setup issues. + * + * ENOTSUP indicates that the EAL cannot initialize on this system. + * + * EPROTO indicates that the PCI bus is either not present, or is not + * readable by the eal. + * + * ENOEXEC indicates that a service core failed to launch successfully. + */ +int rte_eal_init(int argc, char **argv); + +/** + * Clean up the Environment Abstraction Layer (EAL) + * + * This function must be called to release any internal resources that EAL has + * allocated during rte_eal_init(). After this call, no DPDK function calls may + * be made. It is expected that common usage of this function is to call it + * just before terminating the process. + * + * @return 0 Successfully released all internal EAL resources + * @return -EFAULT There was an error in releasing all resources. + */ +int rte_eal_cleanup(void); + +/** + * Check if a primary process is currently alive + * + * This function returns true when a primary process is currently + * active. + * + * @param config_file_path + * The config_file_path argument provided should point at the location + * that the primary process will create its config file. If NULL, the default + * config file path is used. + * + * @return + * - If alive, returns 1. + * - If dead, returns 0. 
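A minimal main() using the initialization and cleanup calls documented here might look like this; rte_errno and rte_strerror() come from rte_errno.h, which is also part of this header set.

#include <stdio.h>
#include <stdlib.h>
#include <rte_eal.h>
#include <rte_errno.h>

int
main(int argc, char **argv)
{
	int ret;

	/* Run on the MASTER lcore, as early as possible in main(). */
	ret = rte_eal_init(argc, argv);
	if (ret < 0) {
		fprintf(stderr, "EAL init failed: %s\n",
			rte_strerror(rte_errno));
		return EXIT_FAILURE;
	}

	/* Skip the arguments consumed by the EAL. */
	argc -= ret;
	argv += ret;

	/* ... application work ... */

	/* Release all EAL resources before exiting. */
	rte_eal_cleanup();
	return EXIT_SUCCESS;
}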
+ */ +int rte_eal_primary_proc_alive(const char *config_file_path); + +#define RTE_MP_MAX_FD_NUM 8 /* The max amount of fds */ +#define RTE_MP_MAX_NAME_LEN 64 /* The max length of action name */ +#define RTE_MP_MAX_PARAM_LEN 256 /* The max length of param */ +struct rte_mp_msg { + char name[RTE_MP_MAX_NAME_LEN]; + int len_param; + int num_fds; + uint8_t param[RTE_MP_MAX_PARAM_LEN]; + int fds[RTE_MP_MAX_FD_NUM]; +}; + +struct rte_mp_reply { + int nb_sent; + int nb_received; + struct rte_mp_msg *msgs; /* caller to free */ +}; + +/** + * Action function typedef used by other components. + * + * As we create socket channel for primary/secondary communication, use + * this function typedef to register action for coming messages. + * + * @note When handling IPC request callbacks, the reply must be sent even in + * cases of error handling. Simply returning success or failure will *not* + * send a response to the requestor. + * Implementation of error signalling mechanism is up to the application. + * + * @note No memory allocations should take place inside the callback. + */ +typedef int (*rte_mp_t)(const struct rte_mp_msg *msg, const void *peer); + +/** + * Asynchronous reply function typedef used by other components. + * + * As we create socket channel for primary/secondary communication, use + * this function typedef to register action for coming responses to asynchronous + * requests. + * + * @note When handling IPC request callbacks, the reply must be sent even in + * cases of error handling. Simply returning success or failure will *not* + * send a response to the requestor. + * Implementation of error signalling mechanism is up to the application. + * + * @note No memory allocations should take place inside the callback. + */ +typedef int (*rte_mp_async_reply_t)(const struct rte_mp_msg *request, + const struct rte_mp_reply *reply); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Register an action function for primary/secondary communication. + * + * Call this function to register an action, if the calling component wants + * to response the messages from the corresponding component in its primary + * process or secondary processes. + * + * @note IPC may be unsupported in certain circumstances, so caller should check + * for ENOTSUP error. + * + * @param name + * The name argument plays as the nonredundant key to find the action. + * + * @param action + * The action argument is the function pointer to the action function. + * + * @return + * - 0 on success. + * - (<0) on failure. + */ +__rte_experimental +int +rte_mp_action_register(const char *name, rte_mp_t action); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Unregister an action function for primary/secondary communication. + * + * Call this function to unregister an action if the calling component does + * not want to response the messages from the corresponding component in its + * primary process or secondary processes. + * + * @note IPC may be unsupported in certain circumstances, so caller should check + * for ENOTSUP error. + * + * @param name + * The name argument plays as the nonredundant key to find the action. + * + */ +__rte_experimental +void +rte_mp_action_unregister(const char *name); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Send a message to the peer process. + * + * This function will send a message which will be responded by the action + * identified by name in the peer process. 
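A sketch of registering an IPC action, following the note above that a reply must always be sent from the handler. "my_app_action" is a made-up action name, and rte_mp_reply() is declared further down in this header.

#include <stdio.h>
#include <string.h>
#include <rte_eal.h>

/* rte_mp_t handler: always send a reply so the requester is not left waiting. */
static int
handle_my_request(const struct rte_mp_msg *msg, const void *peer)
{
	struct rte_mp_msg reply;

	memset(&reply, 0, sizeof(reply));
	snprintf(reply.name, sizeof(reply.name), "%s", msg->name);
	return rte_mp_reply(&reply, (const char *)peer);
}

static int
register_my_action(void)
{
	return rte_mp_action_register("my_app_action", handle_my_request);
}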
+ * + * @param msg + * The msg argument contains the customized message. + * + * @return + * - On success, return 0. + * - On failure, return -1, and the reason will be stored in rte_errno. + */ +__rte_experimental +int +rte_mp_sendmsg(struct rte_mp_msg *msg); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Send a request to the peer process and expect a reply. + * + * This function sends a request message to the peer process, and will + * block until receiving reply message from the peer process. + * + * @note The caller is responsible to free reply->replies. + * + * @note This API must not be used inside memory-related or IPC callbacks, and + * no memory allocations should take place inside such callback. + * + * @note IPC may be unsupported in certain circumstances, so caller should check + * for ENOTSUP error. + * + * @param req + * The req argument contains the customized request message. + * + * @param reply + * The reply argument will be for storing all the replied messages; + * the caller is responsible for free reply->msgs. + * + * @param ts + * The ts argument specifies how long we can wait for the peer(s) to reply. + * + * @return + * - On success, return 0. + * - On failure, return -1, and the reason will be stored in rte_errno. + */ +__rte_experimental +int +rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply, + const struct timespec *ts); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Send a request to the peer process and expect a reply in a separate callback. + * + * This function sends a request message to the peer process, and will not + * block. Instead, reply will be received in a separate callback. + * + * @note IPC may be unsupported in certain circumstances, so caller should check + * for ENOTSUP error. + * + * @param req + * The req argument contains the customized request message. + * + * @param ts + * The ts argument specifies how long we can wait for the peer(s) to reply. + * + * @param clb + * The callback to trigger when all responses for this request have arrived. + * + * @return + * - On success, return 0. + * - On failure, return -1, and the reason will be stored in rte_errno. + */ +__rte_experimental +int +rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts, + rte_mp_async_reply_t clb); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Send a reply to the peer process. + * + * This function will send a reply message in response to a request message + * received previously. + * + * @note When handling IPC request callbacks, the reply must be sent even in + * cases of error handling. Simply returning success or failure will *not* + * send a response to the requestor. + * Implementation of error signalling mechanism is up to the application. + * + * @param msg + * The msg argument contains the customized message. + * + * @param peer + * The peer argument is the pointer to the peer socket path. + * + * @return + * - On success, return 0. + * - On failure, return -1, and the reason will be stored in rte_errno. + */ +__rte_experimental +int +rte_mp_reply(struct rte_mp_msg *msg, const char *peer); + +/** + * Usage function typedef used by the application usage function. + * + * Use this function typedef to define and call rte_set_application_usage_hook() + * routine. 
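A matching sketch of the synchronous request path documented above, including the mandatory free of reply.msgs; the action name is the same made-up one used in the registration sketch earlier.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <rte_eal.h>

static int
query_peer(void)
{
	struct rte_mp_msg req;
	struct rte_mp_reply reply;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	int i;

	memset(&req, 0, sizeof(req));
	memset(&reply, 0, sizeof(reply));
	strcpy(req.name, "my_app_action"); /* action registered by the peer */

	if (rte_mp_request_sync(&req, &reply, &ts) < 0)
		return -1;

	for (i = 0; i < reply.nb_received; i++)
		printf("received reply %d of %d\n", i + 1, reply.nb_received);

	free(reply.msgs); /* the caller owns and must free the reply array */
	return 0;
}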
+ */ +typedef void (*rte_usage_hook_t)(const char * prgname); + +/** + * Add application usage routine callout from the eal_usage() routine. + * + * This function allows the application to include its usage message + * in the EAL system usage message. The routine rte_set_application_usage_hook() + * needs to be called before the rte_eal_init() routine in the application. + * + * This routine is optional for the application and will behave as if the set + * routine was never called as the default behavior. + * + * @param usage_func + * The func argument is a function pointer to the application usage routine. + * Called function is defined using rte_usage_hook_t typedef, which is of + * the form void rte_usage_func(const char * prgname). + * + * Calling this routine with a NULL value will reset the usage hook routine and + * return the current value, which could be NULL. + * @return + * - Returns the current value of the rte_application_usage pointer to allow + * the caller to daisy chain the usage routines if needing more then one. + */ +rte_usage_hook_t +rte_set_application_usage_hook(rte_usage_hook_t usage_func); + +/** + * Whether EAL is using huge pages (disabled by --no-huge option). + * The no-huge mode is not compatible with all drivers or features. + * + * @return + * Nonzero if hugepages are enabled. + */ +int rte_eal_has_hugepages(void); + +/** + * Whether EAL is using PCI bus. + * Disabled by --no-pci option. + * + * @return + * Nonzero if the PCI bus is enabled. + */ +int rte_eal_has_pci(void); + +/** + * Whether the EAL was asked to create UIO device. + * + * @return + * Nonzero if true. + */ +int rte_eal_create_uio_dev(void); + +/** + * The user-configured vfio interrupt mode. + * + * @return + * Interrupt mode configured with the command line, + * RTE_INTR_MODE_NONE by default. + */ +enum rte_intr_mode rte_eal_vfio_intr_mode(void); + +/** + * A wrap API for syscall gettid. + * + * @return + * On success, returns the thread ID of calling process. + * It is always successful. + */ +int rte_sys_gettid(void); + +/** + * Get system unique thread id. + * + * @return + * On success, returns the thread ID of calling process. + * It is always successful. + */ +static inline int rte_gettid(void) +{ + static RTE_DEFINE_PER_LCORE(int, _thread_id) = -1; + if (RTE_PER_LCORE(_thread_id) == -1) + RTE_PER_LCORE(_thread_id) = rte_sys_gettid(); + return RTE_PER_LCORE(_thread_id); +} + +/** + * Get the iova mode + * + * @return + * enum rte_iova_mode value. + */ +enum rte_iova_mode rte_eal_iova_mode(void); + +/** + * Get user provided pool ops name for mbuf + * + * @return + * returns user provided pool ops name. + */ +const char * +rte_eal_mbuf_user_pool_ops(void); + +/** + * Get the runtime directory of DPDK + * + * @return + * The runtime directory path of DPDK + */ +const char * +rte_eal_get_runtime_dir(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_EAL_H_ */ diff --git a/lib/librte_eal/include/rte_eal_interrupts.h b/lib/librte_eal/include/rte_eal_interrupts.h new file mode 100644 index 0000000000..773a34a42b --- /dev/null +++ b/lib/librte_eal/include/rte_eal_interrupts.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_INTERRUPTS_H_ +#error "don't include this file directly, please include generic " +#endif + +/** + * @file rte_eal_interrupts.h + * @internal + * + * Contains function prototypes exposed by the EAL for interrupt handling by + * drivers and other DPDK internal consumers. 
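As a small illustration of the usage hook documented above (the application option shown is made up):

#include <stdio.h>
#include <rte_eal.h>

/* Appended to the EAL usage output. */
static void
app_usage(const char *prgname)
{
	printf("Usage: %s [EAL options] -- [--app-option]\n", prgname);
}

/* Must be installed before rte_eal_init() to take effect. */
static void
install_usage_hook(void)
{
	rte_set_application_usage_hook(app_usage);
}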
+ */ + +#ifndef _RTE_EAL_INTERRUPTS_H_ +#define _RTE_EAL_INTERRUPTS_H_ + +#define RTE_MAX_RXTX_INTR_VEC_ID 512 +#define RTE_INTR_VEC_ZERO_OFFSET 0 +#define RTE_INTR_VEC_RXTX_OFFSET 1 + +/** + * The interrupt source type, e.g. UIO, VFIO, ALARM etc. + */ +enum rte_intr_handle_type { + RTE_INTR_HANDLE_UNKNOWN = 0, /**< generic unknown handle */ + RTE_INTR_HANDLE_UIO, /**< uio device handle */ + RTE_INTR_HANDLE_UIO_INTX, /**< uio generic handle */ + RTE_INTR_HANDLE_VFIO_LEGACY, /**< vfio device handle (legacy) */ + RTE_INTR_HANDLE_VFIO_MSI, /**< vfio device handle (MSI) */ + RTE_INTR_HANDLE_VFIO_MSIX, /**< vfio device handle (MSIX) */ + RTE_INTR_HANDLE_ALARM, /**< alarm handle */ + RTE_INTR_HANDLE_EXT, /**< external handler */ + RTE_INTR_HANDLE_VDEV, /**< virtual device */ + RTE_INTR_HANDLE_DEV_EVENT, /**< device event handle */ + RTE_INTR_HANDLE_VFIO_REQ, /**< VFIO request handle */ + RTE_INTR_HANDLE_MAX /**< count of elements */ +}; + +#define RTE_INTR_EVENT_ADD 1UL +#define RTE_INTR_EVENT_DEL 2UL + +typedef void (*rte_intr_event_cb_t)(int fd, void *arg); + +struct rte_epoll_data { + uint32_t event; /**< event type */ + void *data; /**< User data */ + rte_intr_event_cb_t cb_fun; /**< IN: callback fun */ + void *cb_arg; /**< IN: callback arg */ +}; + +enum { + RTE_EPOLL_INVALID = 0, + RTE_EPOLL_VALID, + RTE_EPOLL_EXEC, +}; + +/** interrupt epoll event obj, taken by epoll_event.ptr */ +struct rte_epoll_event { + volatile uint32_t status; /**< OUT: event status */ + int fd; /**< OUT: event fd */ + int epfd; /**< OUT: epoll instance the ev associated with */ + struct rte_epoll_data epdata; +}; + +/** Handle for interrupts. */ +struct rte_intr_handle { + RTE_STD_C11 + union { + int vfio_dev_fd; /**< VFIO device file descriptor */ + int uio_cfg_fd; /**< UIO cfg file desc for uio_pci_generic */ + }; + int fd; /**< interrupt event file descriptor */ + enum rte_intr_handle_type type; /**< handle type */ + uint32_t max_intr; /**< max interrupt requested */ + uint32_t nb_efd; /**< number of available efd(event fd) */ + uint8_t efd_counter_size; /**< size of efd counter, used for vdev */ + int efds[RTE_MAX_RXTX_INTR_VEC_ID]; /**< intr vectors/efds mapping */ + struct rte_epoll_event elist[RTE_MAX_RXTX_INTR_VEC_ID]; + /**< intr vector epoll event */ + int *intr_vec; /**< intr vector number array */ +}; + +#define RTE_EPOLL_PER_THREAD -1 /**< to hint using per thread epfd */ + +/** + * It waits for events on the epoll instance. + * + * @param epfd + * Epoll instance fd on which the caller wait for events. + * @param events + * Memory area contains the events that will be available for the caller. + * @param maxevents + * Up to maxevents are returned, must greater than zero. + * @param timeout + * Specifying a timeout of -1 causes a block indefinitely. + * Specifying a timeout equal to zero cause to return immediately. + * @return + * - On success, returns the number of available event. + * - On failure, a negative value. + */ +int +rte_epoll_wait(int epfd, struct rte_epoll_event *events, + int maxevents, int timeout); + +/** + * It performs control operations on epoll instance referred by the epfd. + * It requests that the operation op be performed for the target fd. + * + * @param epfd + * Epoll instance fd on which the caller perform control operations. + * @param op + * The operation be performed for the target fd. + * @param fd + * The target fd on which the control ops perform. + * @param event + * Describes the object linked to the fd. 
+ * Note: The caller must take care the object deletion after CTL_DEL. + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int +rte_epoll_ctl(int epfd, int op, int fd, + struct rte_epoll_event *event); + +/** + * The function returns the per thread epoll instance. + * + * @return + * epfd the epoll instance referred to. + */ +int +rte_intr_tls_epfd(void); + +/** + * @param intr_handle + * Pointer to the interrupt handle. + * @param epfd + * Epoll instance fd which the intr vector associated to. + * @param op + * The operation be performed for the vector. + * Operation type of {ADD, DEL}. + * @param vec + * RX intr vector number added to the epoll instance wait list. + * @param data + * User raw data. + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int +rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, + int epfd, int op, unsigned int vec, void *data); + +/** + * It deletes registered eventfds. + * + * @param intr_handle + * Pointer to the interrupt handle. + */ +void +rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle); + +/** + * It enables the packet I/O interrupt event if it's necessary. + * It creates event fd for each interrupt vector when MSIX is used, + * otherwise it multiplexes a single event fd. + * + * @param intr_handle + * Pointer to the interrupt handle. + * @param nb_efd + * Number of interrupt vector trying to enable. + * The value 0 is not allowed. + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int +rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd); + +/** + * It disables the packet I/O interrupt event. + * It deletes registered eventfds and closes the open fds. + * + * @param intr_handle + * Pointer to the interrupt handle. + */ +void +rte_intr_efd_disable(struct rte_intr_handle *intr_handle); + +/** + * The packet I/O interrupt on datapath is enabled or not. + * + * @param intr_handle + * Pointer to the interrupt handle. + */ +int +rte_intr_dp_is_en(struct rte_intr_handle *intr_handle); + +/** + * The interrupt handle instance allows other causes or not. + * Other causes stand for any none packet I/O interrupts. + * + * @param intr_handle + * Pointer to the interrupt handle. + */ +int +rte_intr_allow_others(struct rte_intr_handle *intr_handle); + +/** + * The multiple interrupt vector capability of interrupt handle instance. + * It returns zero if no multiple interrupt vector support. + * + * @param intr_handle + * Pointer to the interrupt handle. + */ +int +rte_intr_cap_multiple(struct rte_intr_handle *intr_handle); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * @internal + * Check if currently executing in interrupt context + * + * @return + * - non zero in case of interrupt context + * - zero in case of process context + */ +__rte_experimental +int +rte_thread_is_intr(void); + +#endif /* _RTE_EAL_INTERRUPTS_H_ */ diff --git a/lib/librte_eal/include/rte_eal_memconfig.h b/lib/librte_eal/include/rte_eal_memconfig.h new file mode 100644 index 0000000000..dede2ee324 --- /dev/null +++ b/lib/librte_eal/include/rte_eal_memconfig.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_EAL_MEMCONFIG_H_ +#define _RTE_EAL_MEMCONFIG_H_ + +#include + +#include + +/** + * @file + * + * This API allows access to EAL shared memory configuration through an API. 
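The epoll and Rx-interrupt control functions above are typically combined along the following lines. This is only a sketch: intr_handle is assumed to come from the bus or driver layer, and applications usually reach this machinery through higher-level ethdev helpers rather than calling it directly.

#include <stdint.h>
#include <rte_interrupts.h>

static int
poll_rx_interrupts(struct rte_intr_handle *intr_handle, unsigned int nb_rxq)
{
	struct rte_epoll_event ev[8];
	unsigned int q;
	int n, i;

	/* One event fd per Rx queue (when MSI-X is available). */
	if (rte_intr_efd_enable(intr_handle, nb_rxq) < 0)
		return -1;

	/* Attach each queue vector to the per-thread epoll instance. */
	for (q = 0; q < nb_rxq; q++)
		if (rte_intr_rx_ctl(intr_handle, RTE_EPOLL_PER_THREAD,
				RTE_INTR_EVENT_ADD,
				RTE_INTR_VEC_RXTX_OFFSET + q,
				(void *)(uintptr_t)q) < 0)
			return -1;

	/* Block until at least one queue interrupt fires. */
	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, ev, 8, -1);
	for (i = 0; i < n; i++) {
		unsigned int queue_id = (uintptr_t)ev[i].epdata.data;
		/* ... switch this queue back to polling mode ... */
		(void)queue_id;
	}
	return n < 0 ? -1 : 0;
}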
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Lock the internal EAL shared memory configuration for shared access. + */ +void +rte_mcfg_mem_read_lock(void); + +/** + * Unlock the internal EAL shared memory configuration for shared access. + */ +void +rte_mcfg_mem_read_unlock(void); + +/** + * Lock the internal EAL shared memory configuration for exclusive access. + */ +void +rte_mcfg_mem_write_lock(void); + +/** + * Unlock the internal EAL shared memory configuration for exclusive access. + */ +void +rte_mcfg_mem_write_unlock(void); + +/** + * Lock the internal EAL TAILQ list for shared access. + */ +void +rte_mcfg_tailq_read_lock(void); + +/** + * Unlock the internal EAL TAILQ list for shared access. + */ +void +rte_mcfg_tailq_read_unlock(void); + +/** + * Lock the internal EAL TAILQ list for exclusive access. + */ +void +rte_mcfg_tailq_write_lock(void); + +/** + * Unlock the internal EAL TAILQ list for exclusive access. + */ +void +rte_mcfg_tailq_write_unlock(void); + +/** + * Lock the internal EAL Mempool list for shared access. + */ +void +rte_mcfg_mempool_read_lock(void); + +/** + * Unlock the internal EAL Mempool list for shared access. + */ +void +rte_mcfg_mempool_read_unlock(void); + +/** + * Lock the internal EAL Mempool list for exclusive access. + */ +void +rte_mcfg_mempool_write_lock(void); + +/** + * Unlock the internal EAL Mempool list for exclusive access. + */ +void +rte_mcfg_mempool_write_unlock(void); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Lock the internal EAL Timer Library lock for exclusive access. + */ +__rte_experimental +void +rte_mcfg_timer_lock(void); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Unlock the internal EAL Timer Library lock for exclusive access. + */ +__rte_experimental +void +rte_mcfg_timer_unlock(void); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * If true, pages are put in single files (per memseg list), + * as opposed to creating a file per page. + */ +__rte_experimental +bool +rte_mcfg_get_single_file_segments(void); + +#ifdef __cplusplus +} +#endif + +#endif /*__RTE_EAL_MEMCONFIG_H_*/ diff --git a/lib/librte_eal/include/rte_errno.h b/lib/librte_eal/include/rte_errno.h new file mode 100644 index 0000000000..ba45591d24 --- /dev/null +++ b/lib/librte_eal/include/rte_errno.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +/** + * @file + * + * API for error cause tracking + */ + +#ifndef _RTE_ERRNO_H_ +#define _RTE_ERRNO_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +RTE_DECLARE_PER_LCORE(int, _rte_errno); /**< Per core error number. */ + +/** + * Error number value, stored per-thread, which can be queried after + * calls to certain functions to determine why those functions failed. + * + * Uses standard values from errno.h wherever possible, with a small number + * of additional possible values for RTE-specific conditions. + */ +#define rte_errno RTE_PER_LCORE(_rte_errno) + +/** + * Function which returns a printable string describing a particular + * error code. For non-RTE-specific error codes, this function returns + * the value from the libc strerror function. + * + * @param errnum + * The error number to be looked up - generally the value of rte_errno + * @return + * A pointer to a thread-local string containing the text describing + * the error. 
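A short sketch of the locking pattern these functions are meant for:

#include <rte_eal_memconfig.h>

/* Hold the matching reader lock while walking EAL shared memory structures. */
static void
inspect_shared_memory(void)
{
	rte_mcfg_mem_read_lock();
	/* ... read-only inspection of the shared memory configuration ... */
	rte_mcfg_mem_read_unlock();
}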
+ */ +const char *rte_strerror(int errnum); + +#ifndef __ELASTERROR +/** + * Check if we have a defined value for the max system-defined errno values. + * if no max defined, start from 1000 to prevent overlap with standard values + */ +#define __ELASTERROR 1000 +#endif + +/** Error types */ +enum { + RTE_MIN_ERRNO = __ELASTERROR, /**< Start numbering above std errno vals */ + + E_RTE_SECONDARY, /**< Operation not allowed in secondary processes */ + E_RTE_NO_CONFIG, /**< Missing rte_config */ + + RTE_MAX_ERRNO /**< Max RTE error number */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_ERRNO_H_ */ diff --git a/lib/librte_eal/include/rte_fbarray.h b/lib/librte_eal/include/rte_fbarray.h new file mode 100644 index 0000000000..6dccdbec98 --- /dev/null +++ b/lib/librte_eal/include/rte_fbarray.h @@ -0,0 +1,565 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2018 Intel Corporation + */ + +#ifndef RTE_FBARRAY_H +#define RTE_FBARRAY_H + +/** + * @file + * + * File-backed shared indexed array for DPDK. + * + * Basic workflow is expected to be the following: + * 1) Allocate array either using ``rte_fbarray_init()`` or + * ``rte_fbarray_attach()`` (depending on whether it's shared between + * multiple DPDK processes) + * 2) find free spots using ``rte_fbarray_find_next_free()`` + * 3) get pointer to data in the free spot using ``rte_fbarray_get()``, and + * copy data into the pointer (element size is fixed) + * 4) mark entry as used using ``rte_fbarray_set_used()`` + * + * Calls to ``rte_fbarray_init()`` and ``rte_fbarray_destroy()`` will have + * consequences for all processes, while calls to ``rte_fbarray_attach()`` and + * ``rte_fbarray_detach()`` will only have consequences within a single process. + * Therefore, it is safe to call ``rte_fbarray_attach()`` or + * ``rte_fbarray_detach()`` while another process is using ``rte_fbarray``, + * provided no other thread within the same process will try to use + * ``rte_fbarray`` before attaching or after detaching. It is not safe to call + * ``rte_fbarray_init()`` or ``rte_fbarray_destroy()`` while another thread or + * another process is using ``rte_fbarray``. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#include +#include + +#define RTE_FBARRAY_NAME_LEN 64 + +struct rte_fbarray { + char name[RTE_FBARRAY_NAME_LEN]; /**< name associated with an array */ + unsigned int count; /**< number of entries stored */ + unsigned int len; /**< current length of the array */ + unsigned int elt_sz; /**< size of each element */ + void *data; /**< data pointer */ + rte_rwlock_t rwlock; /**< multiprocess lock */ +}; + +/** + * Set up ``rte_fbarray`` structure and allocate underlying resources. + * + * Call this function to correctly set up ``rte_fbarray`` and allocate + * underlying files that will be backing the data in the current process. Note + * that in order to use and share ``rte_fbarray`` between multiple processes, + * data pointed to by ``arr`` pointer must itself be allocated in shared memory. + * + * @param arr + * Valid pointer to allocated ``rte_fbarray`` structure. + * + * @param name + * Unique name to be assigned to this array. + * + * @param len + * Number of elements initially available in the array. + * + * @param elt_sz + * Size of each element. + * + * @return + * - 0 on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. 
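A small helper showing the intended use of rte_errno and rte_strerror(), which also translates the RTE-specific codes such as E_RTE_SECONDARY listed above:

#include <stdio.h>
#include <rte_errno.h>

static void
report_rte_error(const char *what)
{
	fprintf(stderr, "%s failed: %s (rte_errno=%d)\n",
		what, rte_strerror(rte_errno), rte_errno);
}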
+ */ +__rte_experimental +int +rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len, + unsigned int elt_sz); + + +/** + * Attach to a file backing an already allocated and correctly set up + * ``rte_fbarray`` structure. + * + * Call this function to attach to file that will be backing the data in the + * current process. The structure must have been previously correctly set up + * with a call to ``rte_fbarray_init()``. Calls to ``rte_fbarray_attach()`` are + * usually meant to be performed in a multiprocessing scenario, with data + * pointed to by ``arr`` pointer allocated in shared memory. + * + * @param arr + * Valid pointer to allocated and correctly set up rte_fbarray structure. + * + * @return + * - 0 on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_attach(struct rte_fbarray *arr); + + +/** + * Deallocate resources for an already allocated and correctly set up + * ``rte_fbarray`` structure, and remove the underlying file. + * + * Call this function to deallocate all resources associated with an + * ``rte_fbarray`` structure within the current process. This will also + * zero-fill data pointed to by ``arr`` pointer and remove the underlying file + * backing the data, so it is expected that by the time this function is called, + * all other processes have detached from this ``rte_fbarray``. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @return + * - 0 on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_destroy(struct rte_fbarray *arr); + + +/** + * Deallocate resources for an already allocated and correctly set up + * ``rte_fbarray`` structure. + * + * Call this function to deallocate all resources associated with an + * ``rte_fbarray`` structure within current process. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @return + * - 0 on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_detach(struct rte_fbarray *arr); + + +/** + * Get pointer to element residing at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param idx + * Index of an element to get a pointer to. + * + * @return + * - non-NULL pointer on success. + * - NULL on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +void * +rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx); + + +/** + * Find index of a specified element within the array. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param elt + * Pointer to element to find index to. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt); + + +/** + * Mark specified element as used. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param idx + * Element index to mark as used. + * + * @return + * - 0 on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. 
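The basic workflow from the file comment above, shown as a sketch. It is single-process, so the array structure can live in ordinary memory; names and sizes are arbitrary, and find_next_free() is documented a little further below in this header.

#include <stdint.h>
#include <rte_fbarray.h>

struct my_elem {
	uint64_t value;
};

static int
setup_array(struct rte_fbarray *arr)
{
	/* 64 elements of fixed size; "demo_elems" is an arbitrary name. */
	return rte_fbarray_init(arr, "demo_elems", 64, sizeof(struct my_elem));
}

/* Find a free slot, fill it through the returned pointer, mark it used. */
static int
store_value(struct rte_fbarray *arr, uint64_t value)
{
	struct my_elem *elem;
	int idx;

	idx = rte_fbarray_find_next_free(arr, 0);
	if (idx < 0)
		return -1;

	elem = rte_fbarray_get(arr, idx);
	if (elem == NULL)
		return -1;
	elem->value = value;

	return rte_fbarray_set_used(arr, idx);
}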
+ */ +__rte_experimental +int +rte_fbarray_set_used(struct rte_fbarray *arr, unsigned int idx); + + +/** + * Mark specified element as free. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param idx + * Element index to mark as free. + * + * @return + * - 0 on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_set_free(struct rte_fbarray *arr, unsigned int idx); + + +/** + * Check whether element at specified index is marked as used. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param idx + * Element index to check as used. + * + * @return + * - 1 if element is used. + * - 0 if element is unused. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_is_used(struct rte_fbarray *arr, unsigned int idx); + + +/** + * Find index of next free element, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_next_free(struct rte_fbarray *arr, unsigned int start); + + +/** + * Find index of next used element, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_next_used(struct rte_fbarray *arr, unsigned int start); + + +/** + * Find index of next chunk of ``n`` free elements, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @param n + * Number of free elements to look for. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_next_n_free(struct rte_fbarray *arr, unsigned int start, + unsigned int n); + + +/** + * Find index of next chunk of ``n`` used elements, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @param n + * Number of used elements to look for. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_next_n_used(struct rte_fbarray *arr, unsigned int start, + unsigned int n); + + +/** + * Find how many more free entries there are, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. 
+ */ +__rte_experimental +int +rte_fbarray_find_contig_free(struct rte_fbarray *arr, + unsigned int start); + + +/** + * Find how many more used entries there are, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_contig_used(struct rte_fbarray *arr, unsigned int start); + +/** + * Find index of previous free element, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_prev_free(struct rte_fbarray *arr, unsigned int start); + + +/** + * Find index of previous used element, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_prev_used(struct rte_fbarray *arr, unsigned int start); + + +/** + * Find lowest start index of chunk of ``n`` free elements, down from specified + * index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @param n + * Number of free elements to look for. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_prev_n_free(struct rte_fbarray *arr, unsigned int start, + unsigned int n); + + +/** + * Find lowest start index of chunk of ``n`` used elements, down from specified + * index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @param n + * Number of used elements to look for. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_prev_n_used(struct rte_fbarray *arr, unsigned int start, + unsigned int n); + + +/** + * Find how many more free entries there are before specified index (like + * ``rte_fbarray_find_contig_free`` but going in reverse). + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_rev_contig_free(struct rte_fbarray *arr, + unsigned int start); + + +/** + * Find how many more used entries there are before specified index (like + * ``rte_fbarray_find_contig_used`` but going in reverse). + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. 
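A sketch of scanning the array with the search helpers documented here, relying on the -1 return once no further used element exists:

#include <stdio.h>
#include <rte_fbarray.h>

static void
dump_used_entries(struct rte_fbarray *arr)
{
	int idx;

	for (idx = rte_fbarray_find_next_used(arr, 0);
			idx >= 0;
			idx = rte_fbarray_find_next_used(arr, idx + 1))
		printf("index %d -> %p\n", idx, rte_fbarray_get(arr, idx));
}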
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start); + + +/** + * Find index of biggest chunk of free elements, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_biggest_free(struct rte_fbarray *arr, unsigned int start); + + +/** + * Find index of biggest chunk of used elements, starting at specified index. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_biggest_used(struct rte_fbarray *arr, unsigned int start); + + +/** + * Find index of biggest chunk of free elements before a specified index (like + * ``rte_fbarray_find_biggest_free``, but going in reverse). + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_rev_biggest_free(struct rte_fbarray *arr, unsigned int start); + + +/** + * Find index of biggest chunk of used elements before a specified index (like + * ``rte_fbarray_find_biggest_used``, but going in reverse). + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param start + * Element index to start search from. + * + * @return + * - non-negative integer on success. + * - -1 on failure, with ``rte_errno`` indicating reason for failure. + */ +__rte_experimental +int +rte_fbarray_find_rev_biggest_used(struct rte_fbarray *arr, unsigned int start); + + +/** + * Dump ``rte_fbarray`` metadata. + * + * @param arr + * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure. + * + * @param f + * File object to dump information into. + */ +__rte_experimental +void +rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_FBARRAY_H */ diff --git a/lib/librte_eal/include/rte_function_versioning.h b/lib/librte_eal/include/rte_function_versioning.h new file mode 100644 index 0000000000..c924351d5e --- /dev/null +++ b/lib/librte_eal/include/rte_function_versioning.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015 Neil Horman . + * All rights reserved. + */ + +#ifndef _RTE_FUNCTION_VERSIONING_H_ +#define _RTE_FUNCTION_VERSIONING_H_ +#include + +#ifndef RTE_USE_FUNCTION_VERSIONING +#error Use of function versioning disabled, is "use_function_versioning=true" in meson.build? +#endif + +#ifdef RTE_BUILD_SHARED_LIB + +/* + * Provides backwards compatibility when updating exported functions. + * When a symol is exported from a library to provide an API, it also provides a + * calling convention (ABI) that is embodied in its name, return type, + * arguments, etc. 
On occasion that function may need to change to accommodate + * new functionality, behavior, etc. When that occurs, it is desirable to + * allow for backwards compatibility for a time with older binaries that are + * dynamically linked to the dpdk. To support that, the __vsym and + * VERSION_SYMBOL macros are created. They, in conjunction with the + * _version.map file for a given library allow for multiple versions of + * a symbol to exist in a shared library so that older binaries need not be + * immediately recompiled. + * + * Refer to the guidelines document in the docs subdirectory for details on the + * use of these macros + */ + +/* + * Macro Parameters: + * b - function base name + * e - function version extension, to be concatenated with base name + * n - function symbol version string to be applied + * f - function prototype + * p - full function symbol name + */ + +/* + * VERSION_SYMBOL + * Creates a symbol version table entry binding symbol @DPDK_ to the internal + * function name + */ +#define VERSION_SYMBOL(b, e, n) __asm__(".symver " RTE_STR(b) RTE_STR(e) ", " RTE_STR(b) "@DPDK_" RTE_STR(n)) + +/* + * BIND_DEFAULT_SYMBOL + * Creates a symbol version entry instructing the linker to bind references to + * symbol to the internal symbol + */ +#define BIND_DEFAULT_SYMBOL(b, e, n) __asm__(".symver " RTE_STR(b) RTE_STR(e) ", " RTE_STR(b) "@@DPDK_" RTE_STR(n)) + +/* + * __vsym + * Annotation to be used in declaration of the internal symbol to signal + * that it is being used as an implementation of a particular version of symbol + * . + */ +#define __vsym __attribute__((used)) + +/* + * MAP_STATIC_SYMBOL + * If a function has been bifurcated into multiple versions, none of which + * are defined as the exported symbol name in the map file, this macro can be + * used to alias a specific version of the symbol to its exported name. For + * example, if you have 2 versions of a function foo_v1 and foo_v2, where the + * former is mapped to foo@DPDK_1 and the latter is mapped to foo@DPDK_2 when + * building a shared library, this macro can be used to map either foo_v1 or + * foo_v2 to the symbol foo when building a static library, e.g.: + * MAP_STATIC_SYMBOL(void foo(), foo_v2); + */ +#define MAP_STATIC_SYMBOL(f, p) + +#else +/* + * No symbol versioning in use + */ +#define VERSION_SYMBOL(b, e, n) +#define __vsym +#define BIND_DEFAULT_SYMBOL(b, e, n) +#define MAP_STATIC_SYMBOL(f, p) f __attribute__((alias(RTE_STR(p)))) +/* + * RTE_BUILD_SHARED_LIB=n + */ +#endif + +#endif /* _RTE_FUNCTION_VERSIONING_H_ */ diff --git a/lib/librte_eal/include/rte_hexdump.h b/lib/librte_eal/include/rte_hexdump.h new file mode 100644 index 0000000000..2d03c089c4 --- /dev/null +++ b/lib/librte_eal/include/rte_hexdump.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_HEXDUMP_H_ +#define _RTE_HEXDUMP_H_ + +/** + * @file + * Simple API to dump out memory in a special hex format. + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** +* Dump out memory in a special hex dump format. +* +* @param f +* A pointer to a file for output +* @param title +* If not NULL this string is printed as a header to the output. +* @param buf +* This is the buffer address to print out. +* @param len +* The number of bytes to dump out +* @return +* None. +*/ + +extern void +rte_hexdump(FILE *f, const char * title, const void * buf, unsigned int len); + +/** +* Dump out memory in a hex format with colons between bytes. 
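A short usage sketch for the dump helpers in rte_hexdump.h:

#include <stdio.h>
#include <rte_hexdump.h>

static void
dump_packet_head(const void *data, unsigned int len)
{
	/* Print at most the first 64 bytes in hex dump form. */
	rte_hexdump(stdout, "packet header", data, len < 64 ? len : 64);
}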
+* +* @param f +* A pointer to a file for output +* @param title +* If not NULL this string is printed as a header to the output. +* @param buf +* This is the buffer address to print out. +* @param len +* The number of bytes to dump out +* @return +* None. +*/ + +void +rte_memdump(FILE *f, const char * title, const void * buf, unsigned int len); + + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_HEXDUMP_H_ */ diff --git a/lib/librte_eal/include/rte_hypervisor.h b/lib/librte_eal/include/rte_hypervisor.h new file mode 100644 index 0000000000..5fe719c1d4 --- /dev/null +++ b/lib/librte_eal/include/rte_hypervisor.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 Mellanox Technologies, Ltd + */ + +#ifndef RTE_HYPERVISOR_H +#define RTE_HYPERVISOR_H + +/** + * @file + * Hypervisor awareness. + */ + +enum rte_hypervisor { + RTE_HYPERVISOR_NONE, + RTE_HYPERVISOR_KVM, + RTE_HYPERVISOR_HYPERV, + RTE_HYPERVISOR_VMWARE, + RTE_HYPERVISOR_UNKNOWN +}; + +/** + * Get the id of hypervisor it is running on. + */ +enum rte_hypervisor +rte_hypervisor_get(void); + +/** + * Get the name of a given hypervisor id. + */ +const char * +rte_hypervisor_get_name(enum rte_hypervisor id); + +#endif /* RTE_HYPERVISOR_H */ diff --git a/lib/librte_eal/include/rte_interrupts.h b/lib/librte_eal/include/rte_interrupts.h new file mode 100644 index 0000000000..e3b406abc2 --- /dev/null +++ b/lib/librte_eal/include/rte_interrupts.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_INTERRUPTS_H_ +#define _RTE_INTERRUPTS_H_ + +#include +#include + +/** + * @file + * + * The RTE interrupt interface provides functions to register/unregister + * callbacks for a specific interrupt. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** Interrupt handle */ +struct rte_intr_handle; + +/** Function to be registered for the specific interrupt */ +typedef void (*rte_intr_callback_fn)(void *cb_arg); + +/** + * Function to call after a callback is unregistered. + * Can be used to close fd and free cb_arg. + */ +typedef void (*rte_intr_unregister_callback_fn)(struct rte_intr_handle *intr_handle, + void *cb_arg); + +#include "rte_eal_interrupts.h" + +/** + * It registers the callback for the specific interrupt. Multiple + * callbacks can be registered at the same time. + * @param intr_handle + * Pointer to the interrupt handle. + * @param cb + * callback address. + * @param cb_arg + * address of parameter for callback. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int rte_intr_callback_register(const struct rte_intr_handle *intr_handle, + rte_intr_callback_fn cb, void *cb_arg); + +/** + * It unregisters the callback according to the specified interrupt handle. + * + * @param intr_handle + * pointer to the interrupt handle. + * @param cb + * callback address. + * @param cb_arg + * address of parameter for callback, (void *)-1 means to remove all + * registered which has the same callback address. + * + * @return + * - On success, return the number of callback entities removed. + * - On failure, a negative value. + */ +int rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle, + rte_intr_callback_fn cb, void *cb_arg); + +/** + * Unregister the callback according to the specified interrupt handle, + * after it's no longer active. Fail if source is not active. + * + * @param intr_handle + * pointer to the interrupt handle. + * @param cb_fn + * callback address. 
+ * @param cb_arg + * address of parameter for callback, (void *)-1 means to remove all + * registered which has the same callback address. + * @param ucb_fn + * callback to call before cb is unregistered (optional). + * can be used to close fd and free cb_arg. + * + * @return + * - On success, return the number of callback entities marked for remove. + * - On failure, a negative value. + */ +__rte_experimental +int +rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle, + rte_intr_callback_fn cb_fn, void *cb_arg, + rte_intr_unregister_callback_fn ucb_fn); + +/** + * It enables the interrupt for the specified handle. + * + * @param intr_handle + * pointer to the interrupt handle. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int rte_intr_enable(const struct rte_intr_handle *intr_handle); + +/** + * It disables the interrupt for the specified handle. + * + * @param intr_handle + * pointer to the interrupt handle. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int rte_intr_disable(const struct rte_intr_handle *intr_handle); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * It acknowledges an interrupt raised for the specified handle. + * + * This function should be called at the end of each interrupt handler either + * from application or driver, so that currently raised interrupt is acked and + * further new interrupts are raised. + * + * @param intr_handle + * pointer to the interrupt handle. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +__rte_experimental +int rte_intr_ack(const struct rte_intr_handle *intr_handle); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/librte_eal/include/rte_keepalive.h b/lib/librte_eal/include/rte_keepalive.h new file mode 100644 index 0000000000..4bda7ca56f --- /dev/null +++ b/lib/librte_eal/include/rte_keepalive.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015-2016 Intel Corporation. + */ + +/** + * @file rte_keepalive.h + * DPDK RTE LCore Keepalive Monitor. + * + **/ + +#ifndef _KEEPALIVE_H_ +#define _KEEPALIVE_H_ + +#include +#include + +#ifndef RTE_KEEPALIVE_MAXCORES +/** + * Number of cores to track. + * @note Must be larger than the highest core id. */ +#define RTE_KEEPALIVE_MAXCORES RTE_MAX_LCORE +#endif + +enum rte_keepalive_state { + RTE_KA_STATE_UNUSED = 0, + RTE_KA_STATE_ALIVE = 1, + RTE_KA_STATE_MISSING = 4, + RTE_KA_STATE_DEAD = 2, + RTE_KA_STATE_GONE = 3, + RTE_KA_STATE_DOZING = 5, + RTE_KA_STATE_SLEEP = 6 +}; + +/** + * Keepalive failure callback. + * + * Receives a data pointer passed to rte_keepalive_create() and the id of the + * failed core. + * @param data Data pointer passed to rte_keepalive_create() + * @param id_core ID of the core that has failed + */ +typedef void (*rte_keepalive_failure_callback_t)( + void *data, + const int id_core); + +/** + * Keepalive relay callback. + * + * Receives a data pointer passed to rte_keepalive_register_relay_callback(), + * the id of the core for which state is to be forwarded, and details of the + * current core state. 
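A sketch combining callback registration, enabling and acknowledgement as documented in this header; intr_handle is assumed to be supplied by the bus or driver that owns the device.

#include <rte_interrupts.h>

/* Interrupt service routine with the rte_intr_callback_fn signature. */
static void
my_isr(void *cb_arg)
{
	struct rte_intr_handle *intr_handle = cb_arg;

	/* ... read and clear the device's interrupt cause ... */

	/* Re-arm the source so further interrupts can be delivered. */
	rte_intr_ack(intr_handle);
}

static int
setup_interrupt(struct rte_intr_handle *intr_handle)
{
	if (rte_intr_callback_register(intr_handle, my_isr, intr_handle) < 0)
		return -1;
	return rte_intr_enable(intr_handle);
}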
+ * @param data Data pointer passed to rte_keepalive_register_relay_callback() + * @param id_core ID of the core for which state is being reported + * @param core_state The current state of the core + * @param Timestamp of when core was last seen alive + */ +typedef void (*rte_keepalive_relay_callback_t)( + void *data, + const int id_core, + enum rte_keepalive_state core_state, + uint64_t last_seen + ); + +/** + * Keepalive state structure. + * @internal + */ +struct rte_keepalive; + +/** + * Initialise keepalive sub-system. + * @param callback + * Function called upon detection of a dead core. + * @param data + * Data pointer to be passed to function callback. + * @return + * Keepalive structure success, NULL on failure. + */ +struct rte_keepalive *rte_keepalive_create( + rte_keepalive_failure_callback_t callback, + void *data); + +/** + * Checks & handles keepalive state of monitored cores. + * @param *ptr_timer Triggering timer (unused) + * @param *ptr_data Data pointer (keepalive structure) + */ +void rte_keepalive_dispatch_pings(void *ptr_timer, void *ptr_data); + +/** + * Registers a core for keepalive checks. + * @param *keepcfg + * Keepalive structure pointer + * @param id_core + * ID number of core to register. + */ +void rte_keepalive_register_core(struct rte_keepalive *keepcfg, + const int id_core); + +/** + * Per-core keepalive check. + * @param *keepcfg + * Keepalive structure pointer + * + * This function needs to be called from within the main process loop of + * the LCore to be checked. + */ +void +rte_keepalive_mark_alive(struct rte_keepalive *keepcfg); + +/** + * Per-core sleep-time indication. + * @param *keepcfg + * Keepalive structure pointer + * + * If CPU idling is enabled, this function needs to be called from within + * the main process loop of the LCore going to sleep, in order to avoid + * the LCore being mis-detected as dead. + */ +void +rte_keepalive_mark_sleep(struct rte_keepalive *keepcfg); + +/** + * Registers a 'live core' callback. + * + * The complement of the 'dead core' callback. This is called when a + * core is known to be alive, and is intended for cases when an app + * needs to know 'liveness' beyond just knowing when a core has died. + * + * @param *keepcfg + * Keepalive structure pointer + * @param callback + * Function called upon detection of a dead core. + * @param data + * Data pointer to be passed to function callback. + */ +void +rte_keepalive_register_relay_callback(struct rte_keepalive *keepcfg, + rte_keepalive_relay_callback_t callback, + void *data); + +#endif /* _KEEPALIVE_H_ */ diff --git a/lib/librte_eal/include/rte_launch.h b/lib/librte_eal/include/rte_launch.h new file mode 100644 index 0000000000..06a671752a --- /dev/null +++ b/lib/librte_eal/include/rte_launch.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_LAUNCH_H_ +#define _RTE_LAUNCH_H_ + +/** + * @file + * + * Launch tasks on other lcores + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * State of an lcore. + */ +enum rte_lcore_state_t { + WAIT, /**< waiting a new command */ + RUNNING, /**< executing command */ + FINISHED, /**< command executed */ +}; + +/** + * Definition of a remote launch function. + */ +typedef int (lcore_function_t)(void *); + +/** + * Launch a function on another lcore. + * + * To be executed on the MASTER lcore only. 
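To show how the keepalive pieces fit together, a minimal sketch follows; the worker loop and the timer that would periodically call rte_keepalive_dispatch_pings() are assumptions of this example:

#include <stdio.h>
#include <rte_keepalive.h>
#include <rte_lcore.h>

static struct rte_keepalive *ka;

static void
dead_core_cb(void *data, const int id_core)
{
        (void)data;
        printf("lcore %d stopped checking in\n", id_core);
}

static int
worker_loop(void *arg)
{
        (void)arg;
        for (;;) {
                rte_keepalive_mark_alive(ka); /* heartbeat once per iteration */
                /* ... packet processing ... */
        }
        return 0;
}

static void
setup_keepalive(void)
{
        unsigned int lcore;

        ka = rte_keepalive_create(dead_core_cb, NULL);
        RTE_LCORE_FOREACH_SLAVE(lcore)
                rte_keepalive_register_core(ka, lcore);
        /* rte_keepalive_dispatch_pings() must then be called periodically,
         * typically from an rte_timer running on the master lcore. */
}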
+ * + * Sends a message to a slave lcore (identified by the slave_id) that + * is in the WAIT state (this is true after the first call to + * rte_eal_init()). This can be checked by first calling + * rte_eal_wait_lcore(slave_id). + * + * When the remote lcore receives the message, it switches to + * the RUNNING state, then calls the function f with argument arg. Once the + * execution is done, the remote lcore switches to a FINISHED state and + * the return value of f is stored in a local variable to be read using + * rte_eal_wait_lcore(). + * + * The MASTER lcore returns as soon as the message is sent and knows + * nothing about the completion of f. + * + * Note: This function is not designed to offer optimum + * performance. It is just a practical way to launch a function on + * another lcore at initialization time. + * + * @param f + * The function to be called. + * @param arg + * The argument for the function. + * @param slave_id + * The identifier of the lcore on which the function should be executed. + * @return + * - 0: Success. Execution of function f started on the remote lcore. + * - (-EBUSY): The remote lcore is not in a WAIT state. + */ +int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned slave_id); + +/** + * This enum indicates whether the master core must execute the handler + * launched on all logical cores. + */ +enum rte_rmt_call_master_t { + SKIP_MASTER = 0, /**< lcore handler not executed by master core. */ + CALL_MASTER, /**< lcore handler executed by master core. */ +}; + +/** + * Launch a function on all lcores. + * + * Check that each SLAVE lcore is in a WAIT state, then call + * rte_eal_remote_launch() for each lcore. + * + * @param f + * The function to be called. + * @param arg + * The argument for the function. + * @param call_master + * If call_master set to SKIP_MASTER, the MASTER lcore does not call + * the function. If call_master is set to CALL_MASTER, the function + * is also called on master before returning. In any case, the master + * lcore returns as soon as it finished its job and knows nothing + * about the completion of f on the other lcores. + * @return + * - 0: Success. Execution of function f started on all remote lcores. + * - (-EBUSY): At least one remote lcore is not in a WAIT state. In this + * case, no message is sent to any of the lcores. + */ +int rte_eal_mp_remote_launch(lcore_function_t *f, void *arg, + enum rte_rmt_call_master_t call_master); + +/** + * Get the state of the lcore identified by slave_id. + * + * To be executed on the MASTER lcore only. + * + * @param slave_id + * The identifier of the lcore. + * @return + * The state of the lcore. + */ +enum rte_lcore_state_t rte_eal_get_lcore_state(unsigned slave_id); + +/** + * Wait until an lcore finishes its job. + * + * To be executed on the MASTER lcore only. + * + * If the slave lcore identified by the slave_id is in a FINISHED state, + * switch to the WAIT state. If the lcore is in RUNNING state, wait until + * the lcore finishes its job and moves to the FINISHED state. + * + * @param slave_id + * The identifier of the lcore. + * @return + * - 0: If the lcore identified by the slave_id is in a WAIT state. + * - The value that was returned by the previous remote launch + * function call if the lcore identified by the slave_id was in a + * FINISHED or RUNNING state. In this case, it changes the state + * of the lcore to WAIT. + */ +int rte_eal_wait_lcore(unsigned slave_id); + +/** + * Wait until all lcores finish their jobs. 
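A minimal sketch of the launch/wait pattern described here, to be called from the master lcore after rte_eal_init(); the lcore_hello function is illustrative:

#include <stdio.h>
#include <rte_launch.h>
#include <rte_lcore.h>

static int
lcore_hello(void *arg)
{
        (void)arg;
        printf("hello from lcore %u\n", rte_lcore_id());
        return 0;
}

static void
run_on_all_lcores(void)
{
        /* Execute on every slave lcore and on the master as well. */
        if (rte_eal_mp_remote_launch(lcore_hello, NULL, CALL_MASTER) != 0)
                return; /* -EBUSY: some lcore was not in the WAIT state */
        /* Block until every slave lcore is back in the WAIT state. */
        rte_eal_mp_wait_lcore();
}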
+ * + * To be executed on the MASTER lcore only. Issue an + * rte_eal_wait_lcore() for every lcore. The return values are + * ignored. + * + * After a call to rte_eal_mp_wait_lcore(), the caller can assume + * that all slave lcores are in a WAIT state. + */ +void rte_eal_mp_wait_lcore(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_LAUNCH_H_ */ diff --git a/lib/librte_eal/include/rte_lcore.h b/lib/librte_eal/include/rte_lcore.h new file mode 100644 index 0000000000..476b8ef3a7 --- /dev/null +++ b/lib/librte_eal/include/rte_lcore.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_LCORE_H_ +#define _RTE_LCORE_H_ + +/** + * @file + * + * API for lcore and socket manipulation + * + */ +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define LCORE_ID_ANY UINT32_MAX /**< Any lcore. */ + +RTE_DECLARE_PER_LCORE(unsigned, _lcore_id); /**< Per thread "lcore id". */ +RTE_DECLARE_PER_LCORE(rte_cpuset_t, _cpuset); /**< Per thread "cpuset". */ + +/** + * Get a lcore's role. + * + * @param lcore_id + * The identifier of the lcore, which MUST be between 0 and RTE_MAX_LCORE-1. + * @return + * The role of the lcore. + */ +enum rte_lcore_role_t rte_eal_lcore_role(unsigned int lcore_id); + +/** + * Return the Application thread ID of the execution unit. + * + * Note: in most cases the lcore id returned here will also correspond + * to the processor id of the CPU on which the thread is pinned, this + * will not be the case if the user has explicitly changed the thread to + * core affinities using --lcores EAL argument e.g. --lcores '(0-3)@10' + * to run threads with lcore IDs 0, 1, 2 and 3 on physical core 10.. + * + * @return + * Logical core ID (in EAL thread) or LCORE_ID_ANY (in non-EAL thread) + */ +static inline unsigned +rte_lcore_id(void) +{ + return RTE_PER_LCORE(_lcore_id); +} + +/** + * Get the id of the master lcore + * + * @return + * the id of the master lcore + */ +unsigned int rte_get_master_lcore(void); + +/** + * Return the number of execution units (lcores) on the system. + * + * @return + * the number of execution units (lcores) on the system. + */ +unsigned int rte_lcore_count(void); + +/** + * Return the index of the lcore starting from zero. + * + * When option -c or -l is given, the index corresponds + * to the order in the list. + * For example: + * -c 0x30, lcore 4 has index 0, and 5 has index 1. + * -l 22,18 lcore 22 has index 0, and 18 has index 1. + * + * @param lcore_id + * The targeted lcore, or -1 for the current one. + * @return + * The relative index, or -1 if not enabled. + */ +int rte_lcore_index(int lcore_id); + +/** + * Return the ID of the physical socket of the logical core we are + * running on. + * @return + * the ID of current lcoreid's physical socket + */ +unsigned int rte_socket_id(void); + +/** + * Return number of physical sockets detected on the system. + * + * Note that number of nodes may not be correspondent to their physical id's: + * for example, a system may report two socket id's, but the actual socket id's + * may be 0 and 8. + * + * @return + * the number of physical sockets as recognized by EAL + */ +unsigned int +rte_socket_count(void); + +/** + * Return socket id with a particular index. + * + * This will return socket id at a particular position in list of all detected + * physical socket id's. For example, on a machine with sockets [0, 8], passing + * 1 as a parameter will return 8. 
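For reference, a small sketch that uses the lcore and socket query functions above to print the detected topology after rte_eal_init():

#include <stdio.h>
#include <rte_lcore.h>

static void
print_topology(void)
{
        unsigned int i;

        printf("%u lcores, master lcore %u, running on socket %u\n",
                rte_lcore_count(), rte_get_master_lcore(), rte_socket_id());

        /* Socket indexes are contiguous even when physical IDs are not. */
        for (i = 0; i < rte_socket_count(); i++)
                printf("socket index %u -> physical socket id %d\n",
                        i, rte_socket_id_by_idx(i));
}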
+ * + * @param idx + * index of physical socket id to return + * + * @return + * - physical socket id as recognized by EAL + * - -1 on error, with errno set to EINVAL + */ +int +rte_socket_id_by_idx(unsigned int idx); + +/** + * Get the ID of the physical socket of the specified lcore + * + * @param lcore_id + * the targeted lcore, which MUST be between 0 and RTE_MAX_LCORE-1. + * @return + * the ID of lcoreid's physical socket + */ +unsigned int +rte_lcore_to_socket_id(unsigned int lcore_id); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Return the id of the lcore on a socket starting from zero. + * + * @param lcore_id + * The targeted lcore, or -1 for the current one. + * @return + * The relative index, or -1 if not enabled. + */ +__rte_experimental +int +rte_lcore_to_cpu_id(int lcore_id); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Return the cpuset for a given lcore. + * @param lcore_id + * the targeted lcore, which MUST be between 0 and RTE_MAX_LCORE-1. + * @return + * The cpuset of that lcore + */ +__rte_experimental +rte_cpuset_t +rte_lcore_cpuset(unsigned int lcore_id); + +/** + * Test if an lcore is enabled. + * + * @param lcore_id + * The identifier of the lcore, which MUST be between 0 and + * RTE_MAX_LCORE-1. + * @return + * True if the given lcore is enabled; false otherwise. + */ +int rte_lcore_is_enabled(unsigned int lcore_id); + +/** + * Get the next enabled lcore ID. + * + * @param i + * The current lcore (reference). + * @param skip_master + * If true, do not return the ID of the master lcore. + * @param wrap + * If true, go back to 0 when RTE_MAX_LCORE is reached; otherwise, + * return RTE_MAX_LCORE. + * @return + * The next lcore_id or RTE_MAX_LCORE if not found. + */ +unsigned int rte_get_next_lcore(unsigned int i, int skip_master, int wrap); + +/** + * Macro to browse all running lcores. + */ +#define RTE_LCORE_FOREACH(i) \ + for (i = rte_get_next_lcore(-1, 0, 0); \ + i +#include +#include +#include +#include + +#include +#include +#include + +struct rte_log_dynamic_type; + +/** The rte_log structure. */ +struct rte_logs { + uint32_t type; /**< Bitfield with enabled logs. */ + uint32_t level; /**< Log level. */ + FILE *file; /**< Output file set by rte_openlog_stream, or NULL. */ + size_t dynamic_types_len; + struct rte_log_dynamic_type *dynamic_types; +}; + +/** Global log information */ +extern struct rte_logs rte_logs; + +/* SDK log type */ +#define RTE_LOGTYPE_EAL 0 /**< Log related to eal. */ +#define RTE_LOGTYPE_MALLOC 1 /**< Log related to malloc. */ +#define RTE_LOGTYPE_RING 2 /**< Log related to ring. */ +#define RTE_LOGTYPE_MEMPOOL 3 /**< Log related to mempool. */ +#define RTE_LOGTYPE_TIMER 4 /**< Log related to timers. */ +#define RTE_LOGTYPE_PMD 5 /**< Log related to poll mode driver. */ +#define RTE_LOGTYPE_HASH 6 /**< Log related to hash table. */ +#define RTE_LOGTYPE_LPM 7 /**< Log related to LPM. */ +#define RTE_LOGTYPE_KNI 8 /**< Log related to KNI. */ +#define RTE_LOGTYPE_ACL 9 /**< Log related to ACL. */ +#define RTE_LOGTYPE_POWER 10 /**< Log related to power. */ +#define RTE_LOGTYPE_METER 11 /**< Log related to QoS meter. */ +#define RTE_LOGTYPE_SCHED 12 /**< Log related to QoS port scheduler. */ +#define RTE_LOGTYPE_PORT 13 /**< Log related to port. */ +#define RTE_LOGTYPE_TABLE 14 /**< Log related to table. */ +#define RTE_LOGTYPE_PIPELINE 15 /**< Log related to pipeline. */ +#define RTE_LOGTYPE_MBUF 16 /**< Log related to mbuf. 
*/ +#define RTE_LOGTYPE_CRYPTODEV 17 /**< Log related to cryptodev. */ +#define RTE_LOGTYPE_EFD 18 /**< Log related to EFD. */ +#define RTE_LOGTYPE_EVENTDEV 19 /**< Log related to eventdev. */ +#define RTE_LOGTYPE_GSO 20 /**< Log related to GSO. */ + +/* these log types can be used in an application */ +#define RTE_LOGTYPE_USER1 24 /**< User-defined log type 1. */ +#define RTE_LOGTYPE_USER2 25 /**< User-defined log type 2. */ +#define RTE_LOGTYPE_USER3 26 /**< User-defined log type 3. */ +#define RTE_LOGTYPE_USER4 27 /**< User-defined log type 4. */ +#define RTE_LOGTYPE_USER5 28 /**< User-defined log type 5. */ +#define RTE_LOGTYPE_USER6 29 /**< User-defined log type 6. */ +#define RTE_LOGTYPE_USER7 30 /**< User-defined log type 7. */ +#define RTE_LOGTYPE_USER8 31 /**< User-defined log type 8. */ + +/** First identifier for extended logs */ +#define RTE_LOGTYPE_FIRST_EXT_ID 32 + +/* Can't use 0, as it gives compiler warnings */ +#define RTE_LOG_EMERG 1U /**< System is unusable. */ +#define RTE_LOG_ALERT 2U /**< Action must be taken immediately. */ +#define RTE_LOG_CRIT 3U /**< Critical conditions. */ +#define RTE_LOG_ERR 4U /**< Error conditions. */ +#define RTE_LOG_WARNING 5U /**< Warning conditions. */ +#define RTE_LOG_NOTICE 6U /**< Normal but significant condition. */ +#define RTE_LOG_INFO 7U /**< Informational. */ +#define RTE_LOG_DEBUG 8U /**< Debug-level messages. */ + +/** + * Change the stream that will be used by the logging system. + * + * This can be done at any time. The f argument represents the stream + * to be used to send the logs. If f is NULL, the default output is + * used (stderr). + * + * @param f + * Pointer to the stream. + * @return + * - 0 on success. + * - Negative on error. + */ +int rte_openlog_stream(FILE *f); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Retrieve the stream used by the logging system (see rte_openlog_stream() + * to change it). + * + * @return + * Pointer to the stream. + */ +__rte_experimental +FILE *rte_log_get_stream(void); + +/** + * Set the global log level. + * + * After this call, logs with a level lower or equal than the level + * passed as argument will be displayed. + * + * @param level + * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). + */ +void rte_log_set_global_level(uint32_t level); + +/** + * Get the global log level. + * + * @return + * The current global log level. + */ +uint32_t rte_log_get_global_level(void); + +/** + * Get the log level for a given type. + * + * @param logtype + * The log type identifier. + * @return + * 0 on success, a negative value if logtype is invalid. + */ +int rte_log_get_level(uint32_t logtype); + +/** + * For a given `logtype`, check if a log with `loglevel` can be printed. + * + * @param logtype + * The log type identifier + * @param loglevel + * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). + * @return + * Returns 'true' if log can be printed and 'false' if it can't. + */ +__rte_experimental +bool rte_log_can_log(uint32_t logtype, uint32_t loglevel); + +/** + * Set the log level for a given type based on shell pattern. + * + * @param pattern + * The match pattern identifying the log type. + * @param level + * The level to be set. + * @return + * 0 on success, a negative value if level is invalid. + */ +int rte_log_set_level_pattern(const char *pattern, uint32_t level); + +/** + * Set the log level for a given type based on regular expression. 
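As an illustration of the stream and level controls above, a short sketch follows; the log file path and the "lib.eal*" pattern are assumptions of this example:

#include <stdio.h>
#include <rte_log.h>

static int
tune_logging(void)
{
        FILE *f = fopen("/tmp/dpdk.log", "w"); /* hypothetical destination */

        if (f == NULL || rte_openlog_stream(f) < 0)
                return -1;

        /* Keep most components quiet, but enable debug output for every
         * log type matching the shell-style pattern (here: EAL types). */
        rte_log_set_global_level(RTE_LOG_NOTICE);
        return rte_log_set_level_pattern("lib.eal*", RTE_LOG_DEBUG);
}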
+ * + * @param regex + * The regular expression identifying the log type. + * @param level + * The level to be set. + * @return + * 0 on success, a negative value if level is invalid. + */ +int rte_log_set_level_regexp(const char *regex, uint32_t level); + +/** + * Set the log level for a given type. + * + * @param logtype + * The log type identifier. + * @param level + * The level to be set. + * @return + * 0 on success, a negative value if logtype or level is invalid. + */ +int rte_log_set_level(uint32_t logtype, uint32_t level); + +/** + * Get the current loglevel for the message being processed. + * + * Before calling the user-defined stream for logging, the log + * subsystem sets a per-lcore variable containing the loglevel and the + * logtype of the message being processed. This information can be + * accessed by the user-defined log output function through this + * function. + * + * @return + * The loglevel of the message being processed. + */ +int rte_log_cur_msg_loglevel(void); + +/** + * Get the current logtype for the message being processed. + * + * Before calling the user-defined stream for logging, the log + * subsystem sets a per-lcore variable containing the loglevel and the + * logtype of the message being processed. This information can be + * accessed by the user-defined log output function through this + * function. + * + * @return + * The logtype of the message being processed. + */ +int rte_log_cur_msg_logtype(void); + +/** + * Register a dynamic log type + * + * If a log is already registered with the same type, the returned value + * is the same than the previous one. + * + * @param name + * The string identifying the log type. + * @return + * - >0: success, the returned value is the log type identifier. + * - (-ENOMEM): cannot allocate memory. + */ +int rte_log_register(const char *name); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Register a dynamic log type and try to pick its level from EAL options + * + * rte_log_register() is called inside. If successful, the function tries + * to search for matching regexp in the list of EAL log level options and + * pick the level from the last matching entry. If nothing can be applied + * from the list, the level will be set to the user-defined default value. + * + * @param name + * Name for the log type to be registered + * @param level_def + * Fallback level to be set if the global list has no matching options + * @return + * - >=0: the newly registered log type + * - <0: rte_log_register() error value + */ +__rte_experimental +int rte_log_register_type_and_pick_level(const char *name, uint32_t level_def); + +/** + * Dump log information. + * + * Dump the global level and the registered log types. + * + * @param f + * The output stream where the dump should be sent. + */ +void rte_log_dump(FILE *f); + +/** + * Generates a log message. + * + * The message will be sent in the stream defined by the previous call + * to rte_openlog_stream(). + * + * The level argument determines if the log should be displayed or + * not, depending on the global rte_logs variable. + * + * The preferred alternative is the RTE_LOG() because it adds the + * level and type in the logged string. + * + * @param level + * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). + * @param logtype + * The log type, for example, RTE_LOGTYPE_EAL. + * @param format + * The format string, as in printf(3), followed by the variable arguments + * required by the format. + * @return + * - 0: Success. 
+ * - Negative on error. + */ +int rte_log(uint32_t level, uint32_t logtype, const char *format, ...) +#ifdef __GNUC__ +#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2)) + __attribute__((cold)) +#endif +#endif + __rte_format_printf(3, 4); + +/** + * Generates a log message. + * + * The message will be sent in the stream defined by the previous call + * to rte_openlog_stream(). + * + * The level argument determines if the log should be displayed or + * not, depending on the global rte_logs variable. A trailing + * newline may be added if needed. + * + * The preferred alternative is the RTE_LOG() because it adds the + * level and type in the logged string. + * + * @param level + * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). + * @param logtype + * The log type, for example, RTE_LOGTYPE_EAL. + * @param format + * The format string, as in printf(3), followed by the variable arguments + * required by the format. + * @param ap + * The va_list of the variable arguments required by the format. + * @return + * - 0: Success. + * - Negative on error. + */ +int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap) + __rte_format_printf(3, 0); + +/** + * Generates a log message. + * + * The RTE_LOG() is a helper that prefixes the string with the log level + * and type, and call rte_log(). + * + * @param l + * Log level. A value between EMERG (1) and DEBUG (8). The short name is + * expanded by the macro, so it cannot be an integer value. + * @param t + * The log type, for example, EAL. The short name is expanded by the + * macro, so it cannot be an integer value. + * @param ... + * The fmt string, as in printf(3), followed by the variable arguments + * required by the format. + * @return + * - 0: Success. + * - Negative on error. + */ +#define RTE_LOG(l, t, ...) \ + rte_log(RTE_LOG_ ## l, \ + RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__) + +/** + * Generates a log message for data path. + * + * Similar to RTE_LOG(), except that it is removed at compilation time + * if the RTE_LOG_DP_LEVEL configuration option is lower than the log + * level argument. + * + * @param l + * Log level. A value between EMERG (1) and DEBUG (8). The short name is + * expanded by the macro, so it cannot be an integer value. + * @param t + * The log type, for example, EAL. The short name is expanded by the + * macro, so it cannot be an integer value. + * @param ... + * The fmt string, as in printf(3), followed by the variable arguments + * required by the format. + * @return + * - 0: Success. + * - Negative on error. + */ +#define RTE_LOG_DP(l, t, ...) \ + (void)((RTE_LOG_ ## l <= RTE_LOG_DP_LEVEL) ? \ + rte_log(RTE_LOG_ ## l, \ + RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__) : \ + 0) + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_LOG_H_ */ diff --git a/lib/librte_eal/include/rte_malloc.h b/lib/librte_eal/include/rte_malloc.h new file mode 100644 index 0000000000..42ca05182f --- /dev/null +++ b/lib/librte_eal/include/rte_malloc.h @@ -0,0 +1,560 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2019 Intel Corporation + */ + +#ifndef _RTE_MALLOC_H_ +#define _RTE_MALLOC_H_ + +/** + * @file + * RTE Malloc. This library provides methods for dynamically allocating memory + * from hugepages. + */ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Structure to hold heap statistics obtained from rte_malloc_get_socket_stats function. 
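Tying together rte_log_register() and rte_log() from the logging API above, a minimal sketch; the "myapp.init" type name is hypothetical:

#include <rte_log.h>

static int my_logtype; /* hypothetical application log type */

static void
init_my_logging(void)
{
        my_logtype = rte_log_register("myapp.init");
        if (my_logtype >= 0)
                rte_log_set_level(my_logtype, RTE_LOG_INFO);
}

static void
log_port_config(int port)
{
        /* Dynamic types go through rte_log() directly; the RTE_LOG() macro
         * only works with the compile-time RTE_LOGTYPE_* identifiers. */
        rte_log(RTE_LOG_INFO, my_logtype, "MYAPP: configuring port %d\n", port);
}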
+ */ +struct rte_malloc_socket_stats { + size_t heap_totalsz_bytes; /**< Total bytes on heap */ + size_t heap_freesz_bytes; /**< Total free bytes on heap */ + size_t greatest_free_size; /**< Size in bytes of largest free block */ + unsigned free_count; /**< Number of free elements on heap */ + unsigned alloc_count; /**< Number of allocated elements on heap */ + size_t heap_allocsz_bytes; /**< Total allocated bytes on heap */ +}; + +/** + * This function allocates memory from the huge-page area of memory. The memory + * is not cleared. In NUMA systems, the memory allocated resides on the same + * NUMA socket as the core that calls this function. + * + * @param type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param size + * Size (in bytes) to be allocated. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the allocated object. + */ +void * +rte_malloc(const char *type, size_t size, unsigned align); + +/** + * Allocate zero'ed memory from the heap. + * + * Equivalent to rte_malloc() except that the memory zone is + * initialised with zeros. In NUMA systems, the memory allocated resides on the + * same NUMA socket as the core that calls this function. + * + * @param type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param size + * Size (in bytes) to be allocated. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must obviously be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the allocated object. + */ +void * +rte_zmalloc(const char *type, size_t size, unsigned align); + +/** + * Replacement function for calloc(), using huge-page memory. Memory area is + * initialised with zeros. In NUMA systems, the memory allocated resides on the + * same NUMA socket as the core that calls this function. + * + * @param type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param num + * Number of elements to be allocated. + * @param size + * Size (in bytes) of a single element. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must obviously be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the allocated object. 
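A small sketch of the basic allocation calls documented above; the flow_entry table is a hypothetical use case:

#include <stdint.h>
#include <rte_malloc.h>

struct flow_entry {
        uint64_t key;
        uint64_t hits;
};

static struct flow_entry *
alloc_flow_table(size_t n)
{
        /* Zeroed, cache-line aligned memory from hugepages on the caller's
         * NUMA socket; returns NULL on failure. */
        return rte_zmalloc("flow_table", n * sizeof(struct flow_entry), 0);
}

static void
free_flow_table(struct flow_entry *tbl)
{
        rte_free(tbl); /* rte_free(NULL) is a no-op */
}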
+ */ +void * +rte_calloc(const char *type, size_t num, size_t size, unsigned align); + +/** + * Replacement function for realloc(), using huge-page memory. Reserved area + * memory is resized, preserving contents. In NUMA systems, the new area + * may not reside on the same NUMA node as the old one. + * + * @param ptr + * Pointer to already allocated memory + * @param size + * Size (in bytes) of new area. If this is 0, memory is freed. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must obviously be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the reallocated memory. + */ +void * +rte_realloc(void *ptr, size_t size, unsigned int align); + +/** + * Replacement function for realloc(), using huge-page memory. Reserved area + * memory is resized, preserving contents. In NUMA systems, the new area + * resides on requested NUMA socket. + * + * @param ptr + * Pointer to already allocated memory + * @param size + * Size (in bytes) of new area. If this is 0, memory is freed. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must obviously be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @param socket + * NUMA socket to allocate memory on. + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the reallocated memory. + */ +__rte_experimental +void * +rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket); + +/** + * This function allocates memory from the huge-page area of memory. The memory + * is not cleared. + * + * @param type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param size + * Size (in bytes) to be allocated. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @param socket + * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this function + * will behave the same as rte_malloc(). + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the allocated object. + */ +void * +rte_malloc_socket(const char *type, size_t size, unsigned align, int socket); + +/** + * Allocate zero'ed memory from the heap. + * + * Equivalent to rte_malloc() except that the memory zone is + * initialised with zeros. + * + * @param type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param size + * Size (in bytes) to be allocated. 
+ * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must obviously be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @param socket + * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this function + * will behave the same as rte_zmalloc(). + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the allocated object. + */ +void * +rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket); + +/** + * Replacement function for calloc(), using huge-page memory. Memory area is + * initialised with zeros. + * + * @param type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param num + * Number of elements to be allocated. + * @param size + * Size (in bytes) of a single element. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must obviously be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @param socket + * NUMA socket to allocate memory on. If SOCKET_ID_ANY is used, this function + * will behave the same as rte_calloc(). + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the allocated object. + */ +void * +rte_calloc_socket(const char *type, size_t num, size_t size, unsigned align, int socket); + +/** + * Frees the memory space pointed to by the provided pointer. + * + * This pointer must have been returned by a previous call to + * rte_malloc(), rte_zmalloc(), rte_calloc() or rte_realloc(). The behaviour of + * rte_free() is undefined if the pointer does not match this requirement. + * + * If the pointer is NULL, the function does nothing. + * + * @param ptr + * The pointer to memory to be freed. + */ +void +rte_free(void *ptr); + +/** + * If malloc debug is enabled, check a memory block for header + * and trailer markers to indicate that all is well with the block. + * If size is non-null, also return the size of the block. + * + * @param ptr + * pointer to the start of a data block, must have been returned + * by a previous call to rte_malloc(), rte_zmalloc(), rte_calloc() + * or rte_realloc() + * @param size + * if non-null, and memory block pointer is valid, returns the size + * of the memory block + * @return + * -1 on error, invalid pointer passed or header and trailer markers + * are missing or corrupted + * 0 on success + */ +int +rte_malloc_validate(const void *ptr, size_t *size); + +/** + * Get heap statistics for the specified heap. + * + * @note This function is not thread-safe with respect to + * ``rte_malloc_heap_create()``/``rte_malloc_heap_destroy()`` functions. 
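Putting the *_socket variants above to use, a sketch that keeps per-port state local to the polling lcore's NUMA node; the port/lcore pairing is hypothetical:

#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memory.h> /* SOCKET_ID_ANY */

static void *
alloc_port_state(size_t len, unsigned int polling_lcore)
{
        int socket = rte_lcore_to_socket_id(polling_lcore);
        void *p = rte_zmalloc_socket("port_state", len, 0, socket);

        if (p == NULL) /* fall back to any socket rather than failing */
                p = rte_zmalloc_socket("port_state", len, 0, SOCKET_ID_ANY);
        return p;
}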
+ * + * @param socket + * An unsigned integer specifying the socket to get heap statistics for + * @param socket_stats + * A structure which provides memory to store statistics + * @return + * Null on error + * Pointer to structure storing statistics on success + */ +int +rte_malloc_get_socket_stats(int socket, + struct rte_malloc_socket_stats *socket_stats); + +/** + * Add memory chunk to a heap with specified name. + * + * @note Multiple memory chunks can be added to the same heap + * + * @note Before accessing this memory in other processes, it needs to be + * attached in each of those processes by calling + * ``rte_malloc_heap_memory_attach`` in each other process. + * + * @note Memory must be previously allocated for DPDK to be able to use it as a + * malloc heap. Failing to do so will result in undefined behavior, up to and + * including segmentation faults. + * + * @note Calling this function will erase any contents already present at the + * supplied memory address. + * + * @param heap_name + * Name of the heap to add memory chunk to + * @param va_addr + * Start of virtual area to add to the heap. Must be aligned by ``page_sz``. + * @param len + * Length of virtual area to add to the heap. Must be aligned by ``page_sz``. + * @param iova_addrs + * Array of page IOVA addresses corresponding to each page in this memory + * area. Can be NULL, in which case page IOVA addresses will be set to + * RTE_BAD_IOVA. + * @param n_pages + * Number of elements in the iova_addrs array. Ignored if ``iova_addrs`` + * is NULL. + * @param page_sz + * Page size of the underlying memory + * + * @return + * - 0 on success + * - -1 in case of error, with rte_errno set to one of the following: + * EINVAL - one of the parameters was invalid + * EPERM - attempted to add memory to a reserved heap + * ENOSPC - no more space in internal config to store a new memory chunk + */ +__rte_experimental +int +rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len, + rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz); + +/** + * Remove memory chunk from heap with specified name. + * + * @note Memory chunk being removed must be the same as one that was added; + * partially removing memory chunks is not supported + * + * @note Memory area must not contain any allocated elements to allow its + * removal from the heap + * + * @note All other processes must detach from the memory chunk prior to it being + * removed from the heap. + * + * @param heap_name + * Name of the heap to remove memory from + * @param va_addr + * Virtual address to remove from the heap + * @param len + * Length of virtual area to remove from the heap + * + * @return + * - 0 on success + * - -1 in case of error, with rte_errno set to one of the following: + * EINVAL - one of the parameters was invalid + * EPERM - attempted to remove memory from a reserved heap + * ENOENT - heap or memory chunk was not found + * EBUSY - memory chunk still contains data + */ +__rte_experimental +int +rte_malloc_heap_memory_remove(const char *heap_name, void *va_addr, size_t len); + +/** + * Attach to an already existing chunk of external memory in another process. + * + * @note This function must be called before any attempt is made to use an + * already existing external memory chunk. This function does *not* need to + * be called if a call to ``rte_malloc_heap_memory_add`` was made in the + * current process. 
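For example, rte_malloc_get_socket_stats() can be combined with the socket iteration helpers to print a short heap usage summary; a sketch with error paths reduced to a continue:

#include <stdio.h>
#include <rte_lcore.h>
#include <rte_malloc.h>

static void
dump_heap_usage(FILE *f)
{
        unsigned int idx;

        for (idx = 0; idx < rte_socket_count(); idx++) {
                int socket = rte_socket_id_by_idx(idx);
                struct rte_malloc_socket_stats stats;

                if (rte_malloc_get_socket_stats(socket, &stats) < 0)
                        continue;
                fprintf(f, "socket %d: %zu bytes total, %zu free, %u allocations\n",
                        socket, stats.heap_totalsz_bytes,
                        stats.heap_freesz_bytes, stats.alloc_count);
        }
}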
+ * + * @param heap_name + * Heap name to which this chunk of memory belongs + * @param va_addr + * Start address of memory chunk to attach to + * @param len + * Length of memory chunk to attach to + * @return + * 0 on successful attach + * -1 on unsuccessful attach, with rte_errno set to indicate cause for error: + * EINVAL - one of the parameters was invalid + * EPERM - attempted to attach memory to a reserved heap + * ENOENT - heap or memory chunk was not found + */ +__rte_experimental +int +rte_malloc_heap_memory_attach(const char *heap_name, void *va_addr, size_t len); + +/** + * Detach from a chunk of external memory in secondary process. + * + * @note This function must be called in before any attempt is made to remove + * external memory from the heap in another process. This function does *not* + * need to be called if a call to ``rte_malloc_heap_memory_remove`` will be + * called in current process. + * + * @param heap_name + * Heap name to which this chunk of memory belongs + * @param va_addr + * Start address of memory chunk to attach to + * @param len + * Length of memory chunk to attach to + * @return + * 0 on successful detach + * -1 on unsuccessful detach, with rte_errno set to indicate cause for error: + * EINVAL - one of the parameters was invalid + * EPERM - attempted to detach memory from a reserved heap + * ENOENT - heap or memory chunk was not found + */ +__rte_experimental +int +rte_malloc_heap_memory_detach(const char *heap_name, void *va_addr, size_t len); + +/** + * Creates a new empty malloc heap with a specified name. + * + * @note Heaps created via this call will automatically get assigned a unique + * socket ID, which can be found using ``rte_malloc_heap_get_socket()`` + * + * @param heap_name + * Name of the heap to create. + * + * @return + * - 0 on successful creation + * - -1 in case of error, with rte_errno set to one of the following: + * EINVAL - ``heap_name`` was NULL, empty or too long + * EEXIST - heap by name of ``heap_name`` already exists + * ENOSPC - no more space in internal config to store a new heap + */ +__rte_experimental +int +rte_malloc_heap_create(const char *heap_name); + +/** + * Destroys a previously created malloc heap with specified name. + * + * @note This function will return a failure result if not all memory allocated + * from the heap has been freed back to the heap + * + * @note This function will return a failure result if not all memory segments + * were removed from the heap prior to its destruction + * + * @param heap_name + * Name of the heap to create. + * + * @return + * - 0 on success + * - -1 in case of error, with rte_errno set to one of the following: + * EINVAL - ``heap_name`` was NULL, empty or too long + * ENOENT - heap by the name of ``heap_name`` was not found + * EPERM - attempting to destroy reserved heap + * EBUSY - heap still contains data + */ +__rte_experimental +int +rte_malloc_heap_destroy(const char *heap_name); + +/** + * Find socket ID corresponding to a named heap. + * + * @param name + * Heap name to find socket ID for + * @return + * Socket ID in case of success (a non-negative number) + * -1 in case of error, with rte_errno set to one of the following: + * EINVAL - ``name`` was NULL + * ENOENT - heap identified by the name ``name`` was not found + */ +__rte_experimental +int +rte_malloc_heap_get_socket(const char *name); + +/** + * Check if a given socket ID refers to externally allocated memory. + * + * @note Passing SOCKET_ID_ANY will return 0. 
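Putting the heap management calls above together, a rough sketch of exposing an application-managed memory area to rte_malloc; these are experimental APIs, and base, len and page_sz are assumptions of this example (they must describe previously allocated, page-aligned memory that nothing else is using):

#include <rte_malloc.h>

static int
add_external_heap(void *base, size_t len, size_t page_sz)
{
        int socket;
        void *obj;

        if (rte_malloc_heap_create("app_ext_heap") < 0)
                return -1;
        /* No IOVA table given: pages are marked RTE_BAD_IOVA, which is
         * fine for CPU-only use of this heap. */
        if (rte_malloc_heap_memory_add("app_ext_heap", base, len,
                        NULL, 0, page_sz) < 0)
                return -1;

        /* Allocate from the new heap via its auto-assigned socket ID. */
        socket = rte_malloc_heap_get_socket("app_ext_heap");
        obj = rte_malloc_socket("ext_obj", 4096, 0, socket);
        rte_free(obj);
        return obj != NULL ? 0 : -1;
}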
+ * + * @param socket_id + * Socket ID to check + * @return + * 1 if socket ID refers to externally allocated memory + * 0 if socket ID refers to internal DPDK memory + * -1 if socket ID is invalid + */ +__rte_experimental +int +rte_malloc_heap_socket_is_external(int socket_id); + +/** + * Dump statistics. + * + * Dump for the specified type to a file. If the type argument is + * NULL, all memory types will be dumped. + * + * @note This function is not thread-safe with respect to + * ``rte_malloc_heap_create()``/``rte_malloc_heap_destroy()`` functions. + * + * @param f + * A pointer to a file for output + * @param type + * A string identifying the type of objects to dump, or NULL + * to dump all objects. + */ +void +rte_malloc_dump_stats(FILE *f, const char *type); + +/** + * Dump contents of all malloc heaps to a file. + * + * @note This function is not thread-safe with respect to + * ``rte_malloc_heap_create()``/``rte_malloc_heap_destroy()`` functions. + * + * @param f + * A pointer to a file for output + */ +__rte_experimental +void +rte_malloc_dump_heaps(FILE *f); + +/** + * Set the maximum amount of allocated memory for this type. + * + * This is not yet implemented + * + * @param type + * A string identifying the type of allocated objects. + * @param max + * The maximum amount of allocated bytes for this type. + * @return + * - 0: Success. + * - (-1): Error. + */ +__rte_deprecated +int +rte_malloc_set_limit(const char *type, size_t max); + +/** + * Return the IO address of a virtual address obtained through + * rte_malloc + * + * @param addr + * Address obtained from a previous rte_malloc call + * @return + * RTE_BAD_IOVA on error + * otherwise return an address suitable for IO + */ +rte_iova_t +rte_malloc_virt2iova(const void *addr); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MALLOC_H_ */ diff --git a/lib/librte_eal/include/rte_memory.h b/lib/librte_eal/include/rte_memory.h new file mode 100644 index 0000000000..3d8d0bd697 --- /dev/null +++ b/lib/librte_eal/include/rte_memory.h @@ -0,0 +1,784 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_MEMORY_H_ +#define _RTE_MEMORY_H_ + +/** + * @file + * + * Memory-related RTE API. + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +__extension__ +enum rte_page_sizes { + RTE_PGSIZE_4K = 1ULL << 12, + RTE_PGSIZE_64K = 1ULL << 16, + RTE_PGSIZE_256K = 1ULL << 18, + RTE_PGSIZE_2M = 1ULL << 21, + RTE_PGSIZE_16M = 1ULL << 24, + RTE_PGSIZE_256M = 1ULL << 28, + RTE_PGSIZE_512M = 1ULL << 29, + RTE_PGSIZE_1G = 1ULL << 30, + RTE_PGSIZE_4G = 1ULL << 32, + RTE_PGSIZE_16G = 1ULL << 34, +}; + +#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */ + +/** + * Physical memory segment descriptor. + */ +#define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0) +/**< Prevent this segment from being freed back to the OS. */ +struct rte_memseg { + RTE_STD_C11 + union { + phys_addr_t phys_addr; /**< deprecated - Start physical address. */ + rte_iova_t iova; /**< Start IO address. */ + }; + RTE_STD_C11 + union { + void *addr; /**< Start virtual address. */ + uint64_t addr_64; /**< Makes sure addr is always 64 bits */ + }; + size_t len; /**< Length of the segment. */ + uint64_t hugepage_sz; /**< The pagesize of underlying memory */ + int32_t socket_id; /**< NUMA socket ID. */ + uint32_t nchannel; /**< Number of channels. */ + uint32_t nrank; /**< Number of ranks. 
*/ + uint32_t flags; /**< Memseg-specific flags */ +} __rte_packed; + +/** + * memseg list is a special case as we need to store a bunch of other data + * together with the array itself. + */ +struct rte_memseg_list { + RTE_STD_C11 + union { + void *base_va; + /**< Base virtual address for this memseg list. */ + uint64_t addr_64; + /**< Makes sure addr is always 64-bits */ + }; + uint64_t page_sz; /**< Page size for all memsegs in this list. */ + int socket_id; /**< Socket ID for all memsegs in this list. */ + volatile uint32_t version; /**< version number for multiprocess sync. */ + size_t len; /**< Length of memory area covered by this memseg list. */ + unsigned int external; /**< 1 if this list points to external memory */ + unsigned int heap; /**< 1 if this list points to a heap */ + struct rte_fbarray memseg_arr; +}; + +/** + * Lock page in physical memory and prevent from swapping. + * + * @param virt + * The virtual address. + * @return + * 0 on success, negative on error. + */ +int rte_mem_lock_page(const void *virt); + +/** + * Get physical address of any mapped virtual address in the current process. + * It is found by browsing the /proc/self/pagemap special file. + * The page must be locked. + * + * @param virt + * The virtual address. + * @return + * The physical address or RTE_BAD_IOVA on error. + */ +phys_addr_t rte_mem_virt2phy(const void *virt); + +/** + * Get IO virtual address of any mapped virtual address in the current process. + * + * @note This function will not check internal page table. Instead, in IOVA as + * PA mode, it will fall back to getting real physical address (which may + * not match the expected IOVA, such as what was specified for external + * memory). + * + * @param virt + * The virtual address. + * @return + * The IO address or RTE_BAD_IOVA on error. + */ +rte_iova_t rte_mem_virt2iova(const void *virt); + +/** + * Get virtual memory address corresponding to iova address. + * + * @note This function read-locks the memory hotplug subsystem, and thus cannot + * be used within memory-related callback functions. + * + * @param iova + * The iova address. + * @return + * Virtual address corresponding to iova address (or NULL if address does not + * exist within DPDK memory map). + */ +__rte_experimental +void * +rte_mem_iova2virt(rte_iova_t iova); + +/** + * Get memseg to which a particular virtual address belongs. + * + * @param virt + * The virtual address. + * @param msl + * The memseg list in which to look up based on ``virt`` address + * (can be NULL). + * @return + * Memseg pointer on success, or NULL on error. + */ +__rte_experimental +struct rte_memseg * +rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl); + +/** + * Get memseg list corresponding to virtual memory address. + * + * @param virt + * The virtual address. + * @return + * Memseg list to which this virtual address belongs to. + */ +__rte_experimental +struct rte_memseg_list * +rte_mem_virt2memseg_list(const void *virt); + +/** + * Memseg walk function prototype. + * + * Returning 0 will continue walk + * Returning 1 will stop the walk + * Returning -1 will stop the walk and report error + */ +typedef int (*rte_memseg_walk_t)(const struct rte_memseg_list *msl, + const struct rte_memseg *ms, void *arg); + +/** + * Memseg contig walk function prototype. This will trigger a callback on every + * VA-contiguous area starting at memseg ``ms``, so total valid VA space at each + * callback call will be [``ms->addr``, ``ms->addr + len``). 
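As a small example of the address translation helpers above, a sketch that inspects a buffer previously obtained from rte_malloc() (rte_mem_virt2memseg() is experimental):

#include <inttypes.h>
#include <stdio.h>
#include <rte_memory.h>

static void
inspect_buffer(const void *buf)
{
        rte_iova_t iova = rte_mem_virt2iova(buf);
        const struct rte_memseg *ms = rte_mem_virt2memseg(buf, NULL);

        if (iova != RTE_BAD_IOVA)
                printf("buffer IOVA: 0x%" PRIx64 "\n", iova);
        if (ms != NULL)
                printf("backing page size %" PRIu64 " bytes, socket %d\n",
                        ms->hugepage_sz, ms->socket_id);
}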
+ * + * Returning 0 will continue walk + * Returning 1 will stop the walk + * Returning -1 will stop the walk and report error + */ +typedef int (*rte_memseg_contig_walk_t)(const struct rte_memseg_list *msl, + const struct rte_memseg *ms, size_t len, void *arg); + +/** + * Memseg list walk function prototype. This will trigger a callback on every + * allocated memseg list. + * + * Returning 0 will continue walk + * Returning 1 will stop the walk + * Returning -1 will stop the walk and report error + */ +typedef int (*rte_memseg_list_walk_t)(const struct rte_memseg_list *msl, + void *arg); + +/** + * Walk list of all memsegs. + * + * @note This function read-locks the memory hotplug subsystem, and thus cannot + * be used within memory-related callback functions. + * + * @note This function will also walk through externally allocated segments. It + * is up to the user to decide whether to skip through these segments. + * + * @param func + * Iterator function + * @param arg + * Argument passed to iterator + * @return + * 0 if walked over the entire list + * 1 if stopped by the user + * -1 if user function reported error + */ +__rte_experimental +int +rte_memseg_walk(rte_memseg_walk_t func, void *arg); + +/** + * Walk each VA-contiguous area. + * + * @note This function read-locks the memory hotplug subsystem, and thus cannot + * be used within memory-related callback functions. + * + * @note This function will also walk through externally allocated segments. It + * is up to the user to decide whether to skip through these segments. + * + * @param func + * Iterator function + * @param arg + * Argument passed to iterator + * @return + * 0 if walked over the entire list + * 1 if stopped by the user + * -1 if user function reported error + */ +__rte_experimental +int +rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg); + +/** + * Walk each allocated memseg list. + * + * @note This function read-locks the memory hotplug subsystem, and thus cannot + * be used within memory-related callback functions. + * + * @note This function will also walk through externally allocated segments. It + * is up to the user to decide whether to skip through these segments. + * + * @param func + * Iterator function + * @param arg + * Argument passed to iterator + * @return + * 0 if walked over the entire list + * 1 if stopped by the user + * -1 if user function reported error + */ +__rte_experimental +int +rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg); + +/** + * Walk list of all memsegs without performing any locking. + * + * @note This function does not perform any locking, and is only safe to call + * from within memory-related callback functions. + * + * @param func + * Iterator function + * @param arg + * Argument passed to iterator + * @return + * 0 if walked over the entire list + * 1 if stopped by the user + * -1 if user function reported error + */ +__rte_experimental +int +rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg); + +/** + * Walk each VA-contiguous area without performing any locking. + * + * @note This function does not perform any locking, and is only safe to call + * from within memory-related callback functions. 
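The walk functions above are used with a small callback; for instance, a sketch that sums the length of every allocated memseg (experimental API, and not callable from memory event callbacks because of the read lock):

#include <rte_memory.h>

static int
sum_seg_len(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
                void *arg)
{
        size_t *total = arg;

        (void)msl;
        *total += ms->len;
        return 0; /* 0 = keep walking, 1 = stop, -1 = stop with error */
}

static size_t
total_memseg_len(void)
{
        size_t total = 0;

        if (rte_memseg_walk(sum_seg_len, &total) < 0)
                return 0;
        return total;
}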
+ * + * @param func + * Iterator function + * @param arg + * Argument passed to iterator + * @return + * 0 if walked over the entire list + * 1 if stopped by the user + * -1 if user function reported error + */ +__rte_experimental +int +rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg); + +/** + * Walk each allocated memseg list without performing any locking. + * + * @note This function does not perform any locking, and is only safe to call + * from within memory-related callback functions. + * + * @param func + * Iterator function + * @param arg + * Argument passed to iterator + * @return + * 0 if walked over the entire list + * 1 if stopped by the user + * -1 if user function reported error + */ +__rte_experimental +int +rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg); + +/** + * Return file descriptor associated with a particular memseg (if available). + * + * @note This function read-locks the memory hotplug subsystem, and thus cannot + * be used within memory-related callback functions. + * + * @note This returns an internal file descriptor. Performing any operations on + * this file descriptor is inherently dangerous, so it should be treated + * as read-only for all intents and purposes. + * + * @param ms + * A pointer to memseg for which to get file descriptor. + * + * @return + * Valid file descriptor in case of success. + * -1 in case of error, with ``rte_errno`` set to the following values: + * - EINVAL - ``ms`` pointer was NULL or did not point to a valid memseg + * - ENODEV - ``ms`` fd is not available + * - ENOENT - ``ms`` is an unused segment + * - ENOTSUP - segment fd's are not supported + */ +__rte_experimental +int +rte_memseg_get_fd(const struct rte_memseg *ms); + +/** + * Return file descriptor associated with a particular memseg (if available). + * + * @note This function does not perform any locking, and is only safe to call + * from within memory-related callback functions. + * + * @note This returns an internal file descriptor. Performing any operations on + * this file descriptor is inherently dangerous, so it should be treated + * as read-only for all intents and purposes. + * + * @param ms + * A pointer to memseg for which to get file descriptor. + * + * @return + * Valid file descriptor in case of success. + * -1 in case of error, with ``rte_errno`` set to the following values: + * - EINVAL - ``ms`` pointer was NULL or did not point to a valid memseg + * - ENODEV - ``ms`` fd is not available + * - ENOENT - ``ms`` is an unused segment + * - ENOTSUP - segment fd's are not supported + */ +__rte_experimental +int +rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms); + +/** + * Get offset into segment file descriptor associated with a particular memseg + * (if available). + * + * @note This function read-locks the memory hotplug subsystem, and thus cannot + * be used within memory-related callback functions. + * + * @param ms + * A pointer to memseg for which to get file descriptor. + * @param offset + * A pointer to offset value where the result will be stored. + * + * @return + * Valid file descriptor in case of success. 
+ * -1 in case of error, with ``rte_errno`` set to the following values: + * - EINVAL - ``ms`` pointer was NULL or did not point to a valid memseg + * - EINVAL - ``offset`` pointer was NULL + * - ENODEV - ``ms`` fd is not available + * - ENOENT - ``ms`` is an unused segment + * - ENOTSUP - segment fd's are not supported + */ +__rte_experimental +int +rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset); + +/** + * Get offset into segment file descriptor associated with a particular memseg + * (if available). + * + * @note This function does not perform any locking, and is only safe to call + * from within memory-related callback functions. + * + * @param ms + * A pointer to memseg for which to get file descriptor. + * @param offset + * A pointer to offset value where the result will be stored. + * + * @return + * Valid file descriptor in case of success. + * -1 in case of error, with ``rte_errno`` set to the following values: + * - EINVAL - ``ms`` pointer was NULL or did not point to a valid memseg + * - EINVAL - ``offset`` pointer was NULL + * - ENODEV - ``ms`` fd is not available + * - ENOENT - ``ms`` is an unused segment + * - ENOTSUP - segment fd's are not supported + */ +__rte_experimental +int +rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms, + size_t *offset); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Register external memory chunk with DPDK. + * + * @note Using this API is mutually exclusive with ``rte_malloc`` family of + * API's. + * + * @note This API will not perform any DMA mapping. It is expected that user + * will do that themselves. + * + * @note Before accessing this memory in other processes, it needs to be + * attached in each of those processes by calling ``rte_extmem_attach`` in + * each other process. + * + * @param va_addr + * Start of virtual area to register. Must be aligned by ``page_sz``. + * @param len + * Length of virtual area to register. Must be aligned by ``page_sz``. + * @param iova_addrs + * Array of page IOVA addresses corresponding to each page in this memory + * area. Can be NULL, in which case page IOVA addresses will be set to + * RTE_BAD_IOVA. + * @param n_pages + * Number of elements in the iova_addrs array. Ignored if ``iova_addrs`` + * is NULL. + * @param page_sz + * Page size of the underlying memory + * + * @return + * - 0 on success + * - -1 in case of error, with rte_errno set to one of the following: + * EINVAL - one of the parameters was invalid + * EEXIST - memory chunk is already registered + * ENOSPC - no more space in internal config to store a new memory chunk + */ +__rte_experimental +int +rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[], + unsigned int n_pages, size_t page_sz); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Unregister external memory chunk with DPDK. + * + * @note Using this API is mutually exclusive with ``rte_malloc`` family of + * API's. + * + * @note This API will not perform any DMA unmapping. It is expected that user + * will do that themselves. + * + * @note Before calling this function, all other processes must call + * ``rte_extmem_detach`` to detach from the memory area. 
+ * + * @param va_addr + * Start of virtual area to unregister + * @param len + * Length of virtual area to unregister + * + * @return + * - 0 on success + * - -1 in case of error, with rte_errno set to one of the following: + * EINVAL - one of the parameters was invalid + * ENOENT - memory chunk was not found + */ +__rte_experimental +int +rte_extmem_unregister(void *va_addr, size_t len); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Attach to external memory chunk registered in another process. + * + * @note Using this API is mutually exclusive with ``rte_malloc`` family of + * API's. + * + * @note This API will not perform any DMA mapping. It is expected that user + * will do that themselves. + * + * @param va_addr + * Start of virtual area to register + * @param len + * Length of virtual area to register + * + * @return + * - 0 on success + * - -1 in case of error, with rte_errno set to one of the following: + * EINVAL - one of the parameters was invalid + * ENOENT - memory chunk was not found + */ +__rte_experimental +int +rte_extmem_attach(void *va_addr, size_t len); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Detach from external memory chunk registered in another process. + * + * @note Using this API is mutually exclusive with ``rte_malloc`` family of + * API's. + * + * @note This API will not perform any DMA unmapping. It is expected that user + * will do that themselves. + * + * @param va_addr + * Start of virtual area to unregister + * @param len + * Length of virtual area to unregister + * + * @return + * - 0 on success + * - -1 in case of error, with rte_errno set to one of the following: + * EINVAL - one of the parameters was invalid + * ENOENT - memory chunk was not found + */ +__rte_experimental +int +rte_extmem_detach(void *va_addr, size_t len); + +/** + * Dump the physical memory layout to a file. + * + * @note This function read-locks the memory hotplug subsystem, and thus cannot + * be used within memory-related callback functions. + * + * @param f + * A pointer to a file for output + */ +void rte_dump_physmem_layout(FILE *f); + +/** + * Get the total amount of available physical memory. + * + * @note This function read-locks the memory hotplug subsystem, and thus cannot + * be used within memory-related callback functions. + * + * @return + * The total amount of available physical memory in bytes. + */ +uint64_t rte_eal_get_physmem_size(void); + +/** + * Get the number of memory channels. + * + * @return + * The number of memory channels on the system. The value is 0 if unknown + * or not the same on all devices. + */ +unsigned rte_memory_get_nchannel(void); + +/** + * Get the number of memory ranks. + * + * @return + * The number of memory ranks on the system. The value is 0 if unknown or + * not the same on all devices. + */ +unsigned rte_memory_get_nrank(void); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Check if all currently allocated memory segments are compliant with + * supplied DMA address width. + * + * @param maskbits + * Address width to check against. + */ +__rte_experimental +int rte_mem_check_dma_mask(uint8_t maskbits); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Check if all currently allocated memory segments are compliant with + * supplied DMA address width. 
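The extmem API above lends itself to a short sketch (again, not part of the patch). It assumes the area comes from an anonymous mmap() whose natural alignment matches page_sz, and it leaves the IOVA table out (NULL is allowed, giving RTE_BAD_IOVA addresses):

    #include <stddef.h>
    #include <sys/mman.h>

    #include <rte_memory.h>

    /* Hand an externally allocated area to DPDK; IOVAs left unset (NULL). */
    static void *
    extmem_create(size_t len, size_t page_sz)
    {
        void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (va == MAP_FAILED)
            return NULL;
        /* va and len must both be multiples of page_sz. */
        if (rte_extmem_register(va, len, NULL, 0, page_sz) != 0) {
            munmap(va, len);
            return NULL;
        }
        return va;
    }

A secondary process would then call rte_extmem_attach(va, len) on the same range before touching it; rte_extmem_detach() and rte_extmem_unregister() undo the two steps in reverse order.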
This function will use + * rte_memseg_walk_thread_unsafe instead of rte_memseg_walk implying + * memory_hotplug_lock will not be acquired avoiding deadlock during + * memory initialization. + * + * This function is just for EAL core memory internal use. Drivers should + * use the previous rte_mem_check_dma_mask. + * + * @param maskbits + * Address width to check against. + */ +__rte_experimental +int rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits); + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Set dma mask to use once memory initialization is done. Previous functions + * rte_mem_check_dma_mask and rte_mem_check_dma_mask_thread_unsafe can not be + * used safely until memory has been initialized. + */ +__rte_experimental +void rte_mem_set_dma_mask(uint8_t maskbits); + +/** + * Drivers based on uio will not load unless physical + * addresses are obtainable. It is only possible to get + * physical addresses when running as a privileged user. + * + * @return + * 1 if the system is able to obtain physical addresses. + * 0 if using DMA addresses through an IOMMU. + */ +int rte_eal_using_phys_addrs(void); + + +/** + * Enum indicating which kind of memory event has happened. Used by callbacks to + * distinguish between memory allocations and deallocations. + */ +enum rte_mem_event { + RTE_MEM_EVENT_ALLOC = 0, /**< Allocation event. */ + RTE_MEM_EVENT_FREE, /**< Deallocation event. */ +}; +#define RTE_MEM_EVENT_CALLBACK_NAME_LEN 64 +/**< maximum length of callback name */ + +/** + * Function typedef used to register callbacks for memory events. + */ +typedef void (*rte_mem_event_callback_t)(enum rte_mem_event event_type, + const void *addr, size_t len, void *arg); + +/** + * Function used to register callbacks for memory events. + * + * @note callbacks will happen while memory hotplug subsystem is write-locked, + * therefore some functions (e.g. `rte_memseg_walk()`) will cause a + * deadlock when called from within such callbacks. + * + * @note mem event callbacks not being supported is an expected error condition, + * so user code needs to handle this situation. In these cases, return + * value will be -1, and rte_errno will be set to ENOTSUP. + * + * @param name + * Name associated with specified callback to be added to the list. + * + * @param clb + * Callback function pointer. + * + * @param arg + * Argument to pass to the callback. + * + * @return + * 0 on successful callback register + * -1 on unsuccessful callback register, with rte_errno value indicating + * reason for failure. + */ +__rte_experimental +int +rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb, + void *arg); + +/** + * Function used to unregister callbacks for memory events. + * + * @param name + * Name associated with specified callback to be removed from the list. + * + * @param arg + * Argument to look for among callbacks with specified callback name. + * + * @return + * 0 on successful callback unregister + * -1 on unsuccessful callback unregister, with rte_errno value indicating + * reason for failure. + */ +__rte_experimental +int +rte_mem_event_callback_unregister(const char *name, void *arg); + + +#define RTE_MEM_ALLOC_VALIDATOR_NAME_LEN 64 +/**< maximum length of alloc validator name */ +/** + * Function typedef used to register memory allocation validation callbacks. + * + * Returning 0 will allow allocation attempt to continue. Returning -1 will + * prevent allocation from succeeding. 
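As an illustration of the event callback typedef and registration function above, a driver-style component might hook allocation events roughly as follows (a sketch only; the callback name and the "extra-mem-hook" string are hypothetical):

    #include <errno.h>
    #include <stdio.h>

    #include <rte_common.h>
    #include <rte_errno.h>
    #include <rte_memory.h>

    /* Runs with the memory hotplug lock write-held: keep the work minimal
     * and avoid locking variants such as rte_memseg_walk() in here. */
    static void
    mem_event_cb(enum rte_mem_event event_type, const void *addr, size_t len,
            void *arg __rte_unused)
    {
        printf("%s: %p, len %zu\n",
                event_type == RTE_MEM_EVENT_ALLOC ? "alloc" : "free",
                addr, len);
    }

    static int
    hook_mem_events(void)
    {
        if (rte_mem_event_callback_register("extra-mem-hook",
                mem_event_cb, NULL) == 0)
            return 0;
        /* ENOTSUP (e.g. legacy memory mode) is an expected, non-fatal case. */
        return rte_errno == ENOTSUP ? 0 : -1;
    }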
+ */ +typedef int (*rte_mem_alloc_validator_t)(int socket_id, + size_t cur_limit, size_t new_len); + +/** + * @brief Register validator callback for memory allocations. + * + * Callbacks registered by this function will be called right before memory + * allocator is about to trigger allocation of more pages from the system if + * said allocation will bring total memory usage above specified limit on + * specified socket. User will be able to cancel pending allocation if callback + * returns -1. + * + * @note callbacks will happen while memory hotplug subsystem is write-locked, + * therefore some functions (e.g. `rte_memseg_walk()`) will cause a + * deadlock when called from within such callbacks. + * + * @note validator callbacks not being supported is an expected error condition, + * so user code needs to handle this situation. In these cases, return + * value will be -1, and rte_errno will be set to ENOTSUP. + * + * @param name + * Name associated with specified callback to be added to the list. + * + * @param clb + * Callback function pointer. + * + * @param socket_id + * Socket ID on which to watch for allocations. + * + * @param limit + * Limit above which to trigger callbacks. + * + * @return + * 0 on successful callback register + * -1 on unsuccessful callback register, with rte_errno value indicating + * reason for failure. + */ +__rte_experimental +int +rte_mem_alloc_validator_register(const char *name, + rte_mem_alloc_validator_t clb, int socket_id, size_t limit); + +/** + * @brief Unregister validator callback for memory allocations. + * + * @param name + * Name associated with specified callback to be removed from the list. + * + * @param socket_id + * Socket ID on which to watch for allocations. + * + * @return + * 0 on successful callback unregister + * -1 on unsuccessful callback unregister, with rte_errno value indicating + * reason for failure. + */ +__rte_experimental +int +rte_mem_alloc_validator_unregister(const char *name, int socket_id); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MEMORY_H_ */ diff --git a/lib/librte_eal/include/rte_memzone.h b/lib/librte_eal/include/rte_memzone.h new file mode 100644 index 0000000000..f478fa9e67 --- /dev/null +++ b/lib/librte_eal/include/rte_memzone.h @@ -0,0 +1,320 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_MEMZONE_H_ +#define _RTE_MEMZONE_H_ + +/** + * @file + * RTE Memzone + * + * The goal of the memzone allocator is to reserve contiguous + * portions of physical memory. These zones are identified by a name. + * + * The memzone descriptors are shared by all partitions and are + * located in a known place of physical memory. This zone is accessed + * using rte_eal_get_configuration(). The lookup (by name) of a + * memory zone can be done in any partition and returns the same + * physical address. + * + * A reserved memory zone cannot be unreserved. The reservation shall + * be done at initialization time only. + */ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTE_MEMZONE_2MB 0x00000001 /**< Use 2MB pages. */ +#define RTE_MEMZONE_1GB 0x00000002 /**< Use 1GB pages. */ +#define RTE_MEMZONE_16MB 0x00000100 /**< Use 16MB pages. */ +#define RTE_MEMZONE_16GB 0x00000200 /**< Use 16GB pages. */ +#define RTE_MEMZONE_256KB 0x00010000 /**< Use 256KB pages. */ +#define RTE_MEMZONE_256MB 0x00020000 /**< Use 256MB pages. */ +#define RTE_MEMZONE_512MB 0x00040000 /**< Use 512MB pages. 
*/ +#define RTE_MEMZONE_4GB 0x00080000 /**< Use 4GB pages. */ +#define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004 /**< Use available page size */ +#define RTE_MEMZONE_IOVA_CONTIG 0x00100000 /**< Ask for IOVA-contiguous memzone. */ + +/** + * A structure describing a memzone, which is a contiguous portion of + * physical memory identified by a name. + */ +struct rte_memzone { + +#define RTE_MEMZONE_NAMESIZE 32 /**< Maximum length of memory zone name.*/ + char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the memory zone. */ + + RTE_STD_C11 + union { + phys_addr_t phys_addr; /**< deprecated - Start physical address. */ + rte_iova_t iova; /**< Start IO address. */ + }; + RTE_STD_C11 + union { + void *addr; /**< Start virtual address. */ + uint64_t addr_64; /**< Makes sure addr is always 64-bits */ + }; + size_t len; /**< Length of the memzone. */ + + uint64_t hugepage_sz; /**< The page size of underlying memory */ + + int32_t socket_id; /**< NUMA socket ID. */ + + uint32_t flags; /**< Characteristics of this memzone. */ +} __attribute__((__packed__)); + +/** + * Reserve a portion of physical memory. + * + * This function reserves some memory and returns a pointer to a + * correctly filled memzone descriptor. If the allocation cannot be + * done, return NULL. + * + * @note Reserving memzones with len set to 0 will only attempt to allocate + * memzones from memory that is already available. It will not trigger any + * new allocations. + * + * @note: When reserving memzones with len set to 0, it is preferable to also + * set a valid socket_id. Setting socket_id to SOCKET_ID_ANY is supported, but + * will likely not yield expected results. Specifically, the resulting memzone + * may not necessarily be the biggest memzone available, but rather biggest + * memzone available on socket id corresponding to an lcore from which + * reservation was called. + * + * @param name + * The name of the memzone. If it already exists, the function will + * fail and return NULL. + * @param len + * The size of the memory to be reserved. If it + * is 0, the biggest contiguous zone will be reserved. + * @param socket_id + * The socket identifier in the case of + * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA + * constraint for the reserved zone. + * @param flags + * The flags parameter is used to request memzones to be + * taken from specifically sized hugepages. + * - RTE_MEMZONE_2MB - Reserved from 2MB pages + * - RTE_MEMZONE_1GB - Reserved from 1GB pages + * - RTE_MEMZONE_16MB - Reserved from 16MB pages + * - RTE_MEMZONE_16GB - Reserved from 16GB pages + * - RTE_MEMZONE_256KB - Reserved from 256KB pages + * - RTE_MEMZONE_256MB - Reserved from 256MB pages + * - RTE_MEMZONE_512MB - Reserved from 512MB pages + * - RTE_MEMZONE_4GB - Reserved from 4GB pages + * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if + * the requested page size is unavailable. + * If this flag is not set, the function + * will return error on an unavailable size + * request. + * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous. + * This option should be used when allocating + * memory intended for hardware rings etc. + * @return + * A pointer to a correctly-filled read-only memzone descriptor, or NULL + * on error. 
+ * On error case, rte_errno will be set appropriately: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + * - EINVAL - invalid parameters + */ +const struct rte_memzone *rte_memzone_reserve(const char *name, + size_t len, int socket_id, + unsigned flags); + +/** + * Reserve a portion of physical memory with alignment on a specified + * boundary. + * + * This function reserves some memory with alignment on a specified + * boundary, and returns a pointer to a correctly filled memzone + * descriptor. If the allocation cannot be done or if the alignment + * is not a power of 2, returns NULL. + * + * @note Reserving memzones with len set to 0 will only attempt to allocate + * memzones from memory that is already available. It will not trigger any + * new allocations. + * + * @note: When reserving memzones with len set to 0, it is preferable to also + * set a valid socket_id. Setting socket_id to SOCKET_ID_ANY is supported, but + * will likely not yield expected results. Specifically, the resulting memzone + * may not necessarily be the biggest memzone available, but rather biggest + * memzone available on socket id corresponding to an lcore from which + * reservation was called. + * + * @param name + * The name of the memzone. If it already exists, the function will + * fail and return NULL. + * @param len + * The size of the memory to be reserved. If it + * is 0, the biggest contiguous zone will be reserved. + * @param socket_id + * The socket identifier in the case of + * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA + * constraint for the reserved zone. + * @param flags + * The flags parameter is used to request memzones to be + * taken from specifically sized hugepages. + * - RTE_MEMZONE_2MB - Reserved from 2MB pages + * - RTE_MEMZONE_1GB - Reserved from 1GB pages + * - RTE_MEMZONE_16MB - Reserved from 16MB pages + * - RTE_MEMZONE_16GB - Reserved from 16GB pages + * - RTE_MEMZONE_256KB - Reserved from 256KB pages + * - RTE_MEMZONE_256MB - Reserved from 256MB pages + * - RTE_MEMZONE_512MB - Reserved from 512MB pages + * - RTE_MEMZONE_4GB - Reserved from 4GB pages + * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if + * the requested page size is unavailable. + * If this flag is not set, the function + * will return error on an unavailable size + * request. + * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous. + * This option should be used when allocating + * memory intended for hardware rings etc. + * @param align + * Alignment for resulting memzone. Must be a power of 2. + * @return + * A pointer to a correctly-filled read-only memzone descriptor, or NULL + * on error. 
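For readers new to the memzone API, a reservation could look like the following sketch (the zone name and size are made up; not part of the patch):

    #include <stdio.h>

    #include <rte_errno.h>
    #include <rte_memzone.h>

    /* Reserve 2 MB of IOVA-contiguous memory, e.g. for a hardware ring. */
    static const struct rte_memzone *
    ring_mem_reserve(void)
    {
        const struct rte_memzone *mz = rte_memzone_reserve("example_ring_mem",
                2 * 1024 * 1024, SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG);

        if (mz == NULL)
            printf("reserve failed: %s\n", rte_strerror(rte_errno));
        return mz;
    }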
+ * On error case, rte_errno will be set appropriately: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + * - EINVAL - invalid parameters + */ +const struct rte_memzone *rte_memzone_reserve_aligned(const char *name, + size_t len, int socket_id, + unsigned flags, unsigned align); + +/** + * Reserve a portion of physical memory with specified alignment and + * boundary. + * + * This function reserves some memory with specified alignment and + * boundary, and returns a pointer to a correctly filled memzone + * descriptor. If the allocation cannot be done or if the alignment + * or boundary are not a power of 2, returns NULL. + * Memory buffer is reserved in a way, that it wouldn't cross specified + * boundary. That implies that requested length should be less or equal + * then boundary. + * + * @note Reserving memzones with len set to 0 will only attempt to allocate + * memzones from memory that is already available. It will not trigger any + * new allocations. + * + * @note: When reserving memzones with len set to 0, it is preferable to also + * set a valid socket_id. Setting socket_id to SOCKET_ID_ANY is supported, but + * will likely not yield expected results. Specifically, the resulting memzone + * may not necessarily be the biggest memzone available, but rather biggest + * memzone available on socket id corresponding to an lcore from which + * reservation was called. + * + * @param name + * The name of the memzone. If it already exists, the function will + * fail and return NULL. + * @param len + * The size of the memory to be reserved. If it + * is 0, the biggest contiguous zone will be reserved. + * @param socket_id + * The socket identifier in the case of + * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA + * constraint for the reserved zone. + * @param flags + * The flags parameter is used to request memzones to be + * taken from specifically sized hugepages. + * - RTE_MEMZONE_2MB - Reserved from 2MB pages + * - RTE_MEMZONE_1GB - Reserved from 1GB pages + * - RTE_MEMZONE_16MB - Reserved from 16MB pages + * - RTE_MEMZONE_16GB - Reserved from 16GB pages + * - RTE_MEMZONE_256KB - Reserved from 256KB pages + * - RTE_MEMZONE_256MB - Reserved from 256MB pages + * - RTE_MEMZONE_512MB - Reserved from 512MB pages + * - RTE_MEMZONE_4GB - Reserved from 4GB pages + * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if + * the requested page size is unavailable. + * If this flag is not set, the function + * will return error on an unavailable size + * request. + * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous. + * This option should be used when allocating + * memory intended for hardware rings etc. + * @param align + * Alignment for resulting memzone. Must be a power of 2. + * @param bound + * Boundary for resulting memzone. Must be a power of 2 or zero. + * Zero value implies no boundary condition. + * @return + * A pointer to a correctly-filled read-only memzone descriptor, or NULL + * on error. 
+ * On error case, rte_errno will be set appropriately: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + * - EINVAL - invalid parameters + */ +const struct rte_memzone *rte_memzone_reserve_bounded(const char *name, + size_t len, int socket_id, + unsigned flags, unsigned align, unsigned bound); + +/** + * Free a memzone. + * + * @param mz + * A pointer to the memzone + * @return + * -EINVAL - invalid parameter. + * 0 - success + */ +int rte_memzone_free(const struct rte_memzone *mz); + +/** + * Lookup for a memzone. + * + * Get a pointer to a descriptor of an already reserved memory + * zone identified by the name given as an argument. + * + * @param name + * The name of the memzone. + * @return + * A pointer to a read-only memzone descriptor. + */ +const struct rte_memzone *rte_memzone_lookup(const char *name); + +/** + * Dump all reserved memzones to a file. + * + * @param f + * A pointer to a file for output + */ +void rte_memzone_dump(FILE *f); + +/** + * Walk list of all memzones + * + * @param func + * Iterator function + * @param arg + * Argument passed to iterator + */ +void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *arg), + void *arg); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MEMZONE_H_ */ diff --git a/lib/librte_eal/include/rte_option.h b/lib/librte_eal/include/rte_option.h new file mode 100644 index 0000000000..7ad65a4eb4 --- /dev/null +++ b/lib/librte_eal/include/rte_option.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation. + */ + +#ifndef __INCLUDE_RTE_OPTION_H__ +#define __INCLUDE_RTE_OPTION_H__ + +/** + * @file + * + * This API offers the ability to register options to the EAL command line and + * map those options to functions that will be executed at the end of EAL + * initialization. These options will be available as part of the EAL command + * line of applications and are dynamically managed. + * + * This is used primarily by DPDK libraries offering command line options. + * Currently, this API is limited to registering options without argument. + * + * The register API can be used to resolve circular dependency issues + * between EAL and the library. The library uses EAL, but is also initialized + * by EAL. Hence, EAL depends on the init function of the library. The API + * introduced in rte_option allows us to register the library init with EAL + * (passing a function pointer) and avoid the circular dependency. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef int (*rte_option_cb)(void); + +/** + * Structure describing an EAL command line option dynamically registered. + * + * Common EAL options are mostly statically defined. + * Some libraries need additional options to be dynamically added. + * This structure describes such options. + */ +struct rte_option { + TAILQ_ENTRY(rte_option) next; /**< Next entry in the list. */ + const char *name; /**< The option name. */ + const char *usage; /**< Option summary string. */ + rte_option_cb cb; /**< Function called when option is used. */ + int enabled; /**< Set when the option is used. */ +}; + +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice + * + * Register an option to the EAL command line. 
+ * When recognized, the associated function will be executed at the end of EAL + * initialization. + * + * The associated structure must be available the whole time this option is + * registered (i.e. not stack memory). + * + * @param opt + * Structure describing the option to parse. + * + * @return + * 0 on success, <0 otherwise. + */ +__rte_experimental +int +rte_option_register(struct rte_option *opt); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/librte_eal/include/rte_pci_dev_feature_defs.h b/lib/librte_eal/include/rte_pci_dev_feature_defs.h new file mode 100644 index 0000000000..e12c22081f --- /dev/null +++ b/lib/librte_eal/include/rte_pci_dev_feature_defs.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_PCI_DEV_DEFS_H_ +#define _RTE_PCI_DEV_DEFS_H_ + +/* interrupt mode */ +enum rte_intr_mode { + RTE_INTR_MODE_NONE = 0, + RTE_INTR_MODE_LEGACY, + RTE_INTR_MODE_MSI, + RTE_INTR_MODE_MSIX +}; + +#endif /* _RTE_PCI_DEV_DEFS_H_ */ diff --git a/lib/librte_eal/include/rte_pci_dev_features.h b/lib/librte_eal/include/rte_pci_dev_features.h new file mode 100644 index 0000000000..6104123d27 --- /dev/null +++ b/lib/librte_eal/include/rte_pci_dev_features.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_PCI_DEV_FEATURES_H +#define _RTE_PCI_DEV_FEATURES_H + +#include + +#define RTE_INTR_MODE_NONE_NAME "none" +#define RTE_INTR_MODE_LEGACY_NAME "legacy" +#define RTE_INTR_MODE_MSI_NAME "msi" +#define RTE_INTR_MODE_MSIX_NAME "msix" + +#endif diff --git a/lib/librte_eal/include/rte_per_lcore.h b/lib/librte_eal/include/rte_per_lcore.h new file mode 100644 index 0000000000..eaedf0cb37 --- /dev/null +++ b/lib/librte_eal/include/rte_per_lcore.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_PER_LCORE_H_ +#define _RTE_PER_LCORE_H_ + +/** + * @file + * + * Per-lcore variables in RTE + * + * This file defines an API for instantiating per-lcore "global + * variables" that are environment-specific. Note that in all + * environments, a "shared variable" is the default when you use a + * global variable. + * + * Parts of this are execution environment specific. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * Macro to define a per lcore variable "var" of type "type", don't + * use keywords like "static" or "volatile" in type, just prefix the + * whole macro. + */ +#define RTE_DEFINE_PER_LCORE(type, name) \ + __thread __typeof__(type) per_lcore_##name + +/** + * Macro to declare an extern per lcore variable "var" of type "type" + */ +#define RTE_DECLARE_PER_LCORE(type, name) \ + extern __thread __typeof__(type) per_lcore_##name + +/** + * Read/write the per-lcore variable value + */ +#define RTE_PER_LCORE(name) (per_lcore_##name) + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_PER_LCORE_H_ */ diff --git a/lib/librte_eal/include/rte_random.h b/lib/librte_eal/include/rte_random.h new file mode 100644 index 0000000000..2b30ec85c1 --- /dev/null +++ b/lib/librte_eal/include/rte_random.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_RANDOM_H_ +#define _RTE_RANDOM_H_ + +/** + * @file + * + * Pseudo-random Generators in RTE + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#include + +/** + * Seed the pseudo-random generator. 
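Stepping back to the per-lcore macros in rte_per_lcore.h above, a typical pattern is one private statistics counter per lcore (a sketch, not part of the patch; note that "static" simply prefixes the whole macro, as the comment requires):

    #include <stdint.h>

    #include <rte_per_lcore.h>

    /* One counter instance per lcore. */
    static RTE_DEFINE_PER_LCORE(uint64_t, rx_pkt_count);

    static inline void
    count_rx(uint64_t n)
    {
        RTE_PER_LCORE(rx_pkt_count) += n;   /* touches only this lcore's copy */
    }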
+ * + * The generator is automatically seeded by the EAL init with a timer + * value. It may need to be re-seeded by the user with a real random + * value. + * + * This function is not multi-thread safe in regards to other + * rte_srand() calls, nor is it in relation to concurrent rte_rand() + * calls. + * + * @param seedval + * The value of the seed. + */ +void +rte_srand(uint64_t seedval); + +/** + * Get a pseudo-random value. + * + * The generator is not cryptographically secure. + * + * If called from lcore threads, this function is thread-safe. + * + * @return + * A pseudo-random value between 0 and (1<<64)-1. + */ +uint64_t +rte_rand(void); + +/** + * Generates a pseudo-random number with an upper bound. + * + * This function returns an uniformly distributed (unbiased) random + * number less than a user-specified maximum value. + * + * If called from lcore threads, this function is thread-safe. + * + * @param upper_bound + * The upper bound of the generated number. + * @return + * A pseudo-random value between 0 and (upper_bound-1). + */ +__rte_experimental +uint64_t +rte_rand_max(uint64_t upper_bound); + +#ifdef __cplusplus +} +#endif + + +#endif /* _RTE_RANDOM_H_ */ diff --git a/lib/librte_eal/include/rte_reciprocal.h b/lib/librte_eal/include/rte_reciprocal.h new file mode 100644 index 0000000000..63e16fde0a --- /dev/null +++ b/lib/librte_eal/include/rte_reciprocal.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + */ +/* + * Reciprocal divide + * + * Used with permission from original authors + * Hannes Frederic Sowa and Daniel Borkmann + * + * This algorithm is based on the paper "Division by Invariant + * Integers Using Multiplication" by Torbjörn Granlund and Peter + * L. Montgomery. + * + * The assembler implementation from Agner Fog, which this code is + * based on, can be found here: + * http://www.agner.org/optimize/asmlib.zip + * + * This optimization for A/B is helpful if the divisor B is mostly + * runtime invariant. The reciprocal of B is calculated in the + * slow-path with reciprocal_value(). The fast-path can then just use + * a much faster multiplication operation with a variable dividend A + * to calculate the division A/B. 
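The slow-path/fast-path split described in the comment above maps onto the rte_reciprocal_value()/rte_reciprocal_divide() pair declared just below; a rough sketch (not part of the patch):

    #include <stdint.h>

    #include <rte_reciprocal.h>

    /* divisor is runtime-invariant: compute its reciprocal once (slow path),
     * then each division in the loop is multiply-and-shift only (fast path). */
    static uint32_t
    sum_of_quotients(const uint32_t *vals, unsigned int n, uint32_t divisor)
    {
        struct rte_reciprocal r = rte_reciprocal_value(divisor);
        uint32_t acc = 0;
        unsigned int i;

        for (i = 0; i < n; i++)
            acc += rte_reciprocal_divide(vals[i], r);
        return acc;
    }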
+ */ + +#ifndef _RTE_RECIPROCAL_H_ +#define _RTE_RECIPROCAL_H_ + +#include + +struct rte_reciprocal { + uint32_t m; + uint8_t sh1, sh2; +}; + +struct rte_reciprocal_u64 { + uint64_t m; + uint8_t sh1, sh2; +}; + +static inline uint32_t rte_reciprocal_divide(uint32_t a, struct rte_reciprocal R) +{ + uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32); + + return (t + ((a - t) >> R.sh1)) >> R.sh2; +} + +static __rte_always_inline uint64_t +mullhi_u64(uint64_t x, uint64_t y) +{ +#ifdef __SIZEOF_INT128__ + __uint128_t xl = x; + __uint128_t rl = xl * y; + + return (rl >> 64); +#else + uint64_t u0, u1, v0, v1, k, t; + uint64_t w1, w2; + uint64_t whi; + + u1 = x >> 32; u0 = x & 0xFFFFFFFF; + v1 = y >> 32; v0 = y & 0xFFFFFFFF; + + t = u0*v0; + k = t >> 32; + + t = u1*v0 + k; + w1 = t & 0xFFFFFFFF; + w2 = t >> 32; + + t = u0*v1 + w1; + k = t >> 32; + + whi = u1*v1 + w2 + k; + + return whi; +#endif +} + +static __rte_always_inline uint64_t +rte_reciprocal_divide_u64(uint64_t a, const struct rte_reciprocal_u64 *R) +{ + uint64_t t = mullhi_u64(a, R->m); + + return (t + ((a - t) >> R->sh1)) >> R->sh2; +} + +struct rte_reciprocal rte_reciprocal_value(uint32_t d); +struct rte_reciprocal_u64 rte_reciprocal_value_u64(uint64_t d); + +#endif /* _RTE_RECIPROCAL_H_ */ diff --git a/lib/librte_eal/include/rte_service.h b/lib/librte_eal/include/rte_service.h new file mode 100644 index 0000000000..d8701dd4cf --- /dev/null +++ b/lib/librte_eal/include/rte_service.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _RTE_SERVICE_H_ +#define _RTE_SERVICE_H_ + +/** + * @file + * + * Service functions + * + * The service functionality provided by this header allows a DPDK component + * to indicate that it requires a function call in order for it to perform + * its processing. + * + * An example usage of this functionality would be a component that registers + * a service to perform a particular packet processing duty: for example the + * eventdev software PMD. At startup the application requests all services + * that have been registered, and the cores in the service-coremask run the + * required services. The EAL removes these number of cores from the available + * runtime cores, and dedicates them to performing service-core workloads. The + * application has access to the remaining lcores as normal. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +#include +#include + +#define RTE_SERVICE_NAME_MAX 32 + +/* Capabilities of a service. + * + * Use the *rte_service_probe_capability* function to check if a service is + * capable of a specific capability. + */ +/** When set, the service is capable of having multiple threads run it at the + * same time. + */ +#define RTE_SERVICE_CAP_MT_SAFE (1 << 0) + +/** + * Return the number of services registered. + * + * The number of services registered can be passed to *rte_service_get_by_id*, + * enabling the application to retrieve the specification of each service. + * + * @return The number of services registered. + */ +uint32_t rte_service_get_count(void); + +/** + * Return the id of a service by name. + * + * This function provides the id of the service using the service name as + * lookup key. The service id is to be passed to other functions in the + * rte_service_* API. 
+ * + * Example usage: + * @code + * uint32_t service_id; + * int32_t ret = rte_service_get_by_name("service_X", &service_id); + * if (ret) { + * // handle error + * } + * @endcode + * + * @param name The name of the service to retrieve + * @param[out] service_id A pointer to a uint32_t, to be filled in with the id. + * @retval 0 Success. The service id is provided in *service_id*. + * @retval -EINVAL Null *service_id* pointer provided + * @retval -ENODEV No such service registered + */ +int32_t rte_service_get_by_name(const char *name, uint32_t *service_id); + +/** + * Return the name of the service. + * + * @return A pointer to the name of the service. The returned pointer remains + * in ownership of the service, and the application must not free it. + */ +const char *rte_service_get_name(uint32_t id); + +/** + * Check if a service has a specific capability. + * + * This function returns if *service* has implements *capability*. + * See RTE_SERVICE_CAP_* defines for a list of valid capabilities. + * @retval 1 Capability supported by this service instance + * @retval 0 Capability not supported by this service instance + */ +int32_t rte_service_probe_capability(uint32_t id, uint32_t capability); + +/** + * Map or unmap a lcore to a service. + * + * Each core can be added or removed from running a specific service. This + * function enables or disables *lcore* to run *service_id*. + * + * If multiple cores are enabled on a service, an atomic is used to ensure that + * only one cores runs the service at a time. The exception to this is when + * a service indicates that it is multi-thread safe by setting the capability + * called RTE_SERVICE_CAP_MT_SAFE. With the multi-thread safe capability set, + * the service function can be run on multiple threads at the same time. + * + * @param service_id the service to apply the lcore to + * @param lcore The lcore that will be mapped to service + * @param enable Zero to unmap or disable the core, non-zero to enable + * + * @retval 0 lcore map updated successfully + * @retval -EINVAL An invalid service or lcore was provided. + */ +int32_t rte_service_map_lcore_set(uint32_t service_id, uint32_t lcore, + uint32_t enable); + +/** + * Retrieve the mapping of an lcore to a service. + * + * @param service_id the service to apply the lcore to + * @param lcore The lcore that will be mapped to service + * + * @retval 1 lcore is mapped to service + * @retval 0 lcore is not mapped to service + * @retval -EINVAL An invalid service or lcore was provided. + */ +int32_t rte_service_map_lcore_get(uint32_t service_id, uint32_t lcore); + +/** + * Set the runstate of the service. + * + * Each service is either running or stopped. Setting a non-zero runstate + * enables the service to run, while setting runstate zero disables it. + * + * @param id The id of the service + * @param runstate The run state to apply to the service + * + * @retval 0 The service was successfully started + * @retval -EINVAL Invalid service id + */ +int32_t rte_service_runstate_set(uint32_t id, uint32_t runstate); + +/** + * Get the runstate for the service with *id*. See *rte_service_runstate_set* + * for details of runstates. A service can call this function to ensure that + * the application has indicated that it will receive CPU cycles. 
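Building on the lookup example in the comment above, an application could wire a named service to a service lcore along these lines (a sketch; "service_X" is the same placeholder name):

    #include <stdint.h>

    #include <rte_service.h>

    static int
    enable_service_on_lcore(uint32_t lcore_id)
    {
        uint32_t id;

        if (rte_service_get_by_name("service_X", &id) != 0)
            return -1;
        /* Map the service onto the given service lcore and let it run. */
        if (rte_service_map_lcore_set(id, lcore_id, 1) != 0)
            return -1;
        return rte_service_runstate_set(id, 1);
    }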
Either a + * service-core is mapped (default case), or the application has explicitly + * disabled the check that a service-cores is mapped to the service and takes + * responsibility to run the service manually using the available function + * *rte_service_run_iter_on_app_lcore* to do so. + * + * @retval 1 Service is running + * @retval 0 Service is stopped + * @retval -EINVAL Invalid service id + */ +int32_t rte_service_runstate_get(uint32_t id); + +/** + * This function returns whether the service may be currently executing on + * at least one lcore, or definitely is not. This function can be used to + * determine if, after setting the service runstate to stopped, the service + * is still executing a service lcore. + * + * Care must be taken if calling this function when the service runstate is + * running, since the result of this function may be incorrect by the time the + * function returns due to service cores running in parallel. + * + * @retval 1 Service may be running on one or more lcores + * @retval 0 Service is not running on any lcore + * @retval -EINVAL Invalid service id + */ +int32_t +rte_service_may_be_active(uint32_t id); + +/** + * Enable or disable the check for a service-core being mapped to the service. + * An application can disable the check when takes the responsibility to run a + * service itself using *rte_service_run_iter_on_app_lcore*. + * + * @param id The id of the service to set the check on + * @param enable When zero, the check is disabled. Non-zero enables the check. + * + * @retval 0 Success + * @retval -EINVAL Invalid service ID + */ +int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enable); + +/** + * This function runs a service callback from a non-service lcore. + * + * This function is designed to enable gradual porting to service cores, and + * to enable unit tests to verify a service behaves as expected. + * + * When called, this function ensures that the service identified by *id* is + * safe to run on this lcore. Multi-thread safe services are invoked even if + * other cores are simultaneously running them as they are multi-thread safe. + * + * Multi-thread unsafe services are handled depending on the variable + * *serialize_multithread_unsafe*: + * - When set, the function will check if a service is already being invoked + * on another lcore, refusing to run it and returning -EBUSY. + * - When zero, the application takes responsibility to ensure that the service + * indicated by *id* is not going to be invoked by another lcore. This setting + * avoids atomic operations, so is likely to be more performant. + * + * @param id The ID of the service to run + * @param serialize_multithread_unsafe This parameter indicates to the service + * cores library if it is required to use atomics to serialize access + * to mult-thread unsafe services. As there is an overhead in using + * atomics, applications can choose to enable or disable this feature + * + * Note that any thread calling this function MUST be a DPDK EAL thread, as + * the *rte_lcore_id* function is used to access internal data structures. 
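The "run it yourself" mode described above (mapped-lcore check disabled, iterations driven from an application lcore) looks roughly like the following sketch; the quit flag is hypothetical:

    #include <stdint.h>

    #include <rte_service.h>

    /* Must be called from an EAL thread (rte_lcore_id() is used internally). */
    static void
    poll_service_inline(uint32_t id, volatile int *quit)
    {
        /* No service core is mapped: disable the check and take responsibility. */
        rte_service_set_runstate_mapped_check(id, 0);
        rte_service_runstate_set(id, 1);

        while (!*quit)
            /* 1: let the library serialize multi-thread unsafe services. */
            rte_service_run_iter_on_app_lcore(id, 1);
    }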
+ * + * @retval 0 Service was run on the calling thread successfully + * @retval -EBUSY Another lcore is executing the service, and it is not a + * multi-thread safe service, so the service was not run on this lcore + * @retval -ENOEXEC Service is not in a run-able state + * @retval -EINVAL Invalid service id + */ +int32_t rte_service_run_iter_on_app_lcore(uint32_t id, + uint32_t serialize_multithread_unsafe); + +/** + * Start a service core. + * + * Starting a core makes the core begin polling. Any services assigned to it + * will be run as fast as possible. The application must ensure that the lcore + * is in a launchable state: e.g. call *rte_eal_lcore_wait* on the lcore_id + * before calling this function. + * + * @retval 0 Success + * @retval -EINVAL Failed to start core. The *lcore_id* passed in is not + * currently assigned to be a service core. + */ +int32_t rte_service_lcore_start(uint32_t lcore_id); + +/** + * Stop a service core. + * + * Stopping a core makes the core become idle, but remains assigned as a + * service core. + * + * @retval 0 Success + * @retval -EINVAL Invalid *lcore_id* provided + * @retval -EALREADY Already stopped core + * @retval -EBUSY Failed to stop core, as it would cause a service to not + * be run, as this is the only core currently running the service. + * The application must stop the service first, and then stop the + * lcore. + */ +int32_t rte_service_lcore_stop(uint32_t lcore_id); + +/** + * Adds lcore to the list of service cores. + * + * This functions can be used at runtime in order to modify the service core + * mask. + * + * @retval 0 Success + * @retval -EBUSY lcore is busy, and not available for service core duty + * @retval -EALREADY lcore is already added to the service core list + * @retval -EINVAL Invalid lcore provided + */ +int32_t rte_service_lcore_add(uint32_t lcore); + +/** + * Removes lcore from the list of service cores. + * + * This can fail if the core is not stopped, see *rte_service_core_stop*. + * + * @retval 0 Success + * @retval -EBUSY Lcore is not stopped, stop service core before removing. + * @retval -EINVAL failed to add lcore to service core mask. + */ +int32_t rte_service_lcore_del(uint32_t lcore); + +/** + * Retrieve the number of service cores currently available. + * + * This function returns the integer count of service cores available. The + * service core count can be used in mapping logic when creating mappings + * from service cores to services. + * + * See *rte_service_lcore_list* for details on retrieving the lcore_id of each + * service core. + * + * @return The number of service cores currently configured. + */ +int32_t rte_service_lcore_count(void); + +/** + * Resets all service core mappings. This does not remove the service cores + * from duty, just unmaps all services / cores, and stops() the service cores. + * The runstate of services is not modified. + * + * @retval 0 Success + */ +int32_t rte_service_lcore_reset_all(void); + +/** + * Enable or disable statistics collection for *service*. + * + * This function enables per core, per-service cycle count collection. + * @param id The service to enable statistics gathering on. + * @param enable Zero to disable statistics, non-zero to enable. + * @retval 0 Success + * @retval -EINVAL Invalid service pointer passed + */ +int32_t rte_service_set_stats_enable(uint32_t id, int32_t enable); + +/** + * Retrieve the list of currently enabled service cores. 
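The lcore management calls above combine into a small lifecycle helper (sketch only; error handling trimmed):

    #include <errno.h>
    #include <stdint.h>

    #include <rte_service.h>

    static int
    spawn_service_core(uint32_t lcore_id)
    {
        int ret = rte_service_lcore_add(lcore_id);  /* lcore must be idle */

        if (ret != 0 && ret != -EALREADY)
            return ret;
        return rte_service_lcore_start(lcore_id);   /* begins polling mapped services */
    }

    static void
    retire_service_core(uint32_t lcore_id)
    {
        /* May fail with -EBUSY while this is the only core running a service. */
        if (rte_service_lcore_stop(lcore_id) == 0)
            rte_service_lcore_del(lcore_id);
    }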
+ * + * This function fills in an application supplied array, with each element + * indicating the lcore_id of a service core. + * + * Adding and removing service cores can be performed using + * *rte_service_lcore_add* and *rte_service_lcore_del*. + * @param [out] array An array of at least *rte_service_lcore_count* items. + * If statically allocating the buffer, use RTE_MAX_LCORE. + * @param [out] n The size of *array*. + * @retval >=0 Number of service cores that have been populated in the array + * @retval -ENOMEM The provided array is not large enough to fill in the + * service core list. No items have been populated, call this function + * with a size of at least *rte_service_core_count* items. + */ +int32_t rte_service_lcore_list(uint32_t array[], uint32_t n); + +/** + * Get the number of services running on the supplied lcore. + * + * @param lcore Id of the service core. + * @retval >=0 Number of services registered to this core. + * @retval -EINVAL Invalid lcore provided + * @retval -ENOTSUP The provided lcore is not a service core. + */ +int32_t rte_service_lcore_count_services(uint32_t lcore); + +/** + * Dumps any information available about the service. When id is UINT32_MAX, + * this function dumps info for all services. + * + * @retval 0 Statistics have been successfully dumped + * @retval -EINVAL Invalid service id provided + */ +int32_t rte_service_dump(FILE *f, uint32_t id); + +/** + * Returns the number of cycles that this service has consumed + */ +#define RTE_SERVICE_ATTR_CYCLES 0 + +/** + * Returns the count of invocations of this service function + */ +#define RTE_SERVICE_ATTR_CALL_COUNT 1 + +/** + * Get an attribute from a service. + * + * @retval 0 Success, the attribute value has been written to *attr_value*. + * -EINVAL Invalid id, attr_id or attr_value was NULL. + */ +int32_t rte_service_attr_get(uint32_t id, uint32_t attr_id, + uint64_t *attr_value); + +/** + * Reset all attribute values of a service. + * + * @param id The service to reset all statistics of + * @retval 0 Successfully reset attributes + * -EINVAL Invalid service id provided + */ +int32_t rte_service_attr_reset_all(uint32_t id); + +/** + * Returns the number of times the service runner has looped. + */ +#define RTE_SERVICE_LCORE_ATTR_LOOPS 0 + +/** + * Get an attribute from a service core. + * + * @param lcore Id of the service core. + * @param attr_id Id of the attribute to be retrieved. + * @param [out] attr_value Pointer to storage in which to write retrieved value. + * @retval 0 Success, the attribute value has been written to *attr_value*. + * -EINVAL Invalid lcore, attr_id or attr_value was NULL. + * -ENOTSUP lcore is not a service core. + */ +int32_t +rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id, + uint64_t *attr_value); + +/** + * Reset all attribute values of a service core. + * + * @param lcore The service core to reset all the statistics of + * @retval 0 Successfully reset attributes + * -EINVAL Invalid service id provided + * -ENOTSUP lcore is not a service core. 
+ */ +int32_t +rte_service_lcore_attr_reset_all(uint32_t lcore); + +#ifdef __cplusplus +} +#endif + + +#endif /* _RTE_SERVICE_H_ */ diff --git a/lib/librte_eal/include/rte_service_component.h b/lib/librte_eal/include/rte_service_component.h new file mode 100644 index 0000000000..16eab79eea --- /dev/null +++ b/lib/librte_eal/include/rte_service_component.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _SERVICE_PRIVATE_H_ +#define _SERVICE_PRIVATE_H_ + +/* This file specifies the internal service specification. + * Include this file if you are writing a component that requires CPU cycles to + * operate, and you wish to run the component using service cores + */ +#include +#include + +/** + * Signature of callback function to run a service. + */ +typedef int32_t (*rte_service_func)(void *args); + +/** + * The specification of a service. + * + * This struct contains metadata about the service itself, the callback + * function to run one iteration of the service, a userdata pointer, flags etc. + */ +struct rte_service_spec { + /** The name of the service. This should be used by the application to + * understand what purpose this service provides. + */ + char name[RTE_SERVICE_NAME_MAX]; + /** The callback to invoke to run one iteration of the service. */ + rte_service_func callback; + /** The userdata pointer provided to the service callback. */ + void *callback_userdata; + /** Flags to indicate the capabilities of this service. See defines in + * the public header file for values of RTE_SERVICE_CAP_* + */ + uint32_t capabilities; + /** NUMA socket ID that this service is affinitized to */ + int socket_id; +}; + +/** + * Register a new service. + * + * A service represents a component that the requires CPU time periodically to + * achieve its purpose. + * + * For example the eventdev SW PMD requires CPU cycles to perform its + * scheduling. This can be achieved by registering it as a service, and the + * application can then assign CPU resources to that service. + * + * Note that when a service component registers itself, it is not permitted to + * add or remove service-core threads, or modify lcore-to-service mappings. The + * only API that may be called by the service-component is + * *rte_service_component_runstate_set*, which indicates that the service + * component is ready to be executed. + * + * @param spec The specification of the service to register + * @param[out] service_id A pointer to a uint32_t, which will be filled in + * during registration of the service. It is set to the integers + * service number given to the service. This parameter may be NULL. + * @retval 0 Successfully registered the service. + * -EINVAL Attempted to register an invalid service (eg, no callback + * set) + */ +int32_t rte_service_component_register(const struct rte_service_spec *spec, + uint32_t *service_id); + +/** + * Unregister a service component. + * + * The service being removed must be stopped before calling this function. + * + * @retval 0 The service was successfully unregistered. + * @retval -EBUSY The service is currently running, stop the service before + * calling unregister. No action has been taken. + */ +int32_t rte_service_component_unregister(uint32_t id); + +/** + * Private function to allow EAL to initialized default mappings. + * + * This function iterates all the services, and maps then to the available + * cores. 
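From the component side, registration against the spec structure above might be sketched as follows (the callback and component name are hypothetical; rte_service_component_runstate_set() is documented just below):

    #include <stdint.h>

    #include <rte_common.h>
    #include <rte_lcore.h>
    #include <rte_service_component.h>

    static int32_t
    example_component_run(void *args __rte_unused)
    {
        /* one iteration of this component's work */
        return 0;
    }

    static int
    example_component_register(void)
    {
        struct rte_service_spec spec = {
            .name = "example_component",
            .callback = example_component_run,
            .callback_userdata = NULL,
            .capabilities = 0,                  /* not multi-thread safe */
            .socket_id = (int)rte_socket_id(),
        };
        uint32_t id;

        if (rte_service_component_register(&spec, &id) != 0)
            return -1;
        /* Mark the backend as ready; the application maps and starts it. */
        return rte_service_component_runstate_set(id, 1);
    }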
Based on the capabilities of the services, they are set to run on the + * available cores in a round-robin manner. + * + * @retval 0 Success + * @retval -ENOTSUP No service lcores in use + * @retval -EINVAL Error while iterating over services + * @retval -ENODEV Error in enabling service lcore on a service + * @retval -ENOEXEC Error when starting services + */ +int32_t rte_service_start_with_defaults(void); + +/** + * Set the backend runstate of a component. + * + * This function allows services to be registered at startup, but not yet + * enabled to run by default. When the service has been configured (via the + * usual method; eg rte_eventdev_configure, the service can mark itself as + * ready to run. The differentiation between backend runstate and + * service_runstate is that the backend runstate is set by the service + * component while the service runstate is reserved for application usage. + * + * @retval 0 Success + */ +int32_t rte_service_component_runstate_set(uint32_t id, uint32_t runstate); + +/** + * Initialize the service library. + * + * In order to use the service library, it must be initialized. EAL initializes + * the library at startup. + * + * @retval 0 Success + * @retval -EALREADY Service library is already initialized + */ +int32_t rte_service_init(void); + +/** + * @internal Free up the memory that has been initialized. + * This routine is to be invoked prior to process termination. + * + * @retval None + */ +void rte_service_finalize(void); + +#endif /* _SERVICE_PRIVATE_H_ */ diff --git a/lib/librte_eal/include/rte_string_fns.h b/lib/librte_eal/include/rte_string_fns.h new file mode 100644 index 0000000000..8bac8243c9 --- /dev/null +++ b/lib/librte_eal/include/rte_string_fns.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2019 Intel Corporation + */ + +/** + * @file + * + * String-related functions as replacement for libc equivalents + */ + +#ifndef _RTE_STRING_FNS_H_ +#define _RTE_STRING_FNS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#include + +/** + * Takes string "string" parameter and splits it at character "delim" + * up to maxtokens-1 times - to give "maxtokens" resulting tokens. Like + * strtok or strsep functions, this modifies its input string, by replacing + * instances of "delim" with '\\0'. All resultant tokens are returned in the + * "tokens" array which must have enough entries to hold "maxtokens". + * + * @param string + * The input string to be split into tokens + * + * @param stringlen + * The max length of the input buffer + * + * @param tokens + * The array to hold the pointers to the tokens in the string + * + * @param maxtokens + * The number of elements in the tokens array. At most, maxtokens-1 splits + * of the string will be done. + * + * @param delim + * The character on which the split of the data will be done + * + * @return + * The number of tokens in the tokens array. 
+ */ +int +rte_strsplit(char *string, int stringlen, + char **tokens, int maxtokens, char delim); + +/** + * @internal + * DPDK-specific version of strlcpy for systems without + * libc or libbsd copies of the function + */ +static inline size_t +rte_strlcpy(char *dst, const char *src, size_t size) +{ + return (size_t)snprintf(dst, size, "%s", src); +} + +/** + * @internal + * DPDK-specific version of strlcat for systems without + * libc or libbsd copies of the function + */ +static inline size_t +rte_strlcat(char *dst, const char *src, size_t size) +{ + size_t l = strnlen(dst, size); + if (l < size) + return l + rte_strlcpy(&dst[l], src, size - l); + return l + strlen(src); +} + +/* pull in a strlcpy function */ +#ifdef RTE_EXEC_ENV_FREEBSD +#ifndef __BSD_VISIBLE /* non-standard functions are hidden */ +#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size) +#define strlcat(dst, src, size) rte_strlcat(dst, src, size) +#endif + +#else /* non-BSD platforms */ +#ifdef RTE_USE_LIBBSD +#include + +#else /* no BSD header files, create own */ +#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size) +#define strlcat(dst, src, size) rte_strlcat(dst, src, size) + +#endif /* RTE_USE_LIBBSD */ +#endif /* FREEBSD */ + +/** + * Copy string src to buffer dst of size dsize. + * At most dsize-1 chars will be copied. + * Always NUL-terminates, unless (dsize == 0). + * Returns number of bytes copied (terminating NUL-byte excluded) on success ; + * negative errno on error. + * + * @param dst + * The destination string. + * + * @param src + * The input string to be copied. + * + * @param dsize + * Length in bytes of the destination buffer. + * + * @return + * The number of bytes copied on success + * -E2BIG if the destination buffer is too small. + */ +ssize_t +rte_strscpy(char *dst, const char *src, size_t dsize); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_STRING_FNS_H */ diff --git a/lib/librte_eal/include/rte_tailq.h b/lib/librte_eal/include/rte_tailq.h new file mode 100644 index 0000000000..b6fe4e5f78 --- /dev/null +++ b/lib/librte_eal/include/rte_tailq.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#ifndef _RTE_TAILQ_H_ +#define _RTE_TAILQ_H_ + +/** + * @file + * Here defines rte_tailq APIs for only internal use + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +/** dummy structure type used by the rte_tailq APIs */ +struct rte_tailq_entry { + TAILQ_ENTRY(rte_tailq_entry) next; /**< Pointer entries for a tailq list */ + void *data; /**< Pointer to the data referenced by this tailq entry */ +}; +/** dummy */ +TAILQ_HEAD(rte_tailq_entry_head, rte_tailq_entry); + +#define RTE_TAILQ_NAMESIZE 32 + +/** + * The structure defining a tailq header entry for storing + * in the rte_config structure in shared memory. Each tailq + * is identified by name. + * Any library storing a set of objects e.g. rings, mempools, hash-tables, + * is recommended to use an entry here, so as to make it easy for + * a multi-process app to find already-created elements in shared memory. 
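A short illustration of rte_strsplit() from above (a sketch; RTE_DIM() comes from rte_common.h). Since the function modifies its input, the example tokenises a copy:

    #include <stdio.h>

    #include <rte_common.h>
    #include <rte_string_fns.h>

    static int
    print_csv_fields(const char *line)
    {
        char buf[128];
        char *tokens[8];
        int i, n;

        if (rte_strlcpy(buf, line, sizeof(buf)) >= sizeof(buf))
            return -1;                              /* input truncated */

        n = rte_strsplit(buf, sizeof(buf), tokens, RTE_DIM(tokens), ',');
        for (i = 0; i < n; i++)
            printf("field %d: %s\n", i, tokens[i]);
        return n;
    }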
+ */ +struct rte_tailq_head { + struct rte_tailq_entry_head tailq_head; /**< NOTE: must be first element */ + char name[RTE_TAILQ_NAMESIZE]; +}; + +struct rte_tailq_elem { + /** + * Reference to head in shared mem, updated at init time by + * rte_eal_tailqs_init() + */ + struct rte_tailq_head *head; + TAILQ_ENTRY(rte_tailq_elem) next; + const char name[RTE_TAILQ_NAMESIZE]; +}; + +/** + * Return the first tailq entry cast to the right struct. + */ +#define RTE_TAILQ_CAST(tailq_entry, struct_name) \ + (struct struct_name *)&(tailq_entry)->tailq_head + +/** + * Utility macro to make looking up a tailqueue for a particular struct easier. + * + * @param name + * The name of tailq + * + * @param struct_name + * The name of the list type we are using. (Generally this is the same as the + * first parameter passed to TAILQ_HEAD macro) + * + * @return + * The return value from rte_eal_tailq_lookup, typecast to the appropriate + * structure pointer type. + * NULL on error, since the tailq_head is the first + * element in the rte_tailq_head structure. + */ +#define RTE_TAILQ_LOOKUP(name, struct_name) \ + RTE_TAILQ_CAST(rte_eal_tailq_lookup(name), struct_name) + +/** + * Dump tail queues to a file. + * + * @param f + * A pointer to a file for output + */ +void rte_dump_tailq(FILE *f); + +/** + * Lookup for a tail queue. + * + * Get a pointer to a tail queue header of a tail + * queue identified by the name given as an argument. + * Note: this function is not multi-thread safe, and should only be called from + * a single thread at a time + * + * @param name + * The name of the queue. + * @return + * A pointer to the tail queue head structure. + */ +struct rte_tailq_head *rte_eal_tailq_lookup(const char *name); + +/** + * Register a tail queue. + * + * Register a tail queue from shared memory. + * This function is mainly used by EAL_REGISTER_TAILQ macro which is used to + * register tailq from the different dpdk libraries. Since this macro is a + * constructor, the function has no access to dpdk shared memory, so the + * registered tailq can not be used before call to rte_eal_init() which calls + * rte_eal_tailqs_init(). + * + * @param t + * The tailq element which contains the name of the tailq you want to + * create (/retrieve when in secondary process). + * @return + * 0 on success or -1 in case of an error. + */ +int rte_eal_tailq_register(struct rte_tailq_elem *t); + +#define EAL_REGISTER_TAILQ(t) \ +RTE_INIT(tailqinitfn_ ##t) \ +{ \ + if (rte_eal_tailq_register(&t) < 0) \ + rte_panic("Cannot initialize tailq: %s\n", t.name); \ +} + +/* This macro permits both remove and free var within the loop safely.*/ +#ifndef TAILQ_FOREACH_SAFE +#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = TAILQ_FIRST((head)); \ + (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ + (var) = (tvar)) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_TAILQ_H_ */ diff --git a/lib/librte_eal/include/rte_test.h b/lib/librte_eal/include/rte_test.h new file mode 100644 index 0000000000..89e47f47a5 --- /dev/null +++ b/lib/librte_eal/include/rte_test.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015 Cavium, Inc + */ + +#ifndef _RTE_TEST_H_ +#define _RTE_TEST_H_ + +#include + +/* Before including rte_test.h file you can define + * RTE_TEST_TRACE_FAILURE(_file, _line, _func) macro to better trace/debug test + * failures. Mostly useful in development phase. 
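The registration macros above follow the pattern already used by existing libraries; a hypothetical object list would be declared along these lines (sketch, not part of the patch):

    #include <sys/queue.h>

    #include <rte_tailq.h>

    /* Process-shared list of hypothetical "example_obj" entries. */
    TAILQ_HEAD(example_obj_list, rte_tailq_entry);

    static struct rte_tailq_elem example_obj_tailq = {
        .name = "EXAMPLE_OBJ_LIST",
    };
    EAL_REGISTER_TAILQ(example_obj_tailq)

    static struct example_obj_list *
    example_obj_list_get(void)
    {
        /* Valid only after rte_eal_init() has run rte_eal_tailqs_init(). */
        return RTE_TAILQ_CAST(example_obj_tailq.head, example_obj_list);
    }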
diff --git a/lib/librte_eal/include/rte_test.h b/lib/librte_eal/include/rte_test.h
new file mode 100644
index 0000000000..89e47f47a5
--- /dev/null
+++ b/lib/librte_eal/include/rte_test.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Cavium, Inc
+ */
+
+#ifndef _RTE_TEST_H_
+#define _RTE_TEST_H_
+
+#include <rte_log.h>
+
+/* Before including rte_test.h you can define the
+ * RTE_TEST_TRACE_FAILURE(_file, _line, _func) macro to better trace/debug test
+ * failures. Mostly useful in the development phase.
+ */
+#ifndef RTE_TEST_TRACE_FAILURE
+#define RTE_TEST_TRACE_FAILURE(_file, _line, _func)
+#endif
+
+
+#define RTE_TEST_ASSERT(cond, msg, ...) do { \
+        if (!(cond)) { \
+                RTE_LOG(DEBUG, EAL, "Test assert %s line %d failed: " \
+                        msg "\n", __func__, __LINE__, ##__VA_ARGS__); \
+                RTE_TEST_TRACE_FAILURE(__FILE__, __LINE__, __func__); \
+                return -1; \
+        } \
+} while (0)
+
+#define RTE_TEST_ASSERT_EQUAL(a, b, msg, ...) \
+        RTE_TEST_ASSERT(a == b, msg, ##__VA_ARGS__)
+
+#define RTE_TEST_ASSERT_NOT_EQUAL(a, b, msg, ...) \
+        RTE_TEST_ASSERT(a != b, msg, ##__VA_ARGS__)
+
+#define RTE_TEST_ASSERT_SUCCESS(val, msg, ...) \
+        RTE_TEST_ASSERT(val == 0, msg, ##__VA_ARGS__)
+
+#define RTE_TEST_ASSERT_FAIL(val, msg, ...) \
+        RTE_TEST_ASSERT(val != 0, msg, ##__VA_ARGS__)
+
+#define RTE_TEST_ASSERT_NULL(val, msg, ...) \
+        RTE_TEST_ASSERT(val == NULL, msg, ##__VA_ARGS__)
+
+#define RTE_TEST_ASSERT_NOT_NULL(val, msg, ...) \
+        RTE_TEST_ASSERT(val != NULL, msg, ##__VA_ARGS__)
+
+#endif /* _RTE_TEST_H_ */
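To show the intended calling convention of these assert macros (an illustrative sketch, not part of the patch; the test function is invented): on failure they log, call RTE_TEST_TRACE_FAILURE if defined, and return -1 from the enclosing function, so they only work inside functions returning int:

    #include <rte_test.h>

    static int
    test_addition(void)
    {
            int sum = 2 + 2;

            /* Returns -1 from test_addition() if the condition fails. */
            RTE_TEST_ASSERT_EQUAL(sum, 4, "unexpected sum %d", sum);
            RTE_TEST_ASSERT_SUCCESS(sum - 4, "sum - 4 should be zero");
            return 0;
    }
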
diff --git a/lib/librte_eal/include/rte_time.h b/lib/librte_eal/include/rte_time.h
new file mode 100644
index 0000000000..5ad7c8841a
--- /dev/null
+++ b/lib/librte_eal/include/rte_time.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
+ */
+
+#ifndef _RTE_TIME_H_
+#define _RTE_TIME_H_
+
+#include <stdint.h>
+#include <time.h>
+
+#define NSEC_PER_SEC 1000000000L
+
+/**
+ * Structure to hold the parameters of a running cycle counter to assist
+ * in converting cycles to nanoseconds.
+ */
+struct rte_timecounter {
+        /** Last cycle counter value read. */
+        uint64_t cycle_last;
+        /** Nanoseconds count. */
+        uint64_t nsec;
+        /** Bitmask separating nanosecond and sub-nanoseconds. */
+        uint64_t nsec_mask;
+        /** Sub-nanoseconds count. */
+        uint64_t nsec_frac;
+        /** Bitmask for two's complement subtraction of non-64-bit counters. */
+        uint64_t cc_mask;
+        /** Cycle to nanosecond divisor (power of two). */
+        uint32_t cc_shift;
+};
+
+/**
+ * Converts cycle counter cycles to nanoseconds.
+ */
+static inline uint64_t
+rte_cyclecounter_cycles_to_ns(struct rte_timecounter *tc, uint64_t cycles)
+{
+        uint64_t ns;
+
+        /* Add fractional nanoseconds. */
+        ns = cycles + tc->nsec_frac;
+        tc->nsec_frac = ns & tc->nsec_mask;
+
+        /* Shift to get only nanoseconds. */
+        return ns >> tc->cc_shift;
+}
+
+/**
+ * Update the internal nanosecond count in the structure.
+ */
+static inline uint64_t
+rte_timecounter_update(struct rte_timecounter *tc, uint64_t cycle_now)
+{
+        uint64_t cycle_delta, ns_offset;
+
+        /* Calculate the delta since the last call. */
+        if (tc->cycle_last <= cycle_now)
+                cycle_delta = (cycle_now - tc->cycle_last) & tc->cc_mask;
+        else
+                /* Handle cycle counts that have wrapped around. */
+                cycle_delta = (~(tc->cycle_last - cycle_now) & tc->cc_mask) + 1;
+
+        /* Convert to nanoseconds. */
+        ns_offset = rte_cyclecounter_cycles_to_ns(tc, cycle_delta);
+
+        /* Store current cycle counter for next call. */
+        tc->cycle_last = cycle_now;
+
+        /* Update the nanosecond count. */
+        tc->nsec += ns_offset;
+
+        return tc->nsec;
+}
+
+/**
+ * Convert from timespec structure into nanosecond units.
+ */
+static inline uint64_t
+rte_timespec_to_ns(const struct timespec *ts)
+{
+        return ((uint64_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
+/**
+ * Convert from nanosecond units into timespec structure.
+ */
+static inline struct timespec
+rte_ns_to_timespec(uint64_t nsec)
+{
+        struct timespec ts = {0, 0};
+
+        if (nsec == 0)
+                return ts;
+
+        ts.tv_sec = nsec / NSEC_PER_SEC;
+        ts.tv_nsec = nsec % NSEC_PER_SEC;
+
+        return ts;
+}
+
+#endif /* _RTE_TIME_H_ */
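A small sketch of the conversion helpers defined above (illustrative only, not part of the patch; the sample value is arbitrary):

    #include <rte_time.h>

    static void
    timespec_roundtrip(void)
    {
            struct timespec in = { .tv_sec = 1, .tv_nsec = 500000000 };
            uint64_t ns;
            struct timespec out;

            /* 1.5 s becomes 1500000000 ns and back again. */
            ns = rte_timespec_to_ns(&in);
            out = rte_ns_to_timespec(ns);
            (void)out;
    }
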
diff --git a/lib/librte_eal/include/rte_uuid.h b/lib/librte_eal/include/rte_uuid.h
new file mode 100644
index 0000000000..044afbdfab
--- /dev/null
+++ b/lib/librte_eal/include/rte_uuid.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 1996, 1997, 1998 Theodore Ts'o.
+ */
+/**
+ * @file
+ *
+ * UUID related functions originally from libuuid
+ */
+
+#ifndef _RTE_UUID_H_
+#define _RTE_UUID_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+
+/**
+ * Struct describing a Universal Unique Identifier
+ */
+typedef unsigned char rte_uuid_t[16];
+
+/**
+ * Helper for defining UUID values for id tables.
+ */
+#define RTE_UUID_INIT(a, b, c, d, e) { \
+        ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, \
+        ((a) >> 8) & 0xff, (a) & 0xff, \
+        ((b) >> 8) & 0xff, (b) & 0xff, \
+        ((c) >> 8) & 0xff, (c) & 0xff, \
+        ((d) >> 8) & 0xff, (d) & 0xff, \
+        ((e) >> 40) & 0xff, ((e) >> 32) & 0xff, \
+        ((e) >> 24) & 0xff, ((e) >> 16) & 0xff, \
+        ((e) >> 8) & 0xff, (e) & 0xff \
+}
+
+/**
+ * Test if UUID is all zeros.
+ *
+ * @param uu
+ *   The uuid to check.
+ * @return
+ *   true if uuid is NULL value, false otherwise
+ */
+bool rte_uuid_is_null(const rte_uuid_t uu);
+
+/**
+ * Copy uuid.
+ *
+ * @param dst
+ *   Destination uuid
+ * @param src
+ *   Source uuid
+ */
+static inline void rte_uuid_copy(rte_uuid_t dst, const rte_uuid_t src)
+{
+        memcpy(dst, src, sizeof(rte_uuid_t));
+}
+
+/**
+ * Compare two UUIDs.
+ *
+ * @param a
+ *   A UUID to compare
+ * @param b
+ *   A UUID to compare
+ * @return
+ *   an integer less than, equal to, or greater than zero if UUID a is
+ *   less than, equal to, or greater than UUID b.
+ */
+int rte_uuid_compare(const rte_uuid_t a, const rte_uuid_t b);
+
+/**
+ * Extract UUID from string
+ *
+ * @param in
+ *   Pointer to string of characters to convert
+ * @param uu
+ *   Destination UUID
+ * @return
+ *   Returns 0 on success, and -1 if string is not a valid UUID.
+ */
+int rte_uuid_parse(const char *in, rte_uuid_t uu);
+
+/**
+ * Convert UUID to string
+ *
+ * @param uu
+ *   UUID to format
+ * @param out
+ *   Resulting string buffer
+ * @param len
+ *   Size of the available string buffer
+ */
+#define RTE_UUID_STRLEN (36 + 1)
+void rte_uuid_unparse(const rte_uuid_t uu, char *out, size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_UUID_H */
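A brief usage sketch of the UUID helpers declared above (illustrative only, not part of the patch; the UUID string and helper name are arbitrary):

    #include <rte_uuid.h>

    static int
    uuid_example(void)
    {
            rte_uuid_t id;
            char buf[RTE_UUID_STRLEN];

            if (rte_uuid_parse("12345678-9abc-def0-1234-56789abcdef0", id) < 0)
                    return -1; /* not a valid textual UUID */

            rte_uuid_unparse(id, buf, sizeof(buf));
            return rte_uuid_is_null(id) ? -1 : 0;
    }
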
diff --git a/lib/librte_eal/include/rte_version.h b/lib/librte_eal/include/rte_version.h
new file mode 100644
index 0000000000..f7a3a1ebcf
--- /dev/null
+++ b/lib/librte_eal/include/rte_version.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+/**
+ * @file
+ * Definitions of DPDK version numbers
+ */
+
+#ifndef _RTE_VERSION_H_
+#define _RTE_VERSION_H_
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <rte_common.h>
+
+/**
+ * Macro to compute a version number usable for comparisons
+ */
+#define RTE_VERSION_NUM(a,b,c,d) ((a) << 24 | (b) << 16 | (c) << 8 | (d))
+
+/**
+ * All version numbers in one to compare with RTE_VERSION_NUM()
+ */
+#define RTE_VERSION RTE_VERSION_NUM( \
+                RTE_VER_YEAR, \
+                RTE_VER_MONTH, \
+                RTE_VER_MINOR, \
+                RTE_VER_RELEASE)
+
+/**
+ * Function returning the version string
+ * @return
+ *   string
+ */
+static inline const char *
+rte_version(void)
+{
+        static char version[32];
+        if (version[0] != 0)
+                return version;
+        if (strlen(RTE_VER_SUFFIX) == 0)
+                snprintf(version, sizeof(version), "%s %d.%02d.%d",
+                        RTE_VER_PREFIX,
+                        RTE_VER_YEAR,
+                        RTE_VER_MONTH,
+                        RTE_VER_MINOR);
+        else
+                snprintf(version, sizeof(version), "%s %d.%02d.%d%s%d",
+                        RTE_VER_PREFIX,
+                        RTE_VER_YEAR,
+                        RTE_VER_MONTH,
+                        RTE_VER_MINOR,
+                        RTE_VER_SUFFIX,
+                        RTE_VER_RELEASE);
+        return version;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_VERSION_H */
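For illustration (not part of the patch; the version numbers in the comparison and the printed strings are arbitrary), the macros and the runtime helper are typically used together like this:

    #include <stdio.h>
    #include <rte_version.h>

    static void
    print_version(void)
    {
            /* Runtime string, built from the RTE_VER_* configuration. */
            printf("%s\n", rte_version());

    #if RTE_VERSION >= RTE_VERSION_NUM(20, 5, 0, 0)
            /* Compile-time feature gate against a specific release. */
            printf("built against DPDK 20.05 or newer\n");
    #endif
    }
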
diff --git a/lib/librte_eal/include/rte_vfio.h b/lib/librte_eal/include/rte_vfio.h
new file mode 100644
index 0000000000..20ed8c45a9
--- /dev/null
+++ b/lib/librte_eal/include/rte_vfio.h
@@ -0,0 +1,360 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 6WIND S.A.
+ */
+
+#ifndef _RTE_VFIO_H_
+#define _RTE_VFIO_H_
+
+/**
+ * @file
+ * RTE VFIO. This library provides various VFIO related utility functions.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/*
+ * determine if VFIO is present on the system
+ */
+#if !defined(VFIO_PRESENT) && defined(RTE_EAL_VFIO)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+#define VFIO_PRESENT
+#endif /* kernel version >= 3.6.0 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
+#define HAVE_VFIO_DEV_REQ_INTERFACE
+#endif /* kernel version >= 4.0.0 */
+#endif /* RTE_EAL_VFIO */
+
+#ifdef VFIO_PRESENT
+
+#include <linux/vfio.h>
+
+#define VFIO_DIR "/dev/vfio"
+#define VFIO_CONTAINER_PATH "/dev/vfio/vfio"
+#define VFIO_GROUP_FMT "/dev/vfio/%u"
+#define VFIO_NOIOMMU_GROUP_FMT "/dev/vfio/noiommu-%u"
+#define VFIO_GET_REGION_ADDR(x) ((uint64_t) x << 40ULL)
+#define VFIO_GET_REGION_IDX(x) (x >> 40)
+#define VFIO_NOIOMMU_MODE \
+        "/sys/module/vfio/parameters/enable_unsafe_noiommu_mode"
+
+/* NOIOMMU is defined from kernel version 4.5 onwards */
+#ifdef VFIO_NOIOMMU_IOMMU
+#define RTE_VFIO_NOIOMMU VFIO_NOIOMMU_IOMMU
+#else
+#define RTE_VFIO_NOIOMMU 8
+#endif
+
+/*
+ * Capabilities are only supported on kernel 4.6+. There were also some API
+ * changes, so add a macro to get the cap offset.
+ */
+#ifdef VFIO_REGION_INFO_FLAG_CAPS
+#define RTE_VFIO_INFO_FLAG_CAPS VFIO_REGION_INFO_FLAG_CAPS
+#define VFIO_CAP_OFFSET(x) (x->cap_offset)
+#else
+#define RTE_VFIO_INFO_FLAG_CAPS (1 << 3)
+#define VFIO_CAP_OFFSET(x) (x->resv)
+struct vfio_info_cap_header {
+        uint16_t id;
+        uint16_t version;
+        uint32_t next;
+};
+#endif
+
+/* kernels 4.16+ can map BAR containing MSI-X table */
+#ifdef VFIO_REGION_INFO_CAP_MSIX_MAPPABLE
+#define RTE_VFIO_CAP_MSIX_MAPPABLE VFIO_REGION_INFO_CAP_MSIX_MAPPABLE
+#else
+#define RTE_VFIO_CAP_MSIX_MAPPABLE 3
+#endif
+
+#else /* not VFIO_PRESENT */
+
+/* we don't need an actual definition, only pointer is used */
+struct vfio_device_info;
+
+#endif /* VFIO_PRESENT */
+
+#define RTE_VFIO_DEFAULT_CONTAINER_FD (-1)
+
+/**
+ * Set up vfio_cfg for the device identified by its address.
+ * It discovers the configured I/O MMU groups or sets a new one for the device.
+ * If a new group is assigned, the DMA mapping is performed.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param sysfs_base
+ *   sysfs path prefix.
+ *
+ * @param dev_addr
+ *   device location.
+ *
+ * @param vfio_dev_fd
+ *   VFIO fd.
+ *
+ * @param device_info
+ *   Device information.
+ *
+ * @return
+ *   0 on success.
+ *   <0 on failure.
+ *   >1 if the device cannot be managed this way.
+ */
+int rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
+                int *vfio_dev_fd, struct vfio_device_info *device_info);
+
+/**
+ * Release a device mapped to a VFIO-managed I/O MMU group.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param sysfs_base
+ *   sysfs path prefix.
+ *
+ * @param dev_addr
+ *   device location.
+ *
+ * @param fd
+ *   VFIO fd.
+ *
+ * @return
+ *   0 on success.
+ *   <0 on failure.
+ */
+int rte_vfio_release_device(const char *sysfs_base, const char *dev_addr, int fd);
+
+/**
+ * Enable a VFIO-related kmod.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param modname
+ *   kernel module name.
+ *
+ * @return
+ *   0 on success.
+ *   <0 on failure.
+ */
+int rte_vfio_enable(const char *modname);
+
+/**
+ * Check whether a VFIO-related kmod is enabled.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param modname
+ *   kernel module name.
+ *
+ * @return
+ *   !0 if true.
+ *   0 otherwise.
+ */
+int rte_vfio_is_enabled(const char *modname);
+
+/**
+ * Whether VFIO NOIOMMU mode is enabled.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @return
+ *   !0 if true.
+ *   0 otherwise.
+ */
+int rte_vfio_noiommu_is_enabled(void);
+
+/**
+ * Remove group fd from internal VFIO group fd array.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param vfio_group_fd
+ *   VFIO Group FD.
+ *
+ * @return
+ *   0 on success.
+ *   <0 on failure.
+ */
+int
+rte_vfio_clear_group(int vfio_group_fd);
+
+/**
+ * Parse IOMMU group number for a device.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param sysfs_base
+ *   sysfs path prefix.
+ *
+ * @param dev_addr
+ *   device location.
+ *
+ * @param iommu_group_num
+ *   iommu group number
+ *
+ * @return
+ *   >0 on success
+ *   0 for non-existent group or VFIO
+ *   <0 for errors
+ */
+int
+rte_vfio_get_group_num(const char *sysfs_base,
+                const char *dev_addr, int *iommu_group_num);
+
+/**
+ * Open a new VFIO container fd.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @return
+ *   > 0 container fd
+ *   < 0 for errors
+ */
+int
+rte_vfio_get_container_fd(void);
+
+/**
+ * Open VFIO group fd or get an existing one.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param iommu_group_num
+ *   iommu group number
+ *
+ * @return
+ *   > 0 group fd
+ *   < 0 for errors
+ */
+int
+rte_vfio_get_group_fd(int iommu_group_num);
+
+/**
+ * Create a new container for device binding.
+ *
+ * @note Any newly allocated DPDK memory will not be mapped into these
+ *   containers by default; the user needs to manage DMA mappings for
+ *   any container created by this API.
+ *
+ * @note When creating containers using this API, the container will only be
+ *   available in the process that has created it. Sharing containers and
+ *   devices between multiple processes is not supported.
+ *
+ * @return
+ *   the container fd if successful
+ *   <0 if failed
+ */
+int
+rte_vfio_container_create(void);
+
+/**
+ * Destroy the container, unbind all vfio groups within it.
+ *
+ * @param container_fd
+ *   the container fd to destroy
+ *
+ * @return
+ *   0 if successful
+ *   <0 if failed
+ */
+int
+rte_vfio_container_destroy(int container_fd);
+
+/**
+ * Bind an IOMMU group to a container.
+ *
+ * @param container_fd
+ *   the container's fd
+ *
+ * @param iommu_group_num
+ *   the iommu group number to bind to container
+ *
+ * @return
+ *   group fd if successful
+ *   <0 if failed
+ */
+int
+rte_vfio_container_group_bind(int container_fd, int iommu_group_num);
+
+/**
+ * Unbind an IOMMU group from a container.
+ *
+ * @param container_fd
+ *   the container fd of container
+ *
+ * @param iommu_group_num
+ *   the iommu group number to delete from container
+ *
+ * @return
+ *   0 if successful
+ *   <0 if failed
+ */
+int
+rte_vfio_container_group_unbind(int container_fd, int iommu_group_num);
+
+/**
+ * Perform DMA mapping for devices in a container.
+ *
+ * @param container_fd
+ *   the specified container fd. Use RTE_VFIO_DEFAULT_CONTAINER_FD to
+ *   use the default container.
+ *
+ * @param vaddr
+ *   Starting virtual address of memory to be mapped.
+ *
+ * @param iova
+ *   Starting IOVA address of memory to be mapped.
+ *
+ * @param len
+ *   Length of memory segment being mapped.
+ *
+ * @return
+ *   0 if successful
+ *   <0 if failed
+ */
+int
+rte_vfio_container_dma_map(int container_fd, uint64_t vaddr,
+                uint64_t iova, uint64_t len);
+
+/**
+ * Perform DMA unmapping for devices in a container.
+ *
+ * @param container_fd
+ *   the specified container fd. Use RTE_VFIO_DEFAULT_CONTAINER_FD to
+ *   use the default container.
+ *
+ * @param vaddr
+ *   Starting virtual address of memory to be unmapped.
+ *
+ * @param iova
+ *   Starting IOVA address of memory to be unmapped.
+ *
+ * @param len
+ *   Length of memory segment being unmapped.
+ *
+ * @return
+ *   0 if successful
+ *   <0 if failed
+ */
+int
+rte_vfio_container_dma_unmap(int container_fd, uint64_t vaddr,
+                uint64_t iova, uint64_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_VFIO_H_ */
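A sketch of the container DMA mapping flow documented above (illustrative only, not part of the patch; vaddr, iova and len are placeholders and must describe a real, suitably aligned memory region):

    #include <rte_vfio.h>

    static int
    map_extmem(uint64_t vaddr, uint64_t iova, uint64_t len)
    {
            /* Map an externally allocated buffer in the default container. */
            if (rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
                            vaddr, iova, len) < 0)
                    return -1;

            /* ... the device may now DMA to/from the buffer ... */

            return rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
                            vaddr, iova, len);
    }
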
diff --git a/lib/librte_eal/linux/eal/Makefile b/lib/librte_eal/linux/eal/Makefile
index 177b7c45da..692fec2695 100644
--- a/lib/librte_eal/linux/eal/Makefile
+++ b/lib/librte_eal/linux/eal/Makefile
@@ -15,7 +15,7 @@ VPATH += $(RTE_SDK)/lib/librte_eal/common
 CFLAGS += -DALLOW_EXPERIMENTAL_API
 CFLAGS += -I$(SRCDIR)/include
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
-CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include
 CFLAGS += $(WERROR_FLAGS) -O3
 
 LDLIBS += -ldl
diff --git a/lib/librte_eal/meson.build b/lib/librte_eal/meson.build
index 1fc532139b..23ae03ad46 100644
--- a/lib/librte_eal/meson.build
+++ b/lib/librte_eal/meson.build
@@ -5,6 +5,8 @@
 # have a straight list of headers and source files.
 # Initially pull in common settings
 eal_inc = [global_inc]
+subdir('include')
+
 subdir('common')
 
 # Now do OS/exec-env specific settings, including building kernel modules
@@ -27,5 +29,5 @@ if cc.has_header('getopt.h')
 endif
 sources += env_sources
 objs = env_objs
-headers = common_headers + env_headers
+headers += env_headers
 includes += eal_inc
diff --git a/lib/librte_kvargs/Makefile b/lib/librte_kvargs/Makefile
index 419be8bd7c..24b1c3c5b9 100644
--- a/lib/librte_kvargs/Makefile
+++ b/lib/librte_kvargs/Makefile
@@ -7,7 +7,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
 LIB = librte_kvargs.a
 
 CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/include
 
 EXPORT_MAP := rte_kvargs_version.map
diff --git a/meson.build b/meson.build
index b7ae9c8d9a..ace4a0b8bf 100644
--- a/meson.build
+++ b/meson.build
@@ -28,7 +28,7 @@ abi_version_file = files('ABI_VERSION')
 # able to be included in any file. We also store a global array of include dirs
 # for passing to pmdinfogen scripts
 global_inc = include_directories('.', 'config',
-        'lib/librte_eal/common/include',
+        'lib/librte_eal/include',
         'lib/librte_eal/@0@/eal/include'.format(host_machine.system()),
 )
 subdir('config')