We had some inconsistencies between function prototypes and actual
definitions.
Let's avoid this by only adding the experimental tag to the prototypes.
Tests with gcc and clang show it is enough.
git grep -l __rte_experimental |grep \.c$ |while read file; do
sed -i -e '/^__rte_experimental$/d' $file;
sed -i -e 's/ *__rte_experimental//' $file;
sed -i -e 's/__rte_experimental *//' $file;
done
Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
54 files changed:
+# Scan a patch for added lines that carry an __rte_experimental tag in a
+# .c file: the tag must only appear on the prototypes in the headers
+# (see rte_compat.h), so such additions are reported and rejected.
+# Argument: <patch> - path of the patch file to scan.
+# Returns non-zero when an offending added line is found.
+check_experimental_tags() { # <patch>
+ res=0
+
+ cat "$1" |awk '
+ BEGIN {
+ current_file = "";
+ ret = 0;
+ }
+ # "+++ b/<path>" diff header: remember which file the next hunks touch
+ /^+++ b\// {
+ current_file = $2;
+ }
+ # an added line containing the __rte_experimental tag
+ /^+.*__rte_experimental/ {
+ # NOTE(review): ".c$" is an unanchored-dot match, so it also matches
+ # e.g. ".cc" suffixes - harmless in this C-only tree, but worth noting
+ if (current_file ~ ".c$" ) {
+ print "Please only put __rte_experimental tags in " \
+ "headers ("current_file")";
+ ret = 1;
+ }
+ }
+ END {
+ exit ret;
+ }' || res=1
+
+ return $res
+}
+
number=0
range='origin/master..'
quiet=false
number=0
range='origin/master..'
quiet=false
+ ! $verbose || printf '\nChecking __rte_experimental tags:\n'
+ report=$(check_experimental_tags "$tmpinput")
+ if [ $? -ne 0 ] ; then
+ $headline_printed || print_headline "$3"
+ printf '%s\n' "$report"
+ ret=1
+ fi
+
clean_tmp_files
[ $ret -eq 0 ] && return 0
clean_tmp_files
[ $ret -eq 0 ] && return 0
To mark an API as experimental, the symbols which are desired to be exported
must be placed in an EXPERIMENTAL version block in the corresponding libraries'
version map script.
To mark an API as experimental, the symbols which are desired to be exported
must be placed in an EXPERIMENTAL version block in the corresponding libraries'
version map script.
-Secondly, the corresponding definitions of those exported functions, and
-their forward declarations (in the development header files), must be marked
-with the ``__rte_experimental`` tag (see ``rte_compat.h``).
+Secondly, the corresponding prototypes of those exported functions (in the
+development header files), must be marked with the ``__rte_experimental`` tag
+(see ``rte_compat.h``).
The DPDK build makefiles perform a check to ensure that the map file and the
C code reflect the same list of symbols.
This check can be circumvented by defining ``ALLOW_EXPERIMENTAL_API``
The DPDK build makefiles perform a check to ensure that the map file and the
C code reflect the same list of symbols.
This check can be circumvented by defining ``ALLOW_EXPERIMENTAL_API``
rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
{
dpaa2_enable_ts = enable;
rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
{
dpaa2_enable_ts = enable;
rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
{
struct ixgbe_hw *hw;
rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
{
struct ixgbe_hw *hw;
ixgbe_release_swfw_semaphore(hw, mask);
}
ixgbe_release_swfw_semaphore(hw, mask);
}
rte_pmd_ixgbe_mdio_lock(uint16_t port)
{
struct ixgbe_hw *hw;
rte_pmd_ixgbe_mdio_lock(uint16_t port)
{
struct ixgbe_hw *hw;
rte_pmd_ixgbe_mdio_unlock(uint16_t port)
{
struct rte_eth_dev *dev;
rte_pmd_ixgbe_mdio_unlock(uint16_t port)
{
struct rte_eth_dev *dev;
rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
uint32_t dev_type, uint16_t *phy_data)
{
rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
uint32_t dev_type, uint16_t *phy_data)
{
rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
uint32_t dev_type, uint16_t phy_data)
{
rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
uint32_t dev_type, uint16_t phy_data)
{
return RTE_BBDEV_MAX_DEVS;
}
return RTE_BBDEV_MAX_DEVS;
}
-struct rte_bbdev * __rte_experimental
rte_bbdev_allocate(const char *name)
{
int ret;
rte_bbdev_allocate(const char *name)
{
int ret;
rte_bbdev_release(struct rte_bbdev *bbdev)
{
uint16_t dev_id;
rte_bbdev_release(struct rte_bbdev *bbdev)
{
uint16_t dev_id;
-struct rte_bbdev * __rte_experimental
rte_bbdev_get_named_dev(const char *name)
{
unsigned int i;
rte_bbdev_get_named_dev(const char *name)
{
unsigned int i;
-uint16_t __rte_experimental
rte_bbdev_count(void)
{
return num_devs;
}
rte_bbdev_count(void)
{
return num_devs;
}
rte_bbdev_is_valid(uint16_t dev_id)
{
if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
rte_bbdev_is_valid(uint16_t dev_id)
{
if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
-uint16_t __rte_experimental
rte_bbdev_find_next(uint16_t dev_id)
{
dev_id++;
rte_bbdev_find_next(uint16_t dev_id)
{
dev_id++;
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
unsigned int i;
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
unsigned int i;
rte_bbdev_intr_enable(uint16_t dev_id)
{
int ret;
rte_bbdev_intr_enable(uint16_t dev_id)
{
int ret;
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
const struct rte_bbdev_queue_conf *conf)
{
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
const struct rte_bbdev_queue_conf *conf)
{
rte_bbdev_start(uint16_t dev_id)
{
int i;
rte_bbdev_start(uint16_t dev_id)
{
int i;
rte_bbdev_stop(uint16_t dev_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_stop(uint16_t dev_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_close(uint16_t dev_id)
{
int ret;
rte_bbdev_close(uint16_t dev_id)
{
int ret;
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}
rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_stats_reset(uint16_t dev_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_stats_reset(uint16_t dev_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
struct rte_bbdev_queue_info *queue_info)
{
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
struct rte_bbdev_queue_info *queue_info)
{
-struct rte_mempool * __rte_experimental
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
unsigned int num_elements, unsigned int cache_size,
int socket_id)
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
unsigned int num_elements, unsigned int cache_size,
int socket_id)
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
return (user_cb == NULL) ? -ENOMEM : 0;
}
return (user_cb == NULL) ? -ENOMEM : 0;
}
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
enum rte_bbdev_event_type event, void *ret_param)
{
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
enum rte_bbdev_event_type event, void *ret_param)
{
rte_spinlock_unlock(&rte_bbdev_cb_lock);
}
rte_spinlock_unlock(&rte_bbdev_cb_lock);
}
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
return dev->dev_ops->queue_intr_enable(dev, queue_id);
}
return dev->dev_ops->queue_intr_enable(dev, queue_id);
}
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
return dev->dev_ops->queue_intr_disable(dev, queue_id);
}
return dev->dev_ops->queue_intr_disable(dev, queue_id);
}
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
void *data)
{
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
void *data)
{
-const char * __rte_experimental
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
static const char * const op_types[] = {
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
static const char * const op_types[] = {
rte_bpf_destroy(struct rte_bpf *bpf)
{
if (bpf != NULL) {
rte_bpf_destroy(struct rte_bpf *bpf)
{
if (bpf != NULL) {
rte_bpf_get_jit(const struct rte_bpf *bpf, struct rte_bpf_jit *jit)
{
if (bpf == NULL || jit == NULL)
rte_bpf_get_jit(const struct rte_bpf *bpf, struct rte_bpf_jit *jit)
{
if (bpf == NULL || jit == NULL)
-__rte_experimental uint32_t
rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
uint32_t num)
{
rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
uint32_t num)
{
-__rte_experimental uint64_t
rte_bpf_exec(const struct rte_bpf *bpf, void *ctx)
{
uint64_t rc;
rte_bpf_exec(const struct rte_bpf *bpf, void *ctx)
{
uint64_t rc;
-__rte_experimental struct rte_bpf *
rte_bpf_load(const struct rte_bpf_prm *prm)
{
struct rte_bpf *bpf;
rte_bpf_load(const struct rte_bpf_prm *prm)
{
struct rte_bpf *bpf;
}
#ifndef RTE_LIBRTE_BPF_ELF
}
#ifndef RTE_LIBRTE_BPF_ELF
-__rte_experimental struct rte_bpf *
rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
const char *sname)
{
rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
const char *sname)
{
-__rte_experimental struct rte_bpf *
rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
const char *sname)
{
rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
const char *sname)
{
rte_bpf_eth_rx_unload(uint16_t port, uint16_t queue)
{
struct bpf_eth_cbh *cbh;
rte_bpf_eth_rx_unload(uint16_t port, uint16_t queue)
{
struct bpf_eth_cbh *cbh;
rte_spinlock_unlock(&cbh->lock);
}
rte_spinlock_unlock(&cbh->lock);
}
rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue)
{
struct bpf_eth_cbh *cbh;
rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue)
{
struct bpf_eth_cbh *cbh;
rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,
const struct rte_bpf_prm *prm, const char *fname, const char *sname,
uint32_t flags)
rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,
const struct rte_bpf_prm *prm, const char *fname, const char *sname,
uint32_t flags)
rte_bpf_eth_tx_elf_load(uint16_t port, uint16_t queue,
const struct rte_bpf_prm *prm, const char *fname, const char *sname,
uint32_t flags)
rte_bpf_eth_tx_elf_load(uint16_t port, uint16_t queue,
const struct rte_bpf_prm *prm, const char *fname, const char *sname,
uint32_t flags)
#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"
#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"
-const char * __rte_experimental
rte_comp_get_feature_name(uint64_t flag)
{
switch (flag) {
rte_comp_get_feature_name(uint64_t flag)
{
switch (flag) {
-struct rte_mempool * __rte_experimental
rte_comp_op_pool_create(const char *name,
unsigned int nb_elts, unsigned int cache_size,
uint16_t user_size, int socket_id)
rte_comp_op_pool_create(const char *name,
unsigned int nb_elts, unsigned int cache_size,
uint16_t user_size, int socket_id)
-struct rte_comp_op * __rte_experimental
rte_comp_op_alloc(struct rte_mempool *mempool)
{
struct rte_comp_op *op = NULL;
rte_comp_op_alloc(struct rte_mempool *mempool)
{
struct rte_comp_op *op = NULL;
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
struct rte_comp_op **ops, uint16_t nb_ops)
{
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
struct rte_comp_op **ops, uint16_t nb_ops)
{
* @param op
* Compress operation
*/
* @param op
* Compress operation
*/
rte_comp_op_free(struct rte_comp_op *op)
{
if (op != NULL && op->mempool != NULL)
rte_mempool_put(op->mempool, op);
}
rte_comp_op_free(struct rte_comp_op *op)
{
if (op != NULL && op->mempool != NULL)
rte_mempool_put(op->mempool, op);
}
rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops)
{
uint16_t i;
rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops)
{
uint16_t i;
.max_devs = RTE_COMPRESS_MAX_DEVS
};
.max_devs = RTE_COMPRESS_MAX_DEVS
};
-const struct rte_compressdev_capabilities * __rte_experimental
+const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
enum rte_comp_algorithm algo)
{
rte_compressdev_capability_get(uint8_t dev_id,
enum rte_comp_algorithm algo)
{
-const char * __rte_experimental
rte_compressdev_get_feature_name(uint64_t flag)
{
switch (flag) {
rte_compressdev_get_feature_name(uint64_t flag)
{
switch (flag) {
return &compressdev_globals.devs[dev_id];
}
return &compressdev_globals.devs[dev_id];
}
-struct rte_compressdev * __rte_experimental
+struct rte_compressdev *
rte_compressdev_pmd_get_named_dev(const char *name)
{
struct rte_compressdev *dev;
rte_compressdev_pmd_get_named_dev(const char *name)
{
struct rte_compressdev *dev;
rte_compressdev_get_dev_id(const char *name)
{
unsigned int i;
rte_compressdev_get_dev_id(const char *name)
{
unsigned int i;
-uint8_t __rte_experimental
rte_compressdev_count(void)
{
return compressdev_globals.nb_devs;
}
rte_compressdev_count(void)
{
return compressdev_globals.nb_devs;
}
-uint8_t __rte_experimental
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
uint8_t nb_devices)
{
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
uint8_t nb_devices)
{
rte_compressdev_socket_id(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_socket_id(uint8_t dev_id)
{
struct rte_compressdev *dev;
return RTE_COMPRESS_MAX_DEVS;
}
return RTE_COMPRESS_MAX_DEVS;
}
-struct rte_compressdev * __rte_experimental
+struct rte_compressdev *
rte_compressdev_pmd_allocate(const char *name, int socket_id)
{
struct rte_compressdev *compressdev;
rte_compressdev_pmd_allocate(const char *name, int socket_id)
{
struct rte_compressdev *compressdev;
rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
{
int ret;
rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
{
int ret;
-uint16_t __rte_experimental
rte_compressdev_queue_pair_count(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_queue_pair_count(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
struct rte_compressdev *dev;
rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
struct rte_compressdev *dev;
return (*dev->dev_ops->dev_configure)(dev, config);
}
return (*dev->dev_ops->dev_configure)(dev, config);
}
rte_compressdev_start(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_start(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_stop(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_stop(uint8_t dev_id)
{
struct rte_compressdev *dev;
dev->data->dev_started = 0;
}
dev->data->dev_started = 0;
}
rte_compressdev_close(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_close(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
uint32_t max_inflight_ops, int socket_id)
{
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
uint32_t max_inflight_ops, int socket_id)
{
max_inflight_ops, socket_id);
}
max_inflight_ops, socket_id);
}
-uint16_t __rte_experimental
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_comp_op **ops, uint16_t nb_ops)
{
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_comp_op **ops, uint16_t nb_ops)
{
-uint16_t __rte_experimental
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_comp_op **ops, uint16_t nb_ops)
{
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_comp_op **ops, uint16_t nb_ops)
{
dev->data->queue_pairs[qp_id], ops, nb_ops);
}
dev->data->queue_pairs[qp_id], ops, nb_ops);
}
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
{
struct rte_compressdev *dev;
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
{
struct rte_compressdev *dev;
rte_compressdev_stats_reset(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_stats_reset(uint8_t dev_id)
{
struct rte_compressdev *dev;
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
struct rte_compressdev *dev;
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
struct rte_compressdev *dev;
dev_info->driver_name = dev->device->driver->name;
}
dev_info->driver_name = dev->device->driver->name;
}
rte_compressdev_private_xform_create(uint8_t dev_id,
const struct rte_comp_xform *xform,
void **priv_xform)
rte_compressdev_private_xform_create(uint8_t dev_id,
const struct rte_comp_xform *xform,
void **priv_xform)
rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
{
struct rte_compressdev *dev;
rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
{
struct rte_compressdev *dev;
rte_compressdev_stream_create(uint8_t dev_id,
const struct rte_comp_xform *xform,
void **stream)
rte_compressdev_stream_create(uint8_t dev_id,
const struct rte_comp_xform *xform,
void **stream)
rte_compressdev_stream_free(uint8_t dev_id, void *stream)
{
struct rte_compressdev *dev;
rte_compressdev_stream_free(uint8_t dev_id, void *stream)
{
struct rte_compressdev *dev;
-const char * __rte_experimental
rte_compressdev_name_get(uint8_t dev_id)
{
struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);
rte_compressdev_name_get(uint8_t dev_id)
{
struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);
rte_compressdev_pmd_parse_input_args(
struct rte_compressdev_pmd_init_params *params,
const char *args)
rte_compressdev_pmd_parse_input_args(
struct rte_compressdev_pmd_init_params *params,
const char *args)
-struct rte_compressdev * __rte_experimental
+struct rte_compressdev *
rte_compressdev_pmd_create(const char *name,
struct rte_device *device,
size_t private_data_size,
rte_compressdev_pmd_create(const char *name,
struct rte_device *device,
size_t private_data_size,
rte_compressdev_pmd_destroy(struct rte_compressdev *compressdev)
{
int retval;
rte_compressdev_pmd_destroy(struct rte_compressdev *compressdev)
{
int retval;
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
const char *xform_string)
{
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
const char *xform_string)
{
-const struct rte_cryptodev_asymmetric_xform_capability * __rte_experimental
+const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
const struct rte_cryptodev_asym_capability_idx *idx)
{
rte_cryptodev_asym_capability_get(uint8_t dev_id,
const struct rte_cryptodev_asym_capability_idx *idx)
{
rte_cryptodev_asym_xform_capability_check_optype(
const struct rte_cryptodev_asymmetric_xform_capability *capability,
enum rte_crypto_asym_op_type op_type)
rte_cryptodev_asym_xform_capability_check_optype(
const struct rte_cryptodev_asymmetric_xform_capability *capability,
enum rte_crypto_asym_op_type op_type)
rte_cryptodev_asym_xform_capability_check_modlen(
const struct rte_cryptodev_asymmetric_xform_capability *capability,
uint16_t modlen)
rte_cryptodev_asym_xform_capability_check_modlen(
const struct rte_cryptodev_asymmetric_xform_capability *capability,
uint16_t modlen)
rte_cryptodev_asym_session_init(uint8_t dev_id,
struct rte_cryptodev_asym_session *sess,
struct rte_crypto_asym_xform *xforms,
rte_cryptodev_asym_session_init(uint8_t dev_id,
struct rte_cryptodev_asym_session *sess,
struct rte_crypto_asym_xform *xforms,
-struct rte_mempool * __rte_experimental
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
int socket_id)
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
int socket_id)
-struct rte_cryptodev_asym_session * __rte_experimental
+struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
struct rte_cryptodev_asym_session *sess;
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
struct rte_cryptodev_asym_session *sess;
rte_cryptodev_asym_session_clear(uint8_t dev_id,
struct rte_cryptodev_asym_session *sess)
{
rte_cryptodev_asym_session_clear(uint8_t dev_id,
struct rte_cryptodev_asym_session *sess)
{
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
uint8_t i;
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
uint8_t i;
rte_cryptodev_sym_session_data_size(&s));
}
rte_cryptodev_sym_session_data_size(&s));
}
-unsigned int __rte_experimental
rte_cryptodev_sym_get_existing_header_session_size(
struct rte_cryptodev_sym_session *sess)
{
rte_cryptodev_sym_get_existing_header_session_size(
struct rte_cryptodev_sym_session *sess)
{
rte_cryptodev_sym_session_data_size(sess));
}
rte_cryptodev_sym_session_data_size(sess));
}
-unsigned int __rte_experimental
rte_cryptodev_asym_get_header_session_size(void)
{
/*
rte_cryptodev_asym_get_header_session_size(void)
{
/*
-unsigned int __rte_experimental
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
struct rte_cryptodev *dev;
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
struct rte_cryptodev *dev;
rte_cryptodev_sym_session_set_user_data(
struct rte_cryptodev_sym_session *sess,
void *data,
rte_cryptodev_sym_session_set_user_data(
struct rte_cryptodev_sym_session *sess,
void *data,
-void * __rte_experimental
rte_cryptodev_sym_session_get_user_data(
struct rte_cryptodev_sym_session *sess)
{
rte_cryptodev_sym_session_get_user_data(
struct rte_cryptodev_sym_session *sess)
{
static struct rte_class_list rte_class_list =
TAILQ_HEAD_INITIALIZER(rte_class_list);
static struct rte_class_list rte_class_list =
TAILQ_HEAD_INITIALIZER(rte_class_list);
rte_class_register(struct rte_class *class)
{
RTE_VERIFY(class);
rte_class_register(struct rte_class *class)
{
RTE_VERIFY(class);
RTE_LOG(DEBUG, EAL, "Registered [%s] device class.\n", class->name);
}
RTE_LOG(DEBUG, EAL, "Registered [%s] device class.\n", class->name);
}
rte_class_unregister(struct rte_class *class)
{
TAILQ_REMOVE(&rte_class_list, class, next);
RTE_LOG(DEBUG, EAL, "Unregistered [%s] device class.\n", class->name);
}
rte_class_unregister(struct rte_class *class)
{
TAILQ_REMOVE(&rte_class_list, class, next);
RTE_LOG(DEBUG, EAL, "Unregistered [%s] device class.\n", class->name);
}
struct rte_class *
rte_class_find(const struct rte_class *start, rte_class_cmp_t cmp,
const void *data)
struct rte_class *
rte_class_find(const struct rte_class *start, rte_class_cmp_t cmp,
const void *data)
return strcmp(class->name, name);
}
return strcmp(class->name, name);
}
struct rte_class *
rte_class_find_by_name(const char *name)
{
struct rte_class *
rte_class_find_by_name(const char *name)
{
rte_dev_event_callback_register(const char *device_name,
rte_dev_event_cb_fn cb_fn,
void *cb_arg)
rte_dev_event_callback_register(const char *device_name,
rte_dev_event_cb_fn cb_fn,
void *cb_arg)
rte_dev_event_callback_unregister(const char *device_name,
rte_dev_event_cb_fn cb_fn,
void *cb_arg)
rte_dev_event_callback_unregister(const char *device_name,
rte_dev_event_cb_fn cb_fn,
void *cb_arg)
rte_dev_event_callback_process(const char *device_name,
enum rte_dev_event_type event)
{
rte_dev_event_callback_process(const char *device_name,
enum rte_dev_event_type event)
{
rte_spinlock_unlock(&dev_event_lock);
}
rte_spinlock_unlock(&dev_event_lock);
}
int
rte_dev_iterator_init(struct rte_dev_iterator *it,
const char *dev_str)
int
rte_dev_iterator_init(struct rte_dev_iterator *it,
const char *dev_str)
it->device = dev;
return dev == NULL;
}
it->device = dev;
return dev == NULL;
}
struct rte_device *
rte_dev_iterator_next(struct rte_dev_iterator *it)
{
struct rte_device *
rte_dev_iterator_next(struct rte_dev_iterator *it)
{
rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
unsigned int elt_sz)
{
rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
unsigned int elt_sz)
{
rte_fbarray_attach(struct rte_fbarray *arr)
{
struct mem_area *ma = NULL, *tmp = NULL;
rte_fbarray_attach(struct rte_fbarray *arr)
{
struct mem_area *ma = NULL, *tmp = NULL;
rte_fbarray_detach(struct rte_fbarray *arr)
{
struct mem_area *tmp = NULL;
rte_fbarray_detach(struct rte_fbarray *arr)
{
struct mem_area *tmp = NULL;
rte_fbarray_destroy(struct rte_fbarray *arr)
{
struct mem_area *tmp = NULL;
rte_fbarray_destroy(struct rte_fbarray *arr)
{
struct mem_area *tmp = NULL;
-void * __rte_experimental
rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx)
{
void *ret = NULL;
rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx)
{
void *ret = NULL;
rte_fbarray_set_used(struct rte_fbarray *arr, unsigned int idx)
{
return set_used(arr, idx, true);
}
rte_fbarray_set_used(struct rte_fbarray *arr, unsigned int idx)
{
return set_used(arr, idx, true);
}
rte_fbarray_set_free(struct rte_fbarray *arr, unsigned int idx)
{
return set_used(arr, idx, false);
}
rte_fbarray_set_free(struct rte_fbarray *arr, unsigned int idx)
{
return set_used(arr, idx, false);
}
rte_fbarray_is_used(struct rte_fbarray *arr, unsigned int idx)
{
struct used_mask *msk;
rte_fbarray_is_used(struct rte_fbarray *arr, unsigned int idx)
{
struct used_mask *msk;
rte_fbarray_find_next_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, true, false);
}
rte_fbarray_find_next_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, true, false);
}
rte_fbarray_find_next_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, true, true);
}
rte_fbarray_find_next_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, true, true);
}
rte_fbarray_find_prev_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, false, false);
}
rte_fbarray_find_prev_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, false, false);
}
rte_fbarray_find_prev_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, false, true);
rte_fbarray_find_prev_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, false, true);
rte_fbarray_find_next_n_free(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return fbarray_find_n(arr, start, n, true, false);
}
rte_fbarray_find_next_n_free(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return fbarray_find_n(arr, start, n, true, false);
}
rte_fbarray_find_next_n_used(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return fbarray_find_n(arr, start, n, true, true);
}
rte_fbarray_find_next_n_used(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return fbarray_find_n(arr, start, n, true, true);
}
rte_fbarray_find_prev_n_free(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return fbarray_find_n(arr, start, n, false, false);
}
rte_fbarray_find_prev_n_free(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return fbarray_find_n(arr, start, n, false, false);
}
rte_fbarray_find_prev_n_used(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
rte_fbarray_find_prev_n_used(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
rte_fbarray_find_biggest_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, false, false);
}
rte_fbarray_find_biggest_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, false, false);
}
rte_fbarray_find_biggest_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, true, false);
}
rte_fbarray_find_biggest_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, true, false);
}
rte_fbarray_find_rev_biggest_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, false, true);
}
rte_fbarray_find_rev_biggest_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, false, true);
}
rte_fbarray_find_rev_biggest_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, true, true);
}
rte_fbarray_find_rev_biggest_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, true, true);
}
rte_fbarray_find_contig_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, true, false);
}
rte_fbarray_find_contig_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, true, false);
}
rte_fbarray_find_contig_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, true, true);
}
rte_fbarray_find_contig_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, true, true);
}
rte_fbarray_find_rev_contig_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, false, false);
}
rte_fbarray_find_rev_contig_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, false, false);
}
rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, false, true);
}
rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, false, true);
}
rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt)
{
void *end;
rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt)
{
void *end;
rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f)
{
struct used_mask *msk;
rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f)
{
struct used_mask *msk;
}
/* Register an extended log type and try to pick its level from EAL options */
}
/* Register an extended log type and try to pick its level from EAL options */
rte_log_register_type_and_pick_level(const char *name, uint32_t level_def)
{
struct rte_eal_opt_loglevel *opt_ll;
rte_log_register_type_and_pick_level(const char *name, uint32_t level_def)
{
struct rte_eal_opt_loglevel *opt_ll;
-__rte_experimental struct rte_memseg_list *
+struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
return virt2memseg_list(addr);
rte_mem_virt2memseg_list(const void *addr)
{
return virt2memseg_list(addr);
-__rte_experimental void *
rte_mem_iova2virt(rte_iova_t iova)
{
struct virtiova vi;
rte_mem_iova2virt(rte_iova_t iova)
{
struct virtiova vi;
-__rte_experimental struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
return virt2memseg(addr, msl != NULL ? msl :
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
return virt2memseg(addr, msl != NULL ? msl :
* Defining here because declared in rte_memory.h, but the actual implementation
* is in eal_common_memalloc.c, like all other memalloc internals.
*/
* Defining here because declared in rte_memory.h, but the actual implementation
* is in eal_common_memalloc.c, like all other memalloc internals.
*/
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
void *arg)
{
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
void *arg)
{
return eal_memalloc_mem_event_callback_register(name, clb, arg);
}
return eal_memalloc_mem_event_callback_register(name, clb, arg);
}
rte_mem_event_callback_unregister(const char *name, void *arg)
{
/* FreeBSD boots with legacy mem enabled by default */
rte_mem_event_callback_unregister(const char *name, void *arg)
{
/* FreeBSD boots with legacy mem enabled by default */
return eal_memalloc_mem_event_callback_unregister(name, arg);
}
return eal_memalloc_mem_event_callback_unregister(name, arg);
}
rte_mem_alloc_validator_register(const char *name,
rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
rte_mem_alloc_validator_register(const char *name,
rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
/* FreeBSD boots with legacy mem enabled by default */
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
/* FreeBSD boots with legacy mem enabled by default */
rte_mem_check_dma_mask(uint8_t maskbits)
{
return check_dma_mask(maskbits, false);
}
rte_mem_check_dma_mask(uint8_t maskbits)
{
return check_dma_mask(maskbits, false);
}
rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
{
return check_dma_mask(maskbits, true);
rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
{
return check_dma_mask(maskbits, true);
* initialization. PMDs should use rte_mem_check_dma_mask if addressing
* limitations by the device.
*/
* initialization. PMDs should use rte_mem_check_dma_mask if addressing
* limitations by the device.
*/
rte_mem_set_dma_mask(uint8_t maskbits)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_mem_set_dma_mask(uint8_t maskbits)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return mlock((void *)aligned, page_size);
}
return mlock((void *)aligned, page_size);
}
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_get_fd(const struct rte_memseg *ms)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_get_fd(const struct rte_memseg *ms)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
size_t *offset)
{
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
size_t *offset)
{
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
unsigned int n_pages, size_t page_sz)
{
rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
unsigned int n_pages, size_t page_sz)
{
rte_extmem_unregister(void *va_addr, size_t len)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_extmem_unregister(void *va_addr, size_t len)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_extmem_attach(void *va_addr, size_t len)
{
return sync_memory(va_addr, len, true);
}
rte_extmem_attach(void *va_addr, size_t len)
{
return sync_memory(va_addr, len, true);
}
rte_extmem_detach(void *va_addr, size_t len)
{
return sync_memory(va_addr, len, false);
rte_extmem_detach(void *va_addr, size_t len)
{
return sync_memory(va_addr, len, false);
rte_mp_action_register(const char *name, rte_mp_t action)
{
struct action_entry *entry;
rte_mp_action_register(const char *name, rte_mp_t action)
{
struct action_entry *entry;
rte_mp_action_unregister(const char *name)
{
struct action_entry *entry;
rte_mp_action_unregister(const char *name)
{
struct action_entry *entry;
rte_mp_sendmsg(struct rte_mp_msg *msg)
{
if (check_input(msg) != 0)
rte_mp_sendmsg(struct rte_mp_msg *msg)
{
if (check_input(msg) != 0)
rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
const struct timespec *ts)
{
rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
const struct timespec *ts)
{
rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
rte_mp_async_reply_t clb)
{
rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
rte_mp_async_reply_t clb)
{
rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
{
RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
{
RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
rte_delay_us_sleep(unsigned int us)
{
struct timespec wait[2];
rte_delay_us_sleep(unsigned int us)
{
struct timespec wait[2];
/*
* Function to dump contents of all heaps
*/
/*
* Function to dump contents of all heaps
*/
rte_malloc_dump_heaps(FILE *f)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
rte_malloc_dump_heaps(FILE *f)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int
rte_option_register(struct rte_option *opt)
{
int
rte_option_register(struct rte_option *opt)
{
return __rte_rand_lfsr258(state);
}
return __rte_rand_lfsr258(state);
}
-uint64_t __rte_experimental
rte_rand_max(uint64_t upper_bound)
{
struct rte_rand_state *state;
rte_rand_max(uint64_t upper_bound)
{
struct rte_rand_state *state;
-int32_t __rte_experimental
rte_service_may_be_active(uint32_t id)
{
uint32_t ids[RTE_MAX_LCORE] = {0};
rte_service_may_be_active(uint32_t id)
{
uint32_t ids[RTE_MAX_LCORE] = {0};
-int32_t __rte_experimental
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
uint64_t *attr_value)
{
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
uint64_t *attr_value)
{
-int32_t __rte_experimental
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
struct core_state *cs;
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
struct core_state *cs;
rte_eal_cleanup(void)
{
rte_service_finalize();
rte_eal_cleanup(void)
{
rte_service_finalize();
#include <rte_compat.h>
#include <rte_dev.h>
#include <rte_compat.h>
#include <rte_dev.h>
rte_dev_event_monitor_start(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return -1;
}
rte_dev_event_monitor_start(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return -1;
}
rte_dev_event_monitor_stop(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return -1;
}
rte_dev_event_monitor_stop(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return -1;
}
rte_dev_hotplug_handle_enable(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return -1;
}
rte_dev_hotplug_handle_enable(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return -1;
}
rte_dev_hotplug_handle_disable(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
rte_dev_hotplug_handle_disable(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb_fn, void *cb_arg,
rte_intr_unregister_callback_fn ucb_fn)
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb_fn, void *cb_arg,
rte_intr_unregister_callback_fn ucb_fn)
rte_spinlock_unlock(&failure_handle_lock);
}
rte_spinlock_unlock(&failure_handle_lock);
}
rte_dev_event_monitor_start(void)
{
int ret;
rte_dev_event_monitor_start(void)
{
int ret;
rte_dev_event_monitor_stop(void)
{
int ret;
rte_dev_event_monitor_stop(void)
{
int ret;
rte_dev_hotplug_handle_enable(void)
{
int ret = 0;
rte_dev_hotplug_handle_enable(void)
{
int ret = 0;
rte_dev_hotplug_handle_disable(void)
{
int ret = 0;
rte_dev_hotplug_handle_disable(void)
{
int ret = 0;
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb_fn, void *cb_arg,
rte_intr_unregister_callback_fn ucb_fn)
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb_fn, void *cb_arg,
rte_intr_unregister_callback_fn ucb_fn)
rte_eth_dev_owner_new(uint64_t *owner_id)
{
rte_eth_dev_shared_data_prepare();
rte_eth_dev_owner_new(uint64_t *owner_id)
{
rte_eth_dev_shared_data_prepare();
rte_eth_dev_owner_set(const uint16_t port_id,
const struct rte_eth_dev_owner *owner)
{
rte_eth_dev_owner_set(const uint16_t port_id,
const struct rte_eth_dev_owner *owner)
{
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
uint16_t port_id;
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
uint16_t port_id;
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
int ret = 0;
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
int ret = 0;
return eth_err(port_id, ret);
}
return eth_err(port_id, ret);
}
rte_eth_dev_is_removed(uint16_t port_id)
{
struct rte_eth_dev *dev;
rte_eth_dev_is_removed(uint16_t port_id)
{
struct rte_eth_dev *dev;
rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
{
struct rte_intr_handle *intr_handle;
rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
{
struct rte_intr_handle *intr_handle;
RTE_MEMZONE_IOVA_CONTIG, align);
}
RTE_MEMZONE_IOVA_CONTIG, align);
}
rte_eth_dev_create(struct rte_device *device, const char *name,
size_t priv_data_size,
ethdev_bus_specific_init ethdev_bus_specific_init,
rte_eth_dev_create(struct rte_device *device, const char *name,
size_t priv_data_size,
ethdev_bus_specific_init ethdev_bus_specific_init,
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
ethdev_uninit_t ethdev_uninit)
{
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
ethdev_uninit_t ethdev_uninit)
{
return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
rte_eth_dev_get_module_info(uint16_t port_id,
struct rte_eth_dev_module_info *modinfo)
{
rte_eth_dev_get_module_info(uint16_t port_id,
struct rte_eth_dev_module_info *modinfo)
{
return (*dev->dev_ops->get_module_info)(dev, modinfo);
}
return (*dev->dev_ops->get_module_info)(dev, modinfo);
}
rte_eth_dev_get_module_eeprom(uint16_t port_id,
struct rte_dev_eeprom_info *info)
{
rte_eth_dev_get_module_eeprom(uint16_t port_id,
struct rte_dev_eeprom_info *info)
{
enum rte_eth_switch_domain_state state;
} rte_eth_switch_domains[RTE_MAX_ETHPORTS];
enum rte_eth_switch_domain_state state;
} rte_eth_switch_domains[RTE_MAX_ETHPORTS];
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
unsigned int i;
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
unsigned int i;
rte_eth_switch_domain_free(uint16_t domain_id)
{
if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
rte_eth_switch_domain_free(uint16_t domain_id)
{
if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
struct rte_kvargs args;
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
struct rte_kvargs args;
* Expand RSS flows into several possible flows according to the RSS hash
* fields requested and the driver capabilities.
*/
* Expand RSS flows into several possible flows according to the RSS hash
* fields requested and the driver capabilities.
*/
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
const struct rte_flow_item *pattern, uint64_t types,
const struct rte_flow_expand_node graph[],
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
const struct rte_flow_item *pattern, uint64_t types,
const struct rte_flow_expand_node graph[],
})
/* MTR capabilities get */
})
/* MTR capabilities get */
rte_mtr_capabilities_get(uint16_t port_id,
struct rte_mtr_capabilities *cap,
struct rte_mtr_error *error)
rte_mtr_capabilities_get(uint16_t port_id,
struct rte_mtr_capabilities *cap,
struct rte_mtr_error *error)
}
/* MTR meter profile add */
}
/* MTR meter profile add */
rte_mtr_meter_profile_add(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_meter_profile *profile,
rte_mtr_meter_profile_add(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_meter_profile *profile,
}
/** MTR meter profile delete */
}
/** MTR meter profile delete */
rte_mtr_meter_profile_delete(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_error *error)
rte_mtr_meter_profile_delete(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_error *error)
}
/** MTR object create */
}
/** MTR object create */
rte_mtr_create(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_params *params,
rte_mtr_create(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_params *params,
}
/** MTR object destroy */
}
/** MTR object destroy */
rte_mtr_destroy(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
rte_mtr_destroy(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
}
/** MTR object meter enable */
}
/** MTR object meter enable */
rte_mtr_meter_enable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
rte_mtr_meter_enable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
}
/** MTR object meter disable */
}
/** MTR object meter disable */
rte_mtr_meter_disable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
rte_mtr_meter_disable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
}
/** MTR object meter profile update */
}
/** MTR object meter profile update */
rte_mtr_meter_profile_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t meter_profile_id,
rte_mtr_meter_profile_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t meter_profile_id,
}
/** MTR object meter DSCP table update */
}
/** MTR object meter DSCP table update */
rte_mtr_meter_dscp_table_update(uint16_t port_id,
uint32_t mtr_id,
enum rte_color *dscp_table,
rte_mtr_meter_dscp_table_update(uint16_t port_id,
uint32_t mtr_id,
enum rte_color *dscp_table,
}
/** MTR object policer action update */
}
/** MTR object policer action update */
rte_mtr_policer_actions_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t action_mask,
rte_mtr_policer_actions_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t action_mask,
}
/** MTR object enabled stats update */
}
/** MTR object enabled stats update */
rte_mtr_stats_update(uint16_t port_id,
uint32_t mtr_id,
uint64_t stats_mask,
rte_mtr_stats_update(uint16_t port_id,
uint32_t mtr_id,
uint64_t stats_mask,
}
/** MTR object stats read */
}
/** MTR object stats read */
rte_mtr_stats_read(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_stats *stats,
rte_mtr_stats_read(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_stats *stats,
return rxa_ctrl(id, 0);
}
return rxa_ctrl(id, 0);
}
rte_event_eth_rx_adapter_stats_get(uint8_t id,
struct rte_event_eth_rx_adapter_stats *stats)
{
rte_event_eth_rx_adapter_stats_get(uint8_t id,
struct rte_event_eth_rx_adapter_stats *stats)
{
return rx_adapter->service_inited ? 0 : -ESRCH;
}
return rx_adapter->service_inited ? 0 : -ESRCH;
}
rte_event_eth_rx_adapter_cb_register(uint8_t id,
uint16_t eth_dev_id,
rte_event_eth_rx_adapter_cb_fn cb_fn,
rte_event_eth_rx_adapter_cb_register(uint8_t id,
uint16_t eth_dev_id,
rte_event_eth_rx_adapter_cb_fn cb_fn,
void *entry_ptr; /* handle to the table entry for rule meta data */
};
void *entry_ptr; /* handle to the table entry for rule meta data */
};
rte_flow_classify_validate(
struct rte_flow_classifier *cls,
const struct rte_flow_attr *attr,
rte_flow_classify_validate(
struct rte_flow_classifier *cls,
const struct rte_flow_attr *attr,
-struct rte_flow_classifier * __rte_experimental
+struct rte_flow_classifier *
rte_flow_classifier_create(struct rte_flow_classifier_params *params)
{
struct rte_flow_classifier *cls;
rte_flow_classifier_create(struct rte_flow_classifier_params *params)
{
struct rte_flow_classifier *cls;
table->ops.f_free(table->h_table);
}
table->ops.f_free(table->h_table);
}
rte_flow_classifier_free(struct rte_flow_classifier *cls)
{
uint32_t i;
rte_flow_classifier_free(struct rte_flow_classifier *cls)
{
uint32_t i;
rte_flow_classify_table_create(struct rte_flow_classifier *cls,
struct rte_flow_classify_table_params *params)
{
rte_flow_classify_table_create(struct rte_flow_classifier *cls,
struct rte_flow_classify_table_params *params)
{
-struct rte_flow_classify_rule * __rte_experimental
+struct rte_flow_classify_rule *
rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
struct rte_flow_classify_rule *rule)
{
rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
struct rte_flow_classify_rule *rule)
{
rte_flow_classifier_query(struct rte_flow_classifier *cls,
struct rte_mbuf **pkts,
const uint16_t nb_pkts,
rte_flow_classifier_query(struct rte_flow_classifier *cls,
struct rte_mbuf **pkts,
const uint16_t nb_pkts,
rte_hash_free_key_with_position(const struct rte_hash *h,
const int32_t position)
{
rte_hash_free_key_with_position(const struct rte_hash *h,
const int32_t position)
{
}
/* Delete expired fragments */
}
/* Delete expired fragments */
rte_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr, uint64_t tms)
{
rte_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr, uint64_t tms)
{
-uint64_t __rte_experimental
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
{
return sa->type;
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
{
return sa->type;
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
memset(sa, 0, sa->size);
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
memset(sa, 0, sa->size);
((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
}
((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
}
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
uint64_t type;
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
uint64_t type;
return ipsec_sa_size(type, &wsz, &nb);
}
return ipsec_sa_size(type, &wsz, &nb);
}
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
uint32_t size)
{
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
uint32_t size)
{
rte_ipsec_session_prepare(struct rte_ipsec_session *ss)
{
int32_t rc;
rte_ipsec_session_prepare(struct rte_ipsec_session *ss)
{
int32_t rc;
rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
{
char path[64];
rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
{
char path[64];
struct rte_kvargs *
rte_kvargs_parse_delim(const char *args, const char * const valid_keys[],
const char *valid_ends)
struct rte_kvargs *
rte_kvargs_parse_delim(const char *args, const char * const valid_keys[],
const char *valid_ends)
int
rte_kvargs_strcmp(const char *key __rte_unused,
const char *value, void *opaque)
int
rte_kvargs_strcmp(const char *key __rte_unused,
const char *value, void *opaque)
rte_panic("%s\n", reason);
}
rte_panic("%s\n", reason);
}
int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
const char **reason)
{
int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
const char **reason)
{
rte_meter_trtcm_rfc4115_profile_config(
struct rte_meter_trtcm_rfc4115_profile *p,
struct rte_meter_trtcm_rfc4115_params *params)
rte_meter_trtcm_rfc4115_profile_config(
struct rte_meter_trtcm_rfc4115_profile *p,
struct rte_meter_trtcm_rfc4115_params *params)
rte_meter_trtcm_rfc4115_config(
struct rte_meter_trtcm_rfc4115 *m,
struct rte_meter_trtcm_rfc4115_profile *p)
rte_meter_trtcm_rfc4115_config(
struct rte_meter_trtcm_rfc4115 *m,
struct rte_meter_trtcm_rfc4115_profile *p)
#include <rte_arp.h>
#define RARP_PKT_SIZE 64
#include <rte_arp.h>
#define RARP_PKT_SIZE 64
-struct rte_mbuf * __rte_experimental
rte_net_make_rarp_packet(struct rte_mempool *mpool,
const struct rte_ether_addr *mac)
{
rte_net_make_rarp_packet(struct rte_mempool *mpool,
const struct rte_ether_addr *mac)
{
}
/* parse ipv6 extended headers, update offset and return next proto */
}
/* parse ipv6 extended headers, update offset and return next proto */
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
int *frag)
{
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
int *frag)
{
rte_empty_poll_detection(struct rte_timer *tim, void *arg)
{
rte_empty_poll_detection(struct rte_timer *tim, void *arg)
{
rte_power_empty_poll_stat_init(struct ep_params **eptr, uint8_t *freq_tlb,
struct ep_policy *policy)
{
rte_power_empty_poll_stat_init(struct ep_params **eptr, uint8_t *freq_tlb,
struct ep_policy *policy)
{
rte_power_empty_poll_stat_free(void)
{
rte_power_empty_poll_stat_free(void)
{
rte_power_empty_poll_stat_update(unsigned int lcore_id)
{
struct priority_worker *poll_stats;
rte_power_empty_poll_stat_update(unsigned int lcore_id)
{
struct priority_worker *poll_stats;
rte_power_poll_stat_update(unsigned int lcore_id, uint8_t nb_pkt)
{
rte_power_poll_stat_update(unsigned int lcore_id, uint8_t nb_pkt)
{
-uint64_t __rte_experimental
rte_power_empty_poll_stat_fetch(unsigned int lcore_id)
{
struct priority_worker *poll_stats;
rte_power_empty_poll_stat_fetch(unsigned int lcore_id)
{
struct priority_worker *poll_stats;
return poll_stats->empty_dequeues;
}
return poll_stats->empty_dequeues;
}
-uint64_t __rte_experimental
rte_power_poll_stat_fetch(unsigned int lcore_id)
{
struct priority_worker *poll_stats;
rte_power_poll_stat_fetch(unsigned int lcore_id)
{
struct priority_worker *poll_stats;
#include "rte_rcu_qsbr.h"
/* Get the memory size of QSBR variable */
#include "rte_rcu_qsbr.h"
/* Get the memory size of QSBR variable */
-size_t __rte_experimental
rte_rcu_qsbr_get_memsize(uint32_t max_threads)
{
size_t sz;
rte_rcu_qsbr_get_memsize(uint32_t max_threads)
{
size_t sz;
}
/* Initialize a quiescent state variable */
}
/* Initialize a quiescent state variable */
rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads)
{
size_t sz;
rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads)
{
size_t sz;
/* Register a reader thread to report its quiescent state
* on a QS variable.
*/
/* Register a reader thread to report its quiescent state
* on a QS variable.
*/
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
unsigned int i, id, success;
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
unsigned int i, id, success;
/* Remove a reader thread, from the list of threads reporting their
* quiescent state on a QS variable.
*/
/* Remove a reader thread, from the list of threads reporting their
* quiescent state on a QS variable.
*/
rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
unsigned int i, id, success;
rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
unsigned int i, id, success;
}
/* Wait till the reader threads have entered quiescent state. */
}
/* Wait till the reader threads have entered quiescent state. */
rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
uint64_t t;
rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
uint64_t t;
}
/* Dump the details of a single quiescent state variable to a file. */
}
/* Dump the details of a single quiescent state variable to a file. */
rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
{
uint64_t bmap;
rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
{
uint64_t bmap;
rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
struct rte_sched_pipe_params *params,
uint32_t *pipe_profile_id)
rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
struct rte_sched_pipe_params *params,
uint32_t *pipe_profile_id)
rte_security_session_update(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_session_conf *conf)
rte_security_session_update(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_session_conf *conf)
return instance->ops->session_get_size(instance->device);
}
return instance->ops->session_get_size(instance->device);
}
rte_security_session_stats_get(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_stats *stats)
rte_security_session_stats_get(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_stats *stats)
-void * __rte_experimental
rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md)
{
void *userdata = NULL;
rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md)
{
void *userdata = NULL;
-int32_t __rte_experimental
rte_telemetry_init()
{
int ret;
rte_telemetry_init()
{
int ret;
-int32_t __rte_experimental
rte_telemetry_cleanup(void)
{
int ret;
rte_telemetry_cleanup(void)
{
int ret;
-int32_t __rte_experimental
rte_telemetry_selftest(void)
{
const char *invalid_client_path = SELFTEST_INVALID_CLIENT;
rte_telemetry_selftest(void)
{
const char *invalid_client_path = SELFTEST_INVALID_CLIENT;
-int32_t __rte_experimental
rte_telemetry_parse(struct telemetry_impl *telemetry, char *socket_rx_data)
{
int ret, action_int;
rte_telemetry_parse(struct telemetry_impl *telemetry, char *socket_rx_data)
{
int ret, action_int;
timer_data = &rte_timer_data_arr[id]; \
} while (0)
timer_data = &rte_timer_data_arr[id]; \
} while (0)
rte_timer_data_alloc(uint32_t *id_ptr)
{
int i;
rte_timer_data_alloc(uint32_t *id_ptr)
{
int i;
rte_timer_data_dealloc(uint32_t id)
{
struct rte_timer_data *timer_data;
rte_timer_data_dealloc(uint32_t id)
{
struct rte_timer_data *timer_data;
rte_timer_subsystem_init_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_subsystem_init, _v1905, 19.05);
rte_timer_subsystem_init_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_subsystem_init, _v1905, 19.05);
rte_timer_subsystem_finalize(void)
{
if (!rte_timer_subsystem_initialized)
rte_timer_subsystem_finalize(void)
{
if (!rte_timer_subsystem_initialized)
rte_timer_reset_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_reset, _v1905, 19.05);
rte_timer_reset_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_reset, _v1905, 19.05);
rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
uint64_t ticks, enum rte_timer_type type,
unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
uint64_t ticks, enum rte_timer_type type,
unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
rte_timer_stop_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_stop, _v1905, 19.05);
rte_timer_stop_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_stop, _v1905, 19.05);
rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
{
struct rte_timer_data *timer_data;
rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
{
struct rte_timer_data *timer_data;
MAP_STATIC_SYMBOL(int rte_timer_manage(void), rte_timer_manage_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_manage, _v1905, 19.05);
MAP_STATIC_SYMBOL(int rte_timer_manage(void), rte_timer_manage_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_manage, _v1905, 19.05);
rte_timer_alt_manage(uint32_t timer_data_id,
unsigned int *poll_lcores,
int nb_poll_lcores,
rte_timer_alt_manage(uint32_t timer_data_id,
unsigned int *poll_lcores,
int nb_poll_lcores,
}
/* Walk pending lists, stopping timers and calling user-specified function */
}
/* Walk pending lists, stopping timers and calling user-specified function */
rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
int nb_walk_lcores,
rte_timer_stop_all_cb_t f, void *f_arg)
rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
int nb_walk_lcores,
rte_timer_stop_all_cb_t f, void *f_arg)
rte_timer_dump_stats_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_dump_stats, _v1905, 19.05);
rte_timer_dump_stats_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_dump_stats, _v1905, 19.05);
rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
{
struct rte_timer_data *timer_data;
rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
{
struct rte_timer_data *timer_data;
return vdpa_device_num;
}
return vdpa_device_num;
}
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
struct virtio_net *dev = get_device(vid);
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
struct virtio_net *dev = get_device(vid);
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
struct rte_mempool *sess_pool,
struct rte_mempool *sess_priv_pool,
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
struct rte_mempool *sess_pool,
struct rte_mempool *sess_priv_pool,
rte_vhost_crypto_free(int vid)
{
struct virtio_net *dev = get_device(vid);
rte_vhost_crypto_free(int vid)
{
struct virtio_net *dev = get_device(vid);
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
struct virtio_net *dev = get_device(vid);
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
struct virtio_net *dev = get_device(vid);
-uint16_t __rte_experimental
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
-uint16_t __rte_experimental
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{