return $res
}
+check_experimental_tags() { # <patch>
+ res=0
+
+ cat "$1" |awk '
+ BEGIN {
+ current_file = "";
+ ret = 0;
+ }
+ /^\+\+\+ b\// {
+ current_file = $2;
+ }
+ /^\+.*__rte_experimental/ {
+ if (current_file ~ "\\.c$") {
+ print "Please only put __rte_experimental tags in " \
+ "headers ("current_file")";
+ ret = 1;
+ }
+ }
+ END {
+ exit ret;
+ }' || res=1
+
+ return $res
+}
+
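For illustration, here is a hypothetical patch fragment that this check would
flag; the library, file, and function names are invented::

    --- a/lib/librte_foo/rte_foo.c
    +++ b/lib/librte_foo/rte_foo.c
    @@ -10,0 +11,5 @@
    +int __rte_experimental
    +rte_foo_enable(int enable)
    +{
    +	return 0;
    +}

On such input the awk script records "b/lib/librte_foo/rte_foo.c" from the
"+++" header, matches the added line carrying the tag, prints the "Please
only put __rte_experimental tags in headers" message and exits 1, so
check_experimental_tags returns non-zero.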
number=0
range='origin/master..'
quiet=false
ret=1
fi
+ ! $verbose || printf '\nChecking __rte_experimental tags:\n'
+ report=$(check_experimental_tags "$tmpinput")
+ if [ $? -ne 0 ] ; then
+ $headline_printed || print_headline "$3"
+ printf '%s\n' "$report"
+ ret=1
+ fi
+
clean_tmp_files
[ $ret -eq 0 ] && return 0
To mark an API as experimental, the symbols which are desired to be exported
must be placed in an EXPERIMENTAL version block in the corresponding libraries'
version map script.
-Secondly, the corresponding definitions of those exported functions, and
-their forward declarations (in the development header files), must be marked
-with the ``__rte_experimental`` tag (see ``rte_compat.h``).
+Secondly, the corresponding prototypes of those exported functions (in the
+development header files) must be marked with the ``__rte_experimental`` tag
+(see ``rte_compat.h``).
The DPDK build makefiles perform a check to ensure that the map file and the
C code reflect the same list of symbols.
This check can be circumvented by defining ``ALLOW_EXPERIMENTAL_API``
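As a minimal sketch of that convention (all names below are hypothetical),
the tag goes on the prototype in the exported header, the symbol is listed in
the map file's EXPERIMENTAL block, and the definition in the C file stays
untagged::

    /* lib/librte_foo/rte_foo.h -- exported header */
    #include <rte_compat.h>     /* provides __rte_experimental */

    __rte_experimental
    int rte_foo_enable(int enable);

    /*
     * lib/librte_foo/rte_foo_version.map:
     *
     *     EXPERIMENTAL {
     *             global:
     *             rte_foo_enable;
     *     };
     */

    /* lib/librte_foo/rte_foo.c -- the definition carries no tag */
    static int foo_enabled;

    int
    rte_foo_enable(int enable)
    {
    	foo_enabled = enable;
    	return 0;
    }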
int dpaa2_logtype_pmd;
-__rte_experimental void
+void
rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
{
dpaa2_enable_ts = enable;
return 0;
}
-int __rte_experimental
+int
rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
{
struct ixgbe_hw *hw;
ixgbe_release_swfw_semaphore(hw, mask);
}
-int __rte_experimental
+int
rte_pmd_ixgbe_mdio_lock(uint16_t port)
{
struct ixgbe_hw *hw;
return IXGBE_SUCCESS;
}
-int __rte_experimental
+int
rte_pmd_ixgbe_mdio_unlock(uint16_t port)
{
struct rte_eth_dev *dev;
return IXGBE_SUCCESS;
}
-int __rte_experimental
+int
rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
uint32_t dev_type, uint16_t *phy_data)
{
return 0;
}
-int __rte_experimental
+int
rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
uint32_t dev_type, uint16_t phy_data)
{
return RTE_BBDEV_MAX_DEVS;
}
-struct rte_bbdev * __rte_experimental
+struct rte_bbdev *
rte_bbdev_allocate(const char *name)
{
int ret;
return bbdev;
}
-int __rte_experimental
+int
rte_bbdev_release(struct rte_bbdev *bbdev)
{
uint16_t dev_id;
return 0;
}
-struct rte_bbdev * __rte_experimental
+struct rte_bbdev *
rte_bbdev_get_named_dev(const char *name)
{
unsigned int i;
return NULL;
}
-uint16_t __rte_experimental
+uint16_t
rte_bbdev_count(void)
{
return num_devs;
}
-bool __rte_experimental
+bool
rte_bbdev_is_valid(uint16_t dev_id)
{
if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
return false;
}
-uint16_t __rte_experimental
+uint16_t
rte_bbdev_find_next(uint16_t dev_id)
{
dev_id++;
return dev_id;
}
-int __rte_experimental
+int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
unsigned int i;
return ret;
}
-int __rte_experimental
+int
rte_bbdev_intr_enable(uint16_t dev_id)
{
int ret;
return -ENOTSUP;
}
-int __rte_experimental
+int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
const struct rte_bbdev_queue_conf *conf)
{
return 0;
}
-int __rte_experimental
+int
rte_bbdev_start(uint16_t dev_id)
{
int i;
return 0;
}
-int __rte_experimental
+int
rte_bbdev_stop(uint16_t dev_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
return 0;
}
-int __rte_experimental
+int
rte_bbdev_close(uint16_t dev_id)
{
int ret;
return 0;
}
-int __rte_experimental
+int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
return 0;
}
-int __rte_experimental
+int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}
-int __rte_experimental
+int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
struct rte_bbdev *dev = get_dev(dev_id);
return 0;
}
-int __rte_experimental
+int
rte_bbdev_stats_reset(uint16_t dev_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
return 0;
}
-int __rte_experimental
+int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
struct rte_bbdev *dev = get_dev(dev_id);
return 0;
}
-int __rte_experimental
+int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
struct rte_bbdev_queue_info *queue_info)
{
}
}
-struct rte_mempool * __rte_experimental
+struct rte_mempool *
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
unsigned int num_elements, unsigned int cache_size,
int socket_id)
return mp;
}
-int __rte_experimental
+int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
return (user_cb == NULL) ? -ENOMEM : 0;
}
-int __rte_experimental
+int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
return ret;
}
-void __rte_experimental
+void
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
enum rte_bbdev_event_type event, void *ret_param)
{
rte_spinlock_unlock(&rte_bbdev_cb_lock);
}
-int __rte_experimental
+int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
return dev->dev_ops->queue_intr_enable(dev, queue_id);
}
-int __rte_experimental
+int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
struct rte_bbdev *dev = get_dev(dev_id);
return dev->dev_ops->queue_intr_disable(dev, queue_id);
}
-int __rte_experimental
+int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
void *data)
{
}
-const char * __rte_experimental
+const char *
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
static const char * const op_types[] = {
int rte_bpf_logtype;
-__rte_experimental void
+void
rte_bpf_destroy(struct rte_bpf *bpf)
{
if (bpf != NULL) {
}
}
-__rte_experimental int
+int
rte_bpf_get_jit(const struct rte_bpf *bpf, struct rte_bpf_jit *jit)
{
if (bpf == NULL || jit == NULL)
return 0;
}
-__rte_experimental uint32_t
+uint32_t
rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
uint32_t num)
{
return i;
}
-__rte_experimental uint64_t
+uint64_t
rte_bpf_exec(const struct rte_bpf *bpf, void *ctx)
{
uint64_t rc;
return 0;
}
-__rte_experimental struct rte_bpf *
+struct rte_bpf *
rte_bpf_load(const struct rte_bpf_prm *prm)
{
struct rte_bpf *bpf;
}
#ifndef RTE_LIBRTE_BPF_ELF
-__rte_experimental struct rte_bpf *
+struct rte_bpf *
rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
const char *sname)
{
return bpf;
}
-__rte_experimental struct rte_bpf *
+struct rte_bpf *
rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
const char *sname)
{
}
-__rte_experimental void
+void
rte_bpf_eth_rx_unload(uint16_t port, uint16_t queue)
{
struct bpf_eth_cbh *cbh;
rte_spinlock_unlock(&cbh->lock);
}
-__rte_experimental void
+void
rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue)
{
struct bpf_eth_cbh *cbh;
return rc;
}
-__rte_experimental int
+int
rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,
const struct rte_bpf_prm *prm, const char *fname, const char *sname,
uint32_t flags)
return rc;
}
-__rte_experimental int
+int
rte_bpf_eth_tx_elf_load(uint16_t port, uint16_t queue,
const struct rte_bpf_prm *prm, const char *fname, const char *sname,
uint32_t flags)
#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"
-const char * __rte_experimental
+const char *
rte_comp_get_feature_name(uint64_t flag)
{
switch (flag) {
op->mempool = mempool;
}
-struct rte_mempool * __rte_experimental
+struct rte_mempool *
rte_comp_op_pool_create(const char *name,
unsigned int nb_elts, unsigned int cache_size,
uint16_t user_size, int socket_id)
return mp;
}
-struct rte_comp_op * __rte_experimental
+struct rte_comp_op *
rte_comp_op_alloc(struct rte_mempool *mempool)
{
struct rte_comp_op *op = NULL;
return op;
}
-int __rte_experimental
+int
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
struct rte_comp_op **ops, uint16_t nb_ops)
{
* @param op
* Compress operation
*/
-void __rte_experimental
+void
rte_comp_op_free(struct rte_comp_op *op)
{
if (op != NULL && op->mempool != NULL)
rte_mempool_put(op->mempool, op);
}
-void __rte_experimental
+void
rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops)
{
uint16_t i;
.max_devs = RTE_COMPRESS_MAX_DEVS
};
-const struct rte_compressdev_capabilities * __rte_experimental
+const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
enum rte_comp_algorithm algo)
{
return NULL;
}
-const char * __rte_experimental
+const char *
rte_compressdev_get_feature_name(uint64_t flag)
{
switch (flag) {
return &compressdev_globals.devs[dev_id];
}
-struct rte_compressdev * __rte_experimental
+struct rte_compressdev *
rte_compressdev_pmd_get_named_dev(const char *name)
{
struct rte_compressdev *dev;
}
-int __rte_experimental
+int
rte_compressdev_get_dev_id(const char *name)
{
unsigned int i;
return -1;
}
-uint8_t __rte_experimental
+uint8_t
rte_compressdev_count(void)
{
return compressdev_globals.nb_devs;
}
-uint8_t __rte_experimental
+uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
uint8_t nb_devices)
{
return count;
}
-int __rte_experimental
+int
rte_compressdev_socket_id(uint8_t dev_id)
{
struct rte_compressdev *dev;
return RTE_COMPRESS_MAX_DEVS;
}
-struct rte_compressdev * __rte_experimental
+struct rte_compressdev *
rte_compressdev_pmd_allocate(const char *name, int socket_id)
{
struct rte_compressdev *compressdev;
return compressdev;
}
-int __rte_experimental
+int
rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
{
int ret;
return 0;
}
-uint16_t __rte_experimental
+uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id)
{
struct rte_compressdev *dev;
return 0;
}
-int __rte_experimental
+int
rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
struct rte_compressdev *dev;
return (*dev->dev_ops->dev_configure)(dev, config);
}
-int __rte_experimental
+int
rte_compressdev_start(uint8_t dev_id)
{
struct rte_compressdev *dev;
return 0;
}
-void __rte_experimental
+void
rte_compressdev_stop(uint8_t dev_id)
{
struct rte_compressdev *dev;
dev->data->dev_started = 0;
}
-int __rte_experimental
+int
rte_compressdev_close(uint8_t dev_id)
{
struct rte_compressdev *dev;
return 0;
}
-int __rte_experimental
+int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
uint32_t max_inflight_ops, int socket_id)
{
max_inflight_ops, socket_id);
}
-uint16_t __rte_experimental
+uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_comp_op **ops, uint16_t nb_ops)
{
return nb_ops;
}
-uint16_t __rte_experimental
+uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_comp_op **ops, uint16_t nb_ops)
{
dev->data->queue_pairs[qp_id], ops, nb_ops);
}
-int __rte_experimental
+int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
{
struct rte_compressdev *dev;
return 0;
}
-void __rte_experimental
+void
rte_compressdev_stats_reset(uint8_t dev_id)
{
struct rte_compressdev *dev;
}
-void __rte_experimental
+void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
struct rte_compressdev *dev;
dev_info->driver_name = dev->device->driver->name;
}
-int __rte_experimental
+int
rte_compressdev_private_xform_create(uint8_t dev_id,
const struct rte_comp_xform *xform,
void **priv_xform)
return 0;
}
-int __rte_experimental
+int
rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
{
struct rte_compressdev *dev;
return 0;
}
-int __rte_experimental
+int
rte_compressdev_stream_create(uint8_t dev_id,
const struct rte_comp_xform *xform,
void **stream)
}
-int __rte_experimental
+int
rte_compressdev_stream_free(uint8_t dev_id, void *stream)
{
struct rte_compressdev *dev;
return 0;
}
-const char * __rte_experimental
+const char *
rte_compressdev_name_get(uint8_t dev_id)
{
struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);
return 0;
}
-int __rte_experimental
+int
rte_compressdev_pmd_parse_input_args(
struct rte_compressdev_pmd_init_params *params,
const char *args)
return ret;
}
-struct rte_compressdev * __rte_experimental
+struct rte_compressdev *
rte_compressdev_pmd_create(const char *name,
struct rte_device *device,
size_t private_data_size,
return compressdev;
}
-int __rte_experimental
+int
rte_compressdev_pmd_destroy(struct rte_compressdev *compressdev)
{
int retval;
return -1;
}
-int __rte_experimental
+int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
const char *xform_string)
{
return -1;
}
-const struct rte_cryptodev_asymmetric_xform_capability * __rte_experimental
+const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
const struct rte_cryptodev_asym_capability_idx *idx)
{
return 0;
}
-int __rte_experimental
+int
rte_cryptodev_asym_xform_capability_check_optype(
const struct rte_cryptodev_asymmetric_xform_capability *capability,
enum rte_crypto_asym_op_type op_type)
return 0;
}
-int __rte_experimental
+int
rte_cryptodev_asym_xform_capability_check_modlen(
const struct rte_cryptodev_asymmetric_xform_capability *capability,
uint16_t modlen)
return 0;
}
-int __rte_experimental
+int
rte_cryptodev_asym_session_init(uint8_t dev_id,
struct rte_cryptodev_asym_session *sess,
struct rte_crypto_asym_xform *xforms,
return 0;
}
-struct rte_mempool * __rte_experimental
+struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
int socket_id)
return sess;
}
-struct rte_cryptodev_asym_session * __rte_experimental
+struct rte_cryptodev_asym_session *
rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
struct rte_cryptodev_asym_session *sess;
return 0;
}
-int __rte_experimental
+int
rte_cryptodev_asym_session_clear(uint8_t dev_id,
struct rte_cryptodev_asym_session *sess)
{
return 0;
}
-int __rte_experimental
+int
rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
{
uint8_t i;
rte_cryptodev_sym_session_data_size(&s));
}
-unsigned int __rte_experimental
+unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
struct rte_cryptodev_sym_session *sess)
{
rte_cryptodev_sym_session_data_size(sess));
}
-unsigned int __rte_experimental
+unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
/*
return priv_sess_size;
}
-unsigned int __rte_experimental
+unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
struct rte_cryptodev *dev;
}
-int __rte_experimental
+int
rte_cryptodev_sym_session_set_user_data(
struct rte_cryptodev_sym_session *sess,
void *data,
return 0;
}
-void * __rte_experimental
+void *
rte_cryptodev_sym_session_get_user_data(
struct rte_cryptodev_sym_session *sess)
{
static struct rte_class_list rte_class_list =
TAILQ_HEAD_INITIALIZER(rte_class_list);
-__rte_experimental void
+void
rte_class_register(struct rte_class *class)
{
RTE_VERIFY(class);
RTE_LOG(DEBUG, EAL, "Registered [%s] device class.\n", class->name);
}
-__rte_experimental void
+void
rte_class_unregister(struct rte_class *class)
{
TAILQ_REMOVE(&rte_class_list, class, next);
RTE_LOG(DEBUG, EAL, "Unregistered [%s] device class.\n", class->name);
}
-__rte_experimental
struct rte_class *
rte_class_find(const struct rte_class *start, rte_class_cmp_t cmp,
const void *data)
return strcmp(class->name, name);
}
-__rte_experimental
struct rte_class *
rte_class_find_by_name(const char *name)
{
return ret;
}
-int __rte_experimental
+int
rte_dev_event_callback_register(const char *device_name,
rte_dev_event_cb_fn cb_fn,
void *cb_arg)
return ret;
}
-int __rte_experimental
+int
rte_dev_event_callback_unregister(const char *device_name,
rte_dev_event_cb_fn cb_fn,
void *cb_arg)
return ret;
}
-void __rte_experimental
+void
rte_dev_event_callback_process(const char *device_name,
enum rte_dev_event_type event)
{
rte_spinlock_unlock(&dev_event_lock);
}
-__rte_experimental
int
rte_dev_iterator_init(struct rte_dev_iterator *it,
const char *dev_str)
it->device = dev;
return dev == NULL;
}
-__rte_experimental
struct rte_device *
rte_dev_iterator_next(struct rte_dev_iterator *it)
{
return 0;
}
-int __rte_experimental
+int
rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
unsigned int elt_sz)
{
return -1;
}
-int __rte_experimental
+int
rte_fbarray_attach(struct rte_fbarray *arr)
{
struct mem_area *ma = NULL, *tmp = NULL;
return -1;
}
-int __rte_experimental
+int
rte_fbarray_detach(struct rte_fbarray *arr)
{
struct mem_area *tmp = NULL;
return ret;
}
-int __rte_experimental
+int
rte_fbarray_destroy(struct rte_fbarray *arr)
{
struct mem_area *tmp = NULL;
return ret;
}
-void * __rte_experimental
+void *
rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx)
{
void *ret = NULL;
return ret;
}
-int __rte_experimental
+int
rte_fbarray_set_used(struct rte_fbarray *arr, unsigned int idx)
{
return set_used(arr, idx, true);
}
-int __rte_experimental
+int
rte_fbarray_set_free(struct rte_fbarray *arr, unsigned int idx)
{
return set_used(arr, idx, false);
}
-int __rte_experimental
+int
rte_fbarray_is_used(struct rte_fbarray *arr, unsigned int idx)
{
struct used_mask *msk;
return ret;
}
-int __rte_experimental
+int
rte_fbarray_find_next_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, true, false);
}
-int __rte_experimental
+int
rte_fbarray_find_next_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, true, true);
}
-int __rte_experimental
+int
rte_fbarray_find_prev_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, false, false);
}
-int __rte_experimental
+int
rte_fbarray_find_prev_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find(arr, start, false, true);
return ret;
}
-int __rte_experimental
+int
rte_fbarray_find_next_n_free(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return fbarray_find_n(arr, start, n, true, false);
}
-int __rte_experimental
+int
rte_fbarray_find_next_n_used(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return fbarray_find_n(arr, start, n, true, true);
}
-int __rte_experimental
+int
rte_fbarray_find_prev_n_free(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return fbarray_find_n(arr, start, n, false, false);
}
-int __rte_experimental
+int
rte_fbarray_find_prev_n_used(struct rte_fbarray *arr, unsigned int start,
unsigned int n)
{
return biggest_idx;
}
-int __rte_experimental
+int
rte_fbarray_find_biggest_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, false, false);
}
-int __rte_experimental
+int
rte_fbarray_find_biggest_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, true, false);
}
-int __rte_experimental
+int
rte_fbarray_find_rev_biggest_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, false, true);
}
-int __rte_experimental
+int
rte_fbarray_find_rev_biggest_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_biggest(arr, start, true, true);
}
-int __rte_experimental
+int
rte_fbarray_find_contig_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, true, false);
}
-int __rte_experimental
+int
rte_fbarray_find_contig_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, true, true);
}
-int __rte_experimental
+int
rte_fbarray_find_rev_contig_free(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, false, false);
}
-int __rte_experimental
+int
rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start)
{
return fbarray_find_contig(arr, start, false, true);
}
-int __rte_experimental
+int
rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt)
{
void *end;
return ret;
}
-void __rte_experimental
+void
rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f)
{
struct used_mask *msk;
}
/* Register an extended log type and try to pick its level from EAL options */
-int __rte_experimental
+int
rte_log_register_type_and_pick_level(const char *name, uint32_t level_def)
{
struct rte_eal_opt_loglevel *opt_ll;
return msl;
}
-__rte_experimental struct rte_memseg_list *
+struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
return virt2memseg_list(addr);
return 0;
}
-__rte_experimental void *
+void *
rte_mem_iova2virt(rte_iova_t iova)
{
struct virtiova vi;
return vi.virt;
}
-__rte_experimental struct rte_memseg *
+struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
return virt2memseg(addr, msl != NULL ? msl :
* Defining here because declared in rte_memory.h, but the actual implementation
* is in eal_common_memalloc.c, like all other memalloc internals.
*/
-int __rte_experimental
+int
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
void *arg)
{
return eal_memalloc_mem_event_callback_register(name, clb, arg);
}
-int __rte_experimental
+int
rte_mem_event_callback_unregister(const char *name, void *arg)
{
/* FreeBSD boots with legacy mem enabled by default */
return eal_memalloc_mem_event_callback_unregister(name, arg);
}
-int __rte_experimental
+int
rte_mem_alloc_validator_register(const char *name,
rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
limit);
}
-int __rte_experimental
+int
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
/* FreeBSD boots with legacy mem enabled by default */
return 0;
}
-int __rte_experimental
+int
rte_mem_check_dma_mask(uint8_t maskbits)
{
return check_dma_mask(maskbits, false);
}
-int __rte_experimental
+int
rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
{
return check_dma_mask(maskbits, true);
* initialization. PMDs should use rte_mem_check_dma_mask if addressing
* limitations by the device.
*/
-void __rte_experimental
+void
rte_mem_set_dma_mask(uint8_t maskbits)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return mlock((void *)aligned, page_size);
}
-int __rte_experimental
+int
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return 0;
}
-int __rte_experimental
+int
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return ret;
}
-int __rte_experimental
+int
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return 0;
}
-int __rte_experimental
+int
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return ret;
}
-int __rte_experimental
+int
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return 0;
}
-int __rte_experimental
+int
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return ret;
}
-int __rte_experimental
+int
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return ret;
}
-int __rte_experimental
+int
rte_memseg_get_fd(const struct rte_memseg *ms)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return ret;
}
-int __rte_experimental
+int
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
size_t *offset)
{
return ret;
}
-int __rte_experimental
+int
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return ret;
}
-int __rte_experimental
+int
rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
unsigned int n_pages, size_t page_sz)
{
return ret;
}
-int __rte_experimental
+int
rte_extmem_unregister(void *va_addr, size_t len)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return ret;
}
-int __rte_experimental
+int
rte_extmem_attach(void *va_addr, size_t len)
{
return sync_memory(va_addr, len, true);
}
-int __rte_experimental
+int
rte_extmem_detach(void *va_addr, size_t len)
{
return sync_memory(va_addr, len, false);
return 0;
}
-int __rte_experimental
+int
rte_mp_action_register(const char *name, rte_mp_t action)
{
struct action_entry *entry;
return 0;
}
-void __rte_experimental
+void
rte_mp_action_unregister(const char *name)
{
struct action_entry *entry;
return 0;
}
-int __rte_experimental
+int
rte_mp_sendmsg(struct rte_mp_msg *msg)
{
if (check_input(msg) != 0)
return 0;
}
-int __rte_experimental
+int
rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
const struct timespec *ts)
{
return ret;
}
-int __rte_experimental
+int
rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
rte_mp_async_reply_t clb)
{
return -1;
}
-int __rte_experimental
+int
rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
{
RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
rte_pause();
}
-void __rte_experimental
+void
rte_delay_us_sleep(unsigned int us)
{
struct timespec wait[2];
/*
* Function to dump contents of all heaps
*/
-void __rte_experimental
+void
rte_malloc_dump_heaps(FILE *f)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
return -1;
}
-__rte_experimental
int
rte_option_register(struct rte_option *opt)
{
return __rte_rand_lfsr258(state);
}
-uint64_t __rte_experimental
+uint64_t
rte_rand_max(uint64_t upper_bound)
{
struct rte_rand_state *state;
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_may_be_active(uint32_t id)
{
uint32_t ids[RTE_MAX_LCORE] = {0};
}
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
uint64_t *attr_value)
{
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
struct core_state *cs;
return fctret;
}
-int __rte_experimental
+int
rte_eal_cleanup(void)
{
rte_service_finalize();
#include <rte_compat.h>
#include <rte_dev.h>
-int __rte_experimental
+int
rte_dev_event_monitor_start(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return -1;
}
-int __rte_experimental
+int
rte_dev_event_monitor_stop(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return -1;
}
-int __rte_experimental
+int
rte_dev_hotplug_handle_enable(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return -1;
}
-int __rte_experimental
+int
rte_dev_hotplug_handle_disable(void)
{
RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
return ret;
}
-int __rte_experimental
+int
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb_fn, void *cb_arg,
rte_intr_unregister_callback_fn ucb_fn)
rte_spinlock_unlock(&failure_handle_lock);
}
-int __rte_experimental
+int
rte_dev_event_monitor_start(void)
{
int ret;
return 0;
}
-int __rte_experimental
+int
rte_dev_event_monitor_stop(void)
{
int ret;
return rte_errno;
}
-int __rte_experimental
+int
rte_dev_hotplug_handle_enable(void)
{
int ret = 0;
return ret;
}
-int __rte_experimental
+int
rte_dev_hotplug_handle_disable(void)
{
int ret = 0;
return ret;
}
-int __rte_experimental
+int
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb_fn, void *cb_arg,
rte_intr_unregister_callback_fn ucb_fn)
return port_id;
}
-int __rte_experimental
+int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
rte_eth_dev_shared_data_prepare();
return 0;
}
-int __rte_experimental
+int
rte_eth_dev_owner_set(const uint16_t port_id,
const struct rte_eth_dev_owner *owner)
{
return ret;
}
-int __rte_experimental
+int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
return ret;
}
-void __rte_experimental
+void
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
uint16_t port_id;
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
}
-int __rte_experimental
+int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
int ret = 0;
return eth_err(port_id, ret);
}
-int __rte_experimental
+int
rte_eth_dev_is_removed(uint16_t port_id)
{
struct rte_eth_dev *dev;
return 0;
}
-int __rte_experimental
+int
rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
{
struct rte_intr_handle *intr_handle;
RTE_MEMZONE_IOVA_CONTIG, align);
}
-int __rte_experimental
+int
rte_eth_dev_create(struct rte_device *device, const char *name,
size_t priv_data_size,
ethdev_bus_specific_init ethdev_bus_specific_init,
return retval;
}
-int __rte_experimental
+int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
ethdev_uninit_t ethdev_uninit)
{
return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
-int __rte_experimental
+int
rte_eth_dev_get_module_info(uint16_t port_id,
struct rte_eth_dev_module_info *modinfo)
{
return (*dev->dev_ops->get_module_info)(dev, modinfo);
}
-int __rte_experimental
+int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
struct rte_dev_eeprom_info *info)
{
enum rte_eth_switch_domain_state state;
} rte_eth_switch_domains[RTE_MAX_ETHPORTS];
-int __rte_experimental
+int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
unsigned int i;
return -ENOSPC;
}
-int __rte_experimental
+int
rte_eth_switch_domain_free(uint16_t domain_id)
{
if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
}
}
-int __rte_experimental
+int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
struct rte_kvargs args;
* Expand RSS flows into several possible flows according to the RSS hash
* fields requested and the driver capabilities.
*/
-int __rte_experimental
+int
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
const struct rte_flow_item *pattern, uint64_t types,
const struct rte_flow_expand_node graph[],
})
/* MTR capabilities get */
-int __rte_experimental
+int
rte_mtr_capabilities_get(uint16_t port_id,
struct rte_mtr_capabilities *cap,
struct rte_mtr_error *error)
}
/* MTR meter profile add */
-int __rte_experimental
+int
rte_mtr_meter_profile_add(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_meter_profile *profile,
}
/** MTR meter profile delete */
-int __rte_experimental
+int
rte_mtr_meter_profile_delete(uint16_t port_id,
uint32_t meter_profile_id,
struct rte_mtr_error *error)
}
/** MTR object create */
-int __rte_experimental
+int
rte_mtr_create(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_params *params,
}
/** MTR object destroy */
-int __rte_experimental
+int
rte_mtr_destroy(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
}
/** MTR object meter enable */
-int __rte_experimental
+int
rte_mtr_meter_enable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
}
/** MTR object meter disable */
-int __rte_experimental
+int
rte_mtr_meter_disable(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_error *error)
}
/** MTR object meter profile update */
-int __rte_experimental
+int
rte_mtr_meter_profile_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t meter_profile_id,
}
/** MTR object meter DSCP table update */
-int __rte_experimental
+int
rte_mtr_meter_dscp_table_update(uint16_t port_id,
uint32_t mtr_id,
enum rte_color *dscp_table,
}
/** MTR object policer action update */
-int __rte_experimental
+int
rte_mtr_policer_actions_update(uint16_t port_id,
uint32_t mtr_id,
uint32_t action_mask,
}
/** MTR object enabled stats update */
-int __rte_experimental
+int
rte_mtr_stats_update(uint16_t port_id,
uint32_t mtr_id,
uint64_t stats_mask,
}
/** MTR object stats read */
-int __rte_experimental
+int
rte_mtr_stats_read(uint16_t port_id,
uint32_t mtr_id,
struct rte_mtr_stats *stats,
return rxa_ctrl(id, 0);
}
-int __rte_experimental
+int
rte_event_eth_rx_adapter_stats_get(uint8_t id,
struct rte_event_eth_rx_adapter_stats *stats)
{
return rx_adapter->service_inited ? 0 : -ESRCH;
}
-int __rte_experimental
+int
rte_event_eth_rx_adapter_cb_register(uint8_t id,
uint16_t eth_dev_id,
rte_event_eth_rx_adapter_cb_fn cb_fn,
void *entry_ptr; /* handle to the table entry for rule meta data */
};
-int __rte_experimental
+int
rte_flow_classify_validate(
struct rte_flow_classifier *cls,
const struct rte_flow_attr *attr,
return 0;
}
-struct rte_flow_classifier * __rte_experimental
+struct rte_flow_classifier *
rte_flow_classifier_create(struct rte_flow_classifier_params *params)
{
struct rte_flow_classifier *cls;
table->ops.f_free(table->h_table);
}
-int __rte_experimental
+int
rte_flow_classifier_free(struct rte_flow_classifier *cls)
{
uint32_t i;
return 0;
}
-int __rte_experimental
+int
rte_flow_classify_table_create(struct rte_flow_classifier *cls,
struct rte_flow_classify_table_params *params)
{
return rule;
}
-struct rte_flow_classify_rule * __rte_experimental
+struct rte_flow_classify_rule *
rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
return NULL;
}
-int __rte_experimental
+int
rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
struct rte_flow_classify_rule *rule)
{
return ret;
}
-int __rte_experimental
+int
rte_flow_classifier_query(struct rte_flow_classifier *cls,
struct rte_mbuf **pkts,
const uint16_t nb_pkts,
return 0;
}
-int __rte_experimental
+int
rte_hash_free_key_with_position(const struct rte_hash *h,
const int32_t position)
{
}
/* Delete expired fragments */
-void __rte_experimental
+void
rte_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr, uint64_t tms)
{
return 0;
}
-uint64_t __rte_experimental
+uint64_t
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
{
return sa->type;
return sz;
}
-void __rte_experimental
+void
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
memset(sa, 0, sa->size);
((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
}
-int __rte_experimental
+int
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
uint64_t type;
return ipsec_sa_size(type, &wsz, &nb);
}
-int __rte_experimental
+int
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
uint32_t size)
{
return 0;
}
-int __rte_experimental
+int
rte_ipsec_session_prepare(struct rte_ipsec_session *ss)
{
int32_t rc;
return 0;
}
-int __rte_experimental
+int
rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
{
char path[64];
return kvlist;
}
-__rte_experimental
struct rte_kvargs *
rte_kvargs_parse_delim(const char *args, const char * const valid_keys[],
const char *valid_ends)
return kvlist;
}
-__rte_experimental
int
rte_kvargs_strcmp(const char *key __rte_unused,
const char *value, void *opaque)
rte_panic("%s\n", reason);
}
-__rte_experimental
int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
const char **reason)
{
return 0;
}
-int __rte_experimental
+int
rte_meter_trtcm_rfc4115_profile_config(
struct rte_meter_trtcm_rfc4115_profile *p,
struct rte_meter_trtcm_rfc4115_params *params)
return 0;
}
-int __rte_experimental
+int
rte_meter_trtcm_rfc4115_config(
struct rte_meter_trtcm_rfc4115 *m,
struct rte_meter_trtcm_rfc4115_profile *p)
#include <rte_arp.h>
#define RARP_PKT_SIZE 64
-struct rte_mbuf * __rte_experimental
+struct rte_mbuf *
rte_net_make_rarp_packet(struct rte_mempool *mpool,
const struct rte_ether_addr *mac)
{
}
/* parse ipv6 extended headers, update offset and return next proto */
-int __rte_experimental
+int
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
int *frag)
{
return 0;
}
-void __rte_experimental
+void
rte_empty_poll_detection(struct rte_timer *tim, void *arg)
{
}
-int __rte_experimental
+int
rte_power_empty_poll_stat_init(struct ep_params **eptr, uint8_t *freq_tlb,
struct ep_policy *policy)
{
return 0;
}
-void __rte_experimental
+void
rte_power_empty_poll_stat_free(void)
{
rte_free(ep_params);
}
-int __rte_experimental
+int
rte_power_empty_poll_stat_update(unsigned int lcore_id)
{
struct priority_worker *poll_stats;
return 0;
}
-int __rte_experimental
+int
rte_power_poll_stat_update(unsigned int lcore_id, uint8_t nb_pkt)
{
}
-uint64_t __rte_experimental
+uint64_t
rte_power_empty_poll_stat_fetch(unsigned int lcore_id)
{
struct priority_worker *poll_stats;
return poll_stats->empty_dequeues;
}
-uint64_t __rte_experimental
+uint64_t
rte_power_poll_stat_fetch(unsigned int lcore_id)
{
struct priority_worker *poll_stats;
#include "rte_rcu_qsbr.h"
/* Get the memory size of QSBR variable */
-size_t __rte_experimental
+size_t
rte_rcu_qsbr_get_memsize(uint32_t max_threads)
{
size_t sz;
}
/* Initialize a quiescent state variable */
-int __rte_experimental
+int
rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads)
{
size_t sz;
/* Register a reader thread to report its quiescent state
* on a QS variable.
*/
-int __rte_experimental
+int
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
unsigned int i, id, success;
/* Remove a reader thread, from the list of threads reporting their
* quiescent state on a QS variable.
*/
-int __rte_experimental
+int
rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
unsigned int i, id, success;
}
/* Wait till the reader threads have entered quiescent state. */
-void __rte_experimental
+void
rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
uint64_t t;
}
/* Dump the details of a single quiescent state variable to a file. */
-int __rte_experimental
+int
rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
{
uint64_t bmap;
return 0;
}
-int __rte_experimental
+int
rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
struct rte_sched_pipe_params *params,
uint32_t *pipe_profile_id)
return sess;
}
-int __rte_experimental
+int
rte_security_session_update(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_session_conf *conf)
return instance->ops->session_get_size(instance->device);
}
-int __rte_experimental
+int
rte_security_session_stats_get(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_stats *stats)
sess, m, params);
}
-void * __rte_experimental
+void *
rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md)
{
void *userdata = NULL;
return -1;
}
-int32_t __rte_experimental
+int32_t
rte_telemetry_init()
{
int ret;
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_telemetry_cleanup(void)
{
int ret;
return sockfd;
}
-int32_t __rte_experimental
+int32_t
rte_telemetry_selftest(void)
{
const char *invalid_client_path = SELFTEST_INVALID_CLIENT;
return -1;
}
-int32_t __rte_experimental
+int32_t
rte_telemetry_parse(struct telemetry_impl *telemetry, char *socket_rx_data)
{
int ret, action_int;
timer_data = &rte_timer_data_arr[id]; \
} while (0)
-int __rte_experimental
+int
rte_timer_data_alloc(uint32_t *id_ptr)
{
int i;
return -ENOSPC;
}
-int __rte_experimental
+int
rte_timer_data_dealloc(uint32_t id)
{
struct rte_timer_data *timer_data;
rte_timer_subsystem_init_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_subsystem_init, _v1905, 19.05);
-void __rte_experimental
+void
rte_timer_subsystem_finalize(void)
{
if (!rte_timer_subsystem_initialized)
rte_timer_reset_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_reset, _v1905, 19.05);
-int __rte_experimental
+int
rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
uint64_t ticks, enum rte_timer_type type,
unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
rte_timer_stop_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_stop, _v1905, 19.05);
-int __rte_experimental
+int
rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
{
struct rte_timer_data *timer_data;
MAP_STATIC_SYMBOL(int rte_timer_manage(void), rte_timer_manage_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_manage, _v1905, 19.05);
-int __rte_experimental
+int
rte_timer_alt_manage(uint32_t timer_data_id,
unsigned int *poll_lcores,
int nb_poll_lcores,
}
/* Walk pending lists, stopping timers and calling user-specified function */
-int __rte_experimental
+int
rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
int nb_walk_lcores,
rte_timer_stop_all_cb_t f, void *f_arg)
rte_timer_dump_stats_v1905);
BIND_DEFAULT_SYMBOL(rte_timer_dump_stats, _v1905, 19.05);
-int __rte_experimental
+int
rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
{
struct rte_timer_data *timer_data;
return vdpa_device_num;
}
-int __rte_experimental
+int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
struct virtio_net *dev = get_device(vid);
return processed;
}
-int __rte_experimental
+int
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
struct rte_mempool *sess_pool,
struct rte_mempool *sess_priv_pool,
return ret;
}
-int __rte_experimental
+int
rte_vhost_crypto_free(int vid)
{
struct virtio_net *dev = get_device(vid);
return 0;
}
-int __rte_experimental
+int
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
struct virtio_net *dev = get_device(vid);
return 0;
}
-uint16_t __rte_experimental
+uint16_t
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
return i;
}
-uint16_t __rte_experimental
+uint16_t
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{