M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
F: lib/librte_ether/rte_mtr*
+Baseband API - EXPERIMENTAL
+M: Amr Mokhtar <amr.mokhtar@intel.com>
+F: lib/librte_bbdev/
+F: doc/guides/prog_guide/bbdev.rst
+
Crypto API
M: Declan Doherty <declan.doherty@intel.com>
T: git://dpdk.org/next/dpdk-next-crypto
#
CONFIG_RTE_PMD_PACKET_PREFETCH=y
+# Compile generic wireless base band device library
+# EXPERIMENTAL: API may change without prior notice
+#
+CONFIG_RTE_LIBRTE_BBDEV=y
+CONFIG_RTE_BBDEV_MAX_DEVS=128
+
#
# Compile generic crypto device library
#
[rte_flow] (@ref rte_flow.h),
[rte_tm] (@ref rte_tm.h),
[rte_mtr] (@ref rte_mtr.h),
+ [bbdev] (@ref rte_bbdev.h),
[cryptodev] (@ref rte_cryptodev.h),
[security] (@ref rte_security.h),
[eventdev] (@ref rte_eventdev.h),
lib/librte_eal/common/include \
lib/librte_eal/common/include/generic \
lib/librte_acl \
+ lib/librte_bbdev \
lib/librte_bitratestats \
lib/librte_cfgfile \
lib/librte_cmdline \
--- /dev/null
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2017 Intel Corporation
+
+Wireless Baseband Device Library
+================================
+
+The Wireless Baseband library provides a common programming framework that
+abstracts HW accelerators based on FPGA and/or Fixed Function Accelerators that
+assist with 3GPP Physical Layer processing. Furthermore, it decouples the
+application from the compute-intensive wireless functions by abstracting their
+optimized libraries to appear as virtual bbdev devices.
+
+The functional scope of the BBDEV library covers the functions related to the
+3GPP Layer 1 signal processing (channel coding, modulation, ...).
+
+The framework currently only supports the Turbo Code FEC function.
+
+
+Design Principles
+-----------------
+
+The Wireless Baseband library follows the same principles as DPDK's Ethernet
+Device and Crypto Device frameworks. Wireless Baseband provides a generic
+acceleration abstraction framework which supports both physical (hardware) and
+virtual (software) wireless acceleration functions.
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical bbdev devices are discovered during the PCI probe/enumeration, which
+is executed at DPDK initialization by the EAL. Each physical device is
+identified by its PCI device identifier, i.e. its unique PCI BDF (bus, device,
+function) address.
+
+Virtual devices can be created by two mechanisms, either using the EAL command
+line options or from within the application using an EAL API directly.
+
+From the command line, using the ``--vdev`` EAL option:
+
+.. code-block:: console
+
+ --vdev 'turbo_sw,max_nb_queues=8,socket_id=0'
+
+Or using the ``rte_vdev_init`` API within the application code:
+
+.. code-block:: c
+
+ rte_vdev_init("turbo_sw", "max_nb_queues=2,socket_id=0")
+
+All virtual bbdev devices support the following initialization parameters:
+
+- ``max_nb_queues`` - maximum number of queues supported by the device.
+
+- ``socket_id`` - socket on which to allocate the device resources.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each device, whether virtual or physical, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the bbdev device in all functions
+ exported by the bbdev API.
+
+- A device name used to designate the bbdev device in console messages, for
+  administration or debugging purposes. For ease of use, the device name
+  includes the device index.
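+
+As a short illustrative sketch, an application can walk all detected devices
+with the ``RTE_BBDEV_FOREACH`` macro and obtain each device's name through
+``rte_bbdev_info_get()``:
+
+.. code-block:: c
+
+    uint16_t dev_id;
+    struct rte_bbdev_info info;
+
+    RTE_BBDEV_FOREACH(dev_id) {
+        rte_bbdev_info_get(dev_id, &info);
+        printf("bbdev %u: %s\n", dev_id, info.dev_name);
+    }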
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+From the application point of view, each instance of a bbdev device consists of
+one or more queues identified by queue IDs. While different devices may have
+different capabilities (e.g. support different operation types), all queues on
+a device support identical configuration possibilities. A queue is configured
+for only one type of operation and is configured at initialization time.
+When an operation is enqueued to a specific queue ID, the result is dequeued
+from the same queue ID.
+
+Configuration of a device has two different levels: configuration that applies
+to the whole device, and configuration that applies to a single queue.
+
+Device configuration is applied with
+``rte_bbdev_setup_queues(dev_id, num_queues, socket_id)``
+and queue configuration is applied with
+``rte_bbdev_queue_configure(dev_id, queue_id, conf)``. Note that, although all
+queues on a device support the same capabilities, they can be configured
+differently and will then behave differently.
+Devices supporting interrupts can enable them by using
+``rte_bbdev_intr_enable(dev_id)``.
+
+The configuration of each bbdev device includes the following operations:
+
+- Allocation of resources, including hardware resources if a physical device.
+- Resetting the device into a well-known default state.
+- Initialization of statistics counters.
+
+The ``rte_bbdev_setup_queues`` API is used to setup queues for a bbdev device.
+
+.. code-block:: c
+
+ int rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues,
+ int socket_id);
+
+- The ``num_queues`` argument identifies the total number of queues to set up
+  for this device.
+
+- ``socket_id`` specifies which socket will be used to allocate the memory.
+
+
+The ``rte_bbdev_intr_enable`` API is used to enable interrupts for a bbdev
+device, if supported by the driver. It should be called before starting the
+device.
+
+.. code-block:: c
+
+ int rte_bbdev_intr_enable(uint16_t dev_id);
+
+
+Queues Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+Each queue of a bbdev device is individually configured through the
+``rte_bbdev_queue_configure()`` API.
+Each queue's resources may be allocated on a specified socket.
+
+.. code-block:: c
+
+ struct rte_bbdev_queue_conf {
+ int socket;
+ uint32_t queue_size;
+ uint8_t priority;
+ bool deferred_start;
+ enum rte_bbdev_op_type op_type;
+ };
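+
+For illustration, a queue might be configured for Turbo decode operations as
+follows; the queue size and priority shown are arbitrary example values:
+
+.. code-block:: c
+
+    struct rte_bbdev_queue_conf qconf = {
+        .socket = rte_socket_id(),
+        .queue_size = 512,
+        .priority = 0,
+        .deferred_start = false,
+        .op_type = RTE_BBDEV_OP_TURBO_DEC,
+    };
+
+    ret = rte_bbdev_queue_configure(dev_id, queue_id, &qconf);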
+
+Device & Queues Management
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After initialization, devices are in a stopped state, so they must be started
+by the application. If an application is finished using a device, it can close
+the device. Once closed, it cannot be restarted.
+
+.. code-block:: c
+
+ int rte_bbdev_start(uint16_t dev_id)
+ int rte_bbdev_stop(uint16_t dev_id)
+ int rte_bbdev_close(uint16_t dev_id)
+ int rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
+ int rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
+
+
+By default, all queues are started when the device is started, but they can be
+stopped individually.
+
+.. code-block:: c
+
+ int rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
+ int rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
+
+
+Logical Cores, Memory and Queues Relationships
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The bbdev library, like the Poll Mode Driver library, supports NUMA so that a
+processor's logical cores and interfaces can utilize the processor's local
+memory. Therefore, for baseband operations, the mbufs being operated on should
+be allocated from memory pools created in local memory. The buffers should, if
+possible, remain on the local processor to obtain the best performance
+results, and buffer descriptors should be populated with mbufs allocated from
+a mempool allocated in local memory.
+
+The run-to-completion model also performs better, especially in the case of
+virtual bbdev devices, if the baseband operation and data buffers are in local
+memory instead of a remote processor's memory. This is also true for the
+pipeline model, provided all logical cores used are located on the same
+processor.
+
+Multiple logical cores should never share the same queue for enqueuing
+operations or dequeuing operations on the same bbdev device since this would
+require global locks and hinder performance. It is however possible to use a
+different logical core to dequeue operations from a queue than the logical
+core on which they were enqueued. This means that the baseband burst
+enqueue/dequeue APIs are a logical place to transition from one logical core
+to another in a packet processing pipeline, as the sketch below shows.
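+
+A minimal sketch of this hand-over pattern, assuming ``dev_id`` and ``q_id``
+identify an already configured and started queue of encode type:
+
+.. code-block:: c
+
+    /* Producer logical core: enqueue a burst of operations */
+    uint16_t enq = rte_bbdev_enqueue_enc_ops(dev_id, q_id, ops_tx, burst_sz);
+
+    /* Consumer logical core: dequeue processed operations from same queue */
+    uint16_t deq = rte_bbdev_dequeue_enc_ops(dev_id, q_id, ops_rx, burst_sz);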
+
+
+Device Operation Capabilities
+-----------------------------
+
+Capabilities (in terms of operations supported, max number of queues, etc.)
+identify what a bbdev is capable of performing, and differ from one device to
+another. For the full scope of the bbdev capability see the definition of the
+structure in the *DPDK API Reference*.
+
+.. code-block:: c
+
+ struct rte_bbdev_op_cap;
+
+A device reports its capabilities when registering itself in the bbdev framework.
+With the aid of this capabilities mechanism, an application can query devices to
+discover which operations within the 3GPP physical layer they are capable of
+performing. Below is an example of the capabilities of a PMD that supports
+Turbo Encoding and Decoding operations.
+
+.. code-block:: c
+
+ static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+ {
+ .type = RTE_BBDEV_OP_TURBO_DEC,
+ .cap.turbo_dec = {
+ .capability_flags =
+ RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
+ RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
+ RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
+ RTE_BBDEV_TURBO_CRC_TYPE_24B,
+ .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
+ .num_buffers_hard_out =
+ RTE_BBDEV_MAX_CODE_BLOCKS,
+ .num_buffers_soft_out = 0,
+ }
+ },
+ {
+ .type = RTE_BBDEV_OP_TURBO_ENC,
+ .cap.turbo_enc = {
+ .capability_flags =
+ RTE_BBDEV_TURBO_CRC_24B_ATTACH |
+ RTE_BBDEV_TURBO_RATE_MATCH |
+ RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
+ .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
+ .num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS,
+ }
+ },
+ RTE_BBDEV_END_OF_CAPABILITIES_LIST()
+ };
+
+Capabilities Discovery
+~~~~~~~~~~~~~~~~~~~~~~
+
+Discovering the features and capabilities of a bbdev device poll mode driver
+is achieved through the ``rte_bbdev_info_get()`` function.
+
+.. code-block:: c
+
+ int rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
+
+This allows the user to query a specific bbdev PMD and get all the device
+capabilities. The ``rte_bbdev_info`` structure provides two levels of
+information:
+
+- Device relevant information, such as the name and the related rte_bus.
+
+- Driver specific information, as defined by the ``struct rte_bbdev_driver_info``
+  structure. This is where the capabilities reside, along with other specifics
+  such as maximum queue sizes and priority level.
+
+.. code-block:: c
+
+ struct rte_bbdev_info {
+ int socket_id;
+ const char *dev_name;
+ const struct rte_bus *bus;
+ uint16_t num_queues;
+ bool started;
+ struct rte_bbdev_driver_info drv;
+ };
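+
+As an example, an application could check whether a device supports Turbo
+encode by walking the capability list. This is a sketch which assumes ``info``
+was previously filled in by ``rte_bbdev_info_get()``:
+
+.. code-block:: c
+
+    const struct rte_bbdev_op_cap *cap;
+    bool has_turbo_enc = false;
+
+    for (cap = info.drv.capabilities;
+            cap->type != RTE_BBDEV_OP_NONE; cap++) {
+        if (cap->type == RTE_BBDEV_OP_TURBO_ENC) {
+            has_turbo_enc = true;
+            break;
+        }
+    }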
+
+Operation Processing
+--------------------
+
+Scheduling of baseband operations on DPDK's application data path is
+performed using a burst oriented asynchronous API set. A queue on a bbdev
+device accepts a burst of baseband operations using the enqueue burst API. On
+physical bbdev devices the enqueue burst API will place the operations to be
+processed on the device's hardware input queue; for virtual devices the
+processing of the baseband operations is usually completed during the enqueue
+call to the bbdev device. The dequeue burst API will retrieve any processed
+operations available from the queue on the bbdev device; for physical devices
+this is usually directly from the device's processed queue, and for virtual
+devices from a ``rte_ring`` where processed operations are placed after being
+processed on the enqueue call.
+
+
+Enqueue / Dequeue Burst APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The burst enqueue API uses a bbdev device identifier and a queue
+identifier to specify the bbdev device queue to schedule the processing on.
+The ``num_ops`` parameter is the number of operations to process which are
+supplied in the ``ops`` array of ``rte_bbdev_*_op`` structures.
+The enqueue function returns the number of operations it actually enqueued for
+processing; a return value equal to ``num_ops`` means that all operations have
+been enqueued.
+
+.. code-block:: c
+
+ uint16_t rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_enc_op **ops, uint16_t num_ops)
+
+ uint16_t rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_dec_op **ops, uint16_t num_ops)
+
+The dequeue API uses the same format as the enqueue API, but the ``num_ops``
+and ``ops`` parameters are now used to specify the maximum number of processed
+operations the user wishes to retrieve and the location in which to store
+them. The API call returns the actual number of processed operations returned;
+this can never be larger than ``num_ops``.
+
+.. code-block:: c
+
+ uint16_t rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_enc_op **ops, uint16_t num_ops)
+
+ uint16_t rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_dec_op **ops, uint16_t num_ops)
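+
+Since a single dequeue call may return fewer operations than were enqueued, an
+application will typically poll until all in-flight operations have been
+retrieved. A minimal sketch, assuming ``enq_total`` operations were previously
+enqueued on ``queue_id`` and ``ops`` is large enough to hold them:
+
+.. code-block:: c
+
+    uint16_t deq_total = 0;
+
+    while (deq_total < enq_total)
+        deq_total += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
+                &ops[deq_total], enq_total - deq_total);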
+
+Operation Representation
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+An encode bbdev operation is represented by the ``rte_bbdev_enc_op`` structure,
+and a decode operation by ``rte_bbdev_dec_op``. These structures act as metadata
+containers for all necessary information required for the bbdev operation to be
+processed on a particular bbdev device poll mode driver.
+
+.. code-block:: c
+
+ struct rte_bbdev_enc_op {
+ int status;
+ struct rte_mempool *mempool;
+ void *opaque_data;
+ struct rte_bbdev_op_turbo_enc turbo_enc;
+ };
+
+ struct rte_bbdev_dec_op {
+ int status;
+ struct rte_mempool *mempool;
+ void *opaque_data;
+ struct rte_bbdev_op_turbo_dec turbo_dec;
+ };
+
+The operation structure by itself defines the operation type. It includes an
+operation status and a reference to the operation specific data, which can
+vary in size and content depending on the operation being provisioned. It also
+contains the source mempool for the operation, if it was allocated from a
+mempool.
+
+If bbdev operations are allocated from a bbdev operation mempool (see the next
+section), there is also the ability to allocate private memory with the
+operation for the application's purposes.
+
+Application software is responsible for specifying all the operation specific
+fields in the ``rte_bbdev_*_op`` structure which are then used by the bbdev PMD
+to process the requested operation.
+
+
+Operation Management and Allocation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The bbdev library provides an API set for managing bbdev operations which
+utilizes the Mempool Library to allocate operation buffers. It therefore
+ensures that the bbdev operation is interleaved optimally across the channels
+and ranks for optimal processing.
+
+.. code-block:: c
+
+ struct rte_mempool *
+ rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
+ unsigned int num_elements, unsigned int cache_size,
+ int socket_id)
+
+``rte_bbdev_*_op_alloc_bulk()`` is used to allocate bbdev operations of a
+specific type from a given bbdev operation mempool.
+
+.. code-block:: c
+
+ int rte_bbdev_enc_op_alloc_bulk(struct rte_mempool *mempool,
+ struct rte_bbdev_enc_op **ops, uint16_t num_ops)
+
+ int rte_bbdev_dec_op_alloc_bulk(struct rte_mempool *mempool,
+ struct rte_bbdev_dec_op **ops, uint16_t num_ops)
+
+``rte_bbdev_*_op_free_bulk()`` is called by the application to return an
+operation to its allocating pool.
+
+.. code-block:: c
+
+ void rte_bbdev_dec_op_free_bulk(struct rte_bbdev_dec_op **ops,
+ unsigned int num_ops)
+ void rte_bbdev_enc_op_free_bulk(struct rte_bbdev_enc_op **ops,
+ unsigned int num_ops)
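+
+Putting these calls together, a typical allocate/process/free round trip might
+look like the following sketch, where ``NUM_ELEMS``, ``CACHE_SIZE`` and
+``BURST`` are example values chosen by the application:
+
+.. code-block:: c
+
+    struct rte_mempool *enc_pool;
+    struct rte_bbdev_enc_op *ops[BURST];
+
+    enc_pool = rte_bbdev_op_pool_create("enc_op_pool",
+            RTE_BBDEV_OP_TURBO_ENC, NUM_ELEMS, CACHE_SIZE,
+            rte_socket_id());
+
+    if (rte_bbdev_enc_op_alloc_bulk(enc_pool, ops, BURST) == 0) {
+        /* ... fill in operations and enqueue/dequeue them ... */
+        rte_bbdev_enc_op_free_bulk(ops, BURST);
+    }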
+
+BBDEV Operations
+~~~~~~~~~~~~~~~~
+
+The bbdev operation structure contains all the mutable data relating to
+performing Turbo code processing on a referenced mbuf data buffer. It is used
+for either encode or decode operations.
+
+Turbo Encode operation accepts one input and one output.
+
+Turbo Decode operation accepts one input and two outputs, called *hard-decision*
+and *soft-decision* outputs. *Soft-decision* output is optional.
+
+It is expected that the application provides input and output ``mbuf`` pointers
+allocated and ready to use. The baseband framework supports turbo coding on
+Code Blocks (CB) and Transport Blocks (TB).
+
+For the output buffer(s), the application only needs to provide an allocated,
+unused mbuf (containing only one mbuf segment), so that bbdev can write the
+operation outcome to it.
+
+**Turbo Encode Op structure**
+
+.. code-block:: c
+
+ struct rte_bbdev_op_turbo_enc {
+ struct rte_bbdev_op_data input;
+ struct rte_bbdev_op_data output;
+
+ uint32_t op_flags;
+ uint8_t rv_index;
+ uint8_t code_block_mode;
+ union {
+ struct rte_bbdev_op_enc_cb_params cb_params;
+ struct rte_bbdev_op_enc_tb_params tb_params;
+ };
+ };
+
+
+**Turbo Decode Op structure**
+
+.. code-block:: c
+
+ struct rte_bbdev_op_turbo_dec {
+ struct rte_bbdev_op_data input;
+ struct rte_bbdev_op_data hard_output;
+ struct rte_bbdev_op_data soft_output;
+
+ uint32_t op_flags;
+ uint8_t rv_index;
+ uint8_t iter_min:4;
+ uint8_t iter_max:4;
+ uint8_t iter_count;
+ uint8_t ext_scale;
+ uint8_t num_maps;
+ uint8_t code_block_mode;
+ union {
+ struct rte_bbdev_op_dec_cb_params cb_params;
+ struct rte_bbdev_op_dec_tb_params tb_params;
+ };
+ };
+
+Input and output data buffers are identified by the ``rte_bbdev_op_data``
+structure. This structure has three elements:
+
+- ``data`` - This is the mbuf reference
+
+- ``offset`` - The starting point for the Turbo input/output, in bytes, from
+  the start of the data in the data buffer. It must be smaller than the
+  ``data_len`` of the mbuf's first segment
+
+- ``length`` - The length, in bytes, of the buffer on which the Turbo operation
+  will or has been computed. For the input, the length is set by the
+  application. For the output(s), the length is computed by the bbdev PMD.
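+
+For instance, pointing an encode operation's input and output at data buffers
+could look like the sketch below, where ``op`` is an allocated encode
+operation, ``in_mbuf`` holds ``data_len`` bytes of input at offset 0, and
+``out_mbuf`` is an allocated, unused mbuf:
+
+.. code-block:: c
+
+    op->turbo_enc.input.data = in_mbuf;     /* mbuf carrying the input */
+    op->turbo_enc.input.offset = 0;         /* start of the Turbo input */
+    op->turbo_enc.input.length = data_len;  /* bytes to process */
+    op->turbo_enc.output.data = out_mbuf;   /* bbdev writes the outcome here */
+    op->turbo_enc.output.offset = 0;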
+
+Sample code
+-----------
+
+The baseband device sample application gives an introduction on how to use the
+bbdev framework, by giving sample code that performs a loop-back operation with
+a baseband processor capable of transceiving data packets.
+
+The following sample C-like pseudo-code shows the basic steps to encode several
+buffers using the (**turbo_sw**) bbdev PMD.
+
+.. code-block:: c
+
+ /* EAL Init */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+
+ /* Get number of available bbdev devices */
+ nb_bbdevs = rte_bbdev_count();
+ if (nb_bbdevs == 0)
+ rte_exit(EXIT_FAILURE, "No bbdevs detected!\n");
+
+ /* Create bbdev op pools */
+ bbdev_op_pool[RTE_BBDEV_OP_TURBO_ENC] =
+ rte_bbdev_op_pool_create("bbdev_op_pool_enc",
+ RTE_BBDEV_OP_TURBO_ENC, NB_MBUF, 128, rte_socket_id());
+
+ /* Get information for this device */
+ rte_bbdev_info_get(dev_id, &info);
+
+ /* Setup BBDEV device queues */
+ ret = rte_bbdev_setup_queues(dev_id, qs_nb, info.socket_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "ERROR(%d): BBDEV %u not configured properly\n",
+ ret, dev_id);
+
+    /* Set up the queue configuration */
+ qconf.socket = info.socket_id;
+ qconf.queue_size = info.drv.queue_size_lim;
+ qconf.op_type = RTE_BBDEV_OP_TURBO_ENC;
+
+ for (q_id = 0; q_id < qs_nb; q_id++) {
+ /* Configure all queues belonging to this bbdev device */
+ ret = rte_bbdev_queue_configure(dev_id, q_id, &qconf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "ERROR(%d): BBDEV %u queue %u not configured properly\n",
+ ret, dev_id, q_id);
+ }
+
+ /* Start bbdev device */
+ ret = rte_bbdev_start(dev_id);
+
+ /* Create the mbuf mempool for pkts */
+ mbuf_pool = rte_pktmbuf_pool_create("bbdev_mbuf_pool",
+ NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Unable to create '%s' pool\n", pool_name);
+
+ while (!global_exit_flag) {
+
+ /* Allocate burst of op structures in preparation for enqueue */
+ if (rte_bbdev_enc_op_alloc_bulk(bbdev_op_pool[RTE_BBDEV_OP_TURBO_ENC],
+ ops_burst, op_num) != 0)
+ continue;
+
+ /* Allocate input mbuf pkts */
+ ret = rte_pktmbuf_alloc_bulk(mbuf_pool, input_pkts_burst, MAX_PKT_BURST);
+ if (ret < 0)
+ continue;
+
+ /* Allocate output mbuf pkts */
+ ret = rte_pktmbuf_alloc_bulk(mbuf_pool, output_pkts_burst, MAX_PKT_BURST);
+ if (ret < 0)
+ continue;
+
+ for (j = 0; j < op_num; j++) {
+ /* Append the size of the ethernet header */
+ rte_pktmbuf_append(input_pkts_burst[j],
+ sizeof(struct ether_hdr));
+
+ /* set op */
+
+ ops_burst[j]->turbo_enc.input.offset =
+ sizeof(struct ether_hdr);
+
+ ops_burst[j]->turbo_enc->input.length =
+ rte_pktmbuf_pkt_len(bbdev_pkts[j]);
+
+ ops_burst[j]->turbo_enc->input.data =
+ input_pkts_burst[j];
+
+ ops_burst[j]->turbo_enc->output.offset =
+ sizeof(struct ether_hdr);
+
+ ops_burst[j]->turbo_enc->output.data =
+ output_pkts_burst[j];
+ }
+
+        /* Enqueue packets on the first queue of the BBDEV device */
+        op_num = rte_bbdev_enqueue_enc_ops(dev_id, 0, ops_burst,
+                MAX_PKT_BURST);
+
+        /* Dequeue packets from the same queue */
+        op_num = rte_bbdev_dequeue_enc_ops(dev_id, 0, ops_burst,
+                MAX_PKT_BURST);
+ }
+
+
+BBDEV Device API
+~~~~~~~~~~~~~~~~
+
+The bbdev Library API is described in the *DPDK API Reference* document.
rte_flow
traffic_metering_and_policing
traffic_management
+ bbdev
cryptodev_lib
rte_security
link_bonding_poll_mode_drv_lib
* Rx/Tx descriptor status
* Link status update/event
+* **Added Wireless Base Band Device (bbdev) abstraction.**
+
+  The Wireless Baseband Device library is an acceleration abstraction
+  framework for 3GPP Layer 1 processing functions that provides a common
+  programming interface for seamless operation on integrated or discrete
+  hardware accelerators or using optimized software libraries for signal
+  processing.
+  The current release only supports 3GPP CRC, Turbo Coding and Rate
+  Matching operations, as specified in 3GPP TS 36.212.
+
+ See the :doc:`../prog_guide/bbdev` programmer's guide for more details.
+
API Changes
-----------
.. code-block:: diff
librte_acl.so.2
+ + librte_bbdev.so.1
librte_bitratestats.so.2
librte_bus_dpaa.so.1
librte_bus_fslmc.so.1
DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether
DEPDIRS-librte_ether := librte_net librte_eal librte_mempool librte_ring
DEPDIRS-librte_ether += librte_mbuf
+DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += librte_bbdev
+DEPDIRS-librte_bbdev := librte_eal librte_mempool librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf
DEPDIRS-librte_cryptodev += librte_kvargs
--- /dev/null
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_bbdev.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf
+
+# library source files
+SRCS-y += rte_bbdev.c
+
+# export include files
+SYMLINK-y-include += rte_bbdev_op.h
+SYMLINK-y-include += rte_bbdev.h
+SYMLINK-y-include += rte_bbdev_pmd.h
+
+# versioning export map
+EXPORT_MAP := rte_bbdev_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_memzone.h>
+#include <rte_lcore.h>
+#include <rte_dev.h>
+#include <rte_spinlock.h>
+#include <rte_tailq.h>
+#include <rte_interrupts.h>
+
+#include "rte_bbdev_op.h"
+#include "rte_bbdev.h"
+#include "rte_bbdev_pmd.h"
+
+#define DEV_NAME "BBDEV"
+
+
+/* Helper macro to check dev_id is valid */
+#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
+ if (dev == NULL) { \
+ rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
+ return -ENODEV; \
+ } \
+} while (0)
+
+/* Helper macro to check dev_ops is valid */
+#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
+ if (dev->dev_ops == NULL) { \
+ rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
+ dev_id); \
+ return -ENODEV; \
+ } \
+} while (0)
+
+/* Helper macro to check that driver implements required function pointer */
+#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
+ if (func == NULL) { \
+ rte_bbdev_log(ERR, "device %u does not support %s", \
+ dev_id, #func); \
+ return -ENOTSUP; \
+ } \
+} while (0)
+
+/* Helper macro to check that queue is valid */
+#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
+ if (queue_id >= dev->data->num_queues) { \
+ rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
+ queue_id, dev->data->dev_id); \
+ return -ERANGE; \
+ } \
+} while (0)
+
+/* List of callback functions registered by an application */
+struct rte_bbdev_callback {
+ TAILQ_ENTRY(rte_bbdev_callback) next; /* Callbacks list */
+ rte_bbdev_cb_fn cb_fn; /* Callback address */
+ void *cb_arg; /* Parameter for callback */
+ void *ret_param; /* Return parameter */
+ enum rte_bbdev_event_type event; /* Interrupt event type */
+ uint32_t active; /* Callback is executing */
+};
+
+/* spinlock for bbdev device callbacks */
+static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+/*
+ * Global array of all devices. This is not static because it's used by the
+ * inline enqueue and dequeue functions
+ */
+struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];
+
+/* Global array with rte_bbdev_data structures */
+static struct rte_bbdev_data *rte_bbdev_data;
+
+/* Memzone name for global bbdev data pool */
+static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";
+
+/* Number of currently valid devices */
+static uint16_t num_devs;
+
+/* Return pointer to device structure, with validity check */
+static struct rte_bbdev *
+get_dev(uint16_t dev_id)
+{
+ if (rte_bbdev_is_valid(dev_id))
+ return &rte_bbdev_devices[dev_id];
+ return NULL;
+}
+
+/* Allocate global data array */
+static int
+rte_bbdev_data_alloc(void)
+{
+ const unsigned int flags = 0;
+ const struct rte_memzone *mz;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
+ RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
+ rte_socket_id(), flags);
+ } else
+ mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
+ if (mz == NULL) {
+ rte_bbdev_log(CRIT,
+ "Cannot allocate memzone for bbdev port data");
+ return -ENOMEM;
+ }
+
+ rte_bbdev_data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(rte_bbdev_data, 0,
+ RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
+ return 0;
+}
+
+/*
+ * Find data allocated for the device or, if not found, return the first
+ * unused bbdev data. If all structures are in use and none is used by the
+ * device, return NULL.
+ */
+static struct rte_bbdev_data *
+find_bbdev_data(const char *name)
+{
+ uint16_t data_id;
+
+ for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
+ if (strlen(rte_bbdev_data[data_id].name) == 0) {
+ memset(&rte_bbdev_data[data_id], 0,
+ sizeof(struct rte_bbdev_data));
+ return &rte_bbdev_data[data_id];
+ } else if (strncmp(rte_bbdev_data[data_id].name, name,
+ RTE_BBDEV_NAME_MAX_LEN) == 0)
+ return &rte_bbdev_data[data_id];
+ }
+
+ return NULL;
+}
+
+/* Find lowest device id with no attached device */
+static uint16_t
+find_free_dev_id(void)
+{
+ uint16_t i;
+ for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
+ if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
+ return i;
+ }
+ return RTE_BBDEV_MAX_DEVS;
+}
+
+struct rte_bbdev *
+rte_bbdev_allocate(const char *name)
+{
+ int ret;
+ struct rte_bbdev *bbdev;
+ uint16_t dev_id;
+
+ if (name == NULL) {
+ rte_bbdev_log(ERR, "Invalid null device name");
+ return NULL;
+ }
+
+ if (rte_bbdev_get_named_dev(name) != NULL) {
+ rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
+ return NULL;
+ }
+
+ dev_id = find_free_dev_id();
+ if (dev_id == RTE_BBDEV_MAX_DEVS) {
+ rte_bbdev_log(ERR, "Reached maximum number of devices");
+ return NULL;
+ }
+
+ bbdev = &rte_bbdev_devices[dev_id];
+
+ if (rte_bbdev_data == NULL) {
+ ret = rte_bbdev_data_alloc();
+ if (ret != 0)
+ return NULL;
+ }
+
+ bbdev->data = find_bbdev_data(name);
+ if (bbdev->data == NULL) {
+ rte_bbdev_log(ERR,
+ "Max BBDevs already allocated in multi-process environment!");
+ return NULL;
+ }
+
+ rte_atomic16_inc(&bbdev->data->process_cnt);
+ bbdev->data->dev_id = dev_id;
+ bbdev->state = RTE_BBDEV_INITIALIZED;
+
+ ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
+ if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
+ rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
+ return NULL;
+ }
+
+ /* init user callbacks */
+ TAILQ_INIT(&(bbdev->list_cbs));
+
+ num_devs++;
+
+ rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
+ name, dev_id, num_devs);
+
+ return bbdev;
+}
+
+int
+rte_bbdev_release(struct rte_bbdev *bbdev)
+{
+ uint16_t dev_id;
+ struct rte_bbdev_callback *cb, *next;
+
+ if (bbdev == NULL) {
+ rte_bbdev_log(ERR, "NULL bbdev");
+ return -ENODEV;
+ }
+ dev_id = bbdev->data->dev_id;
+
+ /* free all callbacks from the device's list */
+ for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {
+
+ next = TAILQ_NEXT(cb, next);
+ TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
+ rte_free(cb);
+ }
+
+ /* clear shared BBDev Data if no process is using the device anymore */
+ if (rte_atomic16_dec_and_test(&bbdev->data->process_cnt))
+ memset(bbdev->data, 0, sizeof(*bbdev->data));
+
+ memset(bbdev, 0, sizeof(*bbdev));
+ num_devs--;
+ bbdev->state = RTE_BBDEV_UNUSED;
+
+ rte_bbdev_log_debug(
+ "Un-initialised device id = %u. Num devices = %u",
+ dev_id, num_devs);
+ return 0;
+}
+
+struct rte_bbdev *
+rte_bbdev_get_named_dev(const char *name)
+{
+ unsigned int i;
+
+ if (name == NULL) {
+ rte_bbdev_log(ERR, "NULL driver name");
+ return NULL;
+ }
+
+ for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
+ struct rte_bbdev *dev = get_dev(i);
+ if (dev && (strncmp(dev->data->name,
+ name, RTE_BBDEV_NAME_MAX_LEN) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+uint16_t
+rte_bbdev_count(void)
+{
+ return num_devs;
+}
+
+bool
+rte_bbdev_is_valid(uint16_t dev_id)
+{
+ if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
+ rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
+ return true;
+ return false;
+}
+
+uint16_t
+rte_bbdev_find_next(uint16_t dev_id)
+{
+ dev_id++;
+ for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
+ if (rte_bbdev_is_valid(dev_id))
+ break;
+ return dev_id;
+}
+
+int
+rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
+{
+ unsigned int i;
+ int ret;
+ struct rte_bbdev_driver_info dev_info;
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ if (dev->data->started) {
+ rte_bbdev_log(ERR,
+ "Device %u cannot be configured when started",
+ dev_id);
+ return -EBUSY;
+ }
+
+ /* Get device driver information to get max number of queues */
+ VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
+ memset(&dev_info, 0, sizeof(dev_info));
+ dev->dev_ops->info_get(dev, &dev_info);
+
+ if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
+ rte_bbdev_log(ERR,
+ "Device %u supports 0 < N <= %u queues, not %u",
+ dev_id, dev_info.max_num_queues, num_queues);
+ return -EINVAL;
+ }
+
+ /* If re-configuration, get driver to free existing internal memory */
+ if (dev->data->queues != NULL) {
+ VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
+ for (i = 0; i < dev->data->num_queues; i++) {
+ int ret = dev->dev_ops->queue_release(dev, i);
+ if (ret < 0) {
+ rte_bbdev_log(ERR,
+ "Device %u queue %u release failed",
+ dev_id, i);
+ return ret;
+ }
+ }
+ /* Call optional device close */
+ if (dev->dev_ops->close) {
+ ret = dev->dev_ops->close(dev);
+ if (ret < 0) {
+ rte_bbdev_log(ERR,
+ "Device %u couldn't be closed",
+ dev_id);
+ return ret;
+ }
+ }
+ rte_free(dev->data->queues);
+ }
+
+ /* Allocate queue pointers */
+ dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
+ sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
+ dev->data->socket_id);
+ if (dev->data->queues == NULL) {
+ rte_bbdev_log(ERR,
+ "calloc of %u queues for device %u on socket %i failed",
+ num_queues, dev_id, dev->data->socket_id);
+ return -ENOMEM;
+ }
+
+ dev->data->num_queues = num_queues;
+
+ /* Call optional device configuration */
+ if (dev->dev_ops->setup_queues) {
+ ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
+ if (ret < 0) {
+ rte_bbdev_log(ERR,
+ "Device %u memory configuration failed",
+ dev_id);
+ goto error;
+ }
+ }
+
+ rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
+ num_queues);
+ return 0;
+
+error:
+ dev->data->num_queues = 0;
+ rte_free(dev->data->queues);
+ dev->data->queues = NULL;
+ return ret;
+}
+
+int
+rte_bbdev_intr_enable(uint16_t dev_id)
+{
+ int ret;
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ if (dev->data->started) {
+ rte_bbdev_log(ERR,
+ "Device %u cannot be configured when started",
+ dev_id);
+ return -EBUSY;
+ }
+
+ if (dev->dev_ops->intr_enable) {
+ ret = dev->dev_ops->intr_enable(dev);
+ if (ret < 0) {
+ rte_bbdev_log(ERR,
+ "Device %u interrupts configuration failed",
+ dev_id);
+ return ret;
+ }
+ rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
+ return 0;
+ }
+
+ rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
+ return -ENOTSUP;
+}
+
+int
+rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
+ const struct rte_bbdev_queue_conf *conf)
+{
+ int ret = 0;
+ struct rte_bbdev_driver_info dev_info;
+ struct rte_bbdev *dev = get_dev(dev_id);
+ const struct rte_bbdev_op_cap *p;
+ struct rte_bbdev_queue_conf *stored_conf;
+ const char *op_type_str;
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ VALID_QUEUE_OR_RET_ERR(queue_id, dev);
+
+ if (dev->data->queues[queue_id].started || dev->data->started) {
+ rte_bbdev_log(ERR,
+ "Queue %u of device %u cannot be configured when started",
+ queue_id, dev_id);
+ return -EBUSY;
+ }
+
+ VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
+ VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);
+
+ /* Get device driver information to verify config is valid */
+ VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
+ memset(&dev_info, 0, sizeof(dev_info));
+ dev->dev_ops->info_get(dev, &dev_info);
+
+ /* Check configuration is valid */
+ if (conf != NULL) {
+ if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
+ (dev_info.capabilities[0].type ==
+ RTE_BBDEV_OP_NONE)) {
+ ret = 1;
+ } else {
+ for (p = dev_info.capabilities;
+ p->type != RTE_BBDEV_OP_NONE; p++) {
+ if (conf->op_type == p->type) {
+ ret = 1;
+ break;
+ }
+ }
+ }
+ if (ret == 0) {
+ rte_bbdev_log(ERR, "Invalid operation type");
+ return -EINVAL;
+ }
+ if (conf->queue_size > dev_info.queue_size_lim) {
+ rte_bbdev_log(ERR,
+ "Size (%u) of queue %u of device %u must be: <= %u",
+ conf->queue_size, queue_id, dev_id,
+ dev_info.queue_size_lim);
+ return -EINVAL;
+ }
+ if (!rte_is_power_of_2(conf->queue_size)) {
+ rte_bbdev_log(ERR,
+ "Size (%u) of queue %u of device %u must be a power of 2",
+ conf->queue_size, queue_id, dev_id);
+ return -EINVAL;
+ }
+ if (conf->priority > dev_info.max_queue_priority) {
+ rte_bbdev_log(ERR,
+ "Priority (%u) of queue %u of bdev %u must be <= %u",
+ conf->priority, queue_id, dev_id,
+ dev_info.max_queue_priority);
+ return -EINVAL;
+ }
+ }
+
+ /* Release existing queue (in case of queue reconfiguration) */
+ if (dev->data->queues[queue_id].queue_private != NULL) {
+ ret = dev->dev_ops->queue_release(dev, queue_id);
+ if (ret < 0) {
+ rte_bbdev_log(ERR, "Device %u queue %u release failed",
+ dev_id, queue_id);
+ return ret;
+ }
+ }
+
+ /* Get driver to setup the queue */
+ ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
+ conf : &dev_info.default_queue_conf);
+ if (ret < 0) {
+ rte_bbdev_log(ERR,
+ "Device %u queue %u setup failed", dev_id,
+ queue_id);
+ return ret;
+ }
+
+ /* Store configuration */
+ stored_conf = &dev->data->queues[queue_id].conf;
+ memcpy(stored_conf,
+ (conf != NULL) ? conf : &dev_info.default_queue_conf,
+ sizeof(*stored_conf));
+
+ op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
+ if (op_type_str == NULL)
+ return -EINVAL;
+
+ rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
+ dev_id, queue_id, stored_conf->queue_size, op_type_str,
+ stored_conf->priority);
+
+ return 0;
+}
+
+int
+rte_bbdev_start(uint16_t dev_id)
+{
+ int i;
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ if (dev->data->started) {
+ rte_bbdev_log_debug("Device %u is already started", dev_id);
+ return 0;
+ }
+
+ if (dev->dev_ops->start) {
+ int ret = dev->dev_ops->start(dev);
+ if (ret < 0) {
+ rte_bbdev_log(ERR, "Device %u start failed", dev_id);
+ return ret;
+ }
+ }
+
+ /* Store new state */
+ for (i = 0; i < dev->data->num_queues; i++)
+ if (!dev->data->queues[i].conf.deferred_start)
+ dev->data->queues[i].started = true;
+ dev->data->started = true;
+
+ rte_bbdev_log_debug("Started device %u", dev_id);
+ return 0;
+}
+
+int
+rte_bbdev_stop(uint16_t dev_id)
+{
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ if (!dev->data->started) {
+ rte_bbdev_log_debug("Device %u is already stopped", dev_id);
+ return 0;
+ }
+
+ if (dev->dev_ops->stop)
+ dev->dev_ops->stop(dev);
+ dev->data->started = false;
+
+ rte_bbdev_log_debug("Stopped device %u", dev_id);
+ return 0;
+}
+
+int
+rte_bbdev_close(uint16_t dev_id)
+{
+ int ret;
+ uint16_t i;
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ if (dev->data->started) {
+ ret = rte_bbdev_stop(dev_id);
+ if (ret < 0) {
+ rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
+ return ret;
+ }
+ }
+
+ /* Free memory used by queues */
+ for (i = 0; i < dev->data->num_queues; i++) {
+ ret = dev->dev_ops->queue_release(dev, i);
+ if (ret < 0) {
+ rte_bbdev_log(ERR, "Device %u queue %u release failed",
+ dev_id, i);
+ return ret;
+ }
+ }
+ rte_free(dev->data->queues);
+
+ if (dev->dev_ops->close) {
+ ret = dev->dev_ops->close(dev);
+ if (ret < 0) {
+ rte_bbdev_log(ERR, "Device %u close failed", dev_id);
+ return ret;
+ }
+ }
+
+ /* Clear configuration */
+ dev->data->queues = NULL;
+ dev->data->num_queues = 0;
+
+ rte_bbdev_log_debug("Closed device %u", dev_id);
+ return 0;
+}
+
+int
+rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
+{
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ VALID_QUEUE_OR_RET_ERR(queue_id, dev);
+
+ if (dev->data->queues[queue_id].started) {
+ rte_bbdev_log_debug("Queue %u of device %u already started",
+ queue_id, dev_id);
+ return 0;
+ }
+
+ if (dev->dev_ops->queue_start) {
+ int ret = dev->dev_ops->queue_start(dev, queue_id);
+ if (ret < 0) {
+ rte_bbdev_log(ERR, "Device %u queue %u start failed",
+ dev_id, queue_id);
+ return ret;
+ }
+ }
+ dev->data->queues[queue_id].started = true;
+
+ rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
+ return 0;
+}
+
+int
+rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
+{
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ VALID_QUEUE_OR_RET_ERR(queue_id, dev);
+
+ if (!dev->data->queues[queue_id].started) {
+ rte_bbdev_log_debug("Queue %u of device %u already stopped",
+ queue_id, dev_id);
+ return 0;
+ }
+
+ if (dev->dev_ops->queue_stop) {
+ int ret = dev->dev_ops->queue_stop(dev, queue_id);
+ if (ret < 0) {
+ rte_bbdev_log(ERR, "Device %u queue %u stop failed",
+ dev_id, queue_id);
+ return ret;
+ }
+ }
+ dev->data->queues[queue_id].started = false;
+
+ rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
+ return 0;
+}
+
+/* Get device statistics */
+static void
+get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
+{
+ unsigned int q_id;
+ for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
+ struct rte_bbdev_stats *q_stats =
+ &dev->data->queues[q_id].queue_stats;
+
+ stats->enqueued_count += q_stats->enqueued_count;
+ stats->dequeued_count += q_stats->dequeued_count;
+ stats->enqueue_err_count += q_stats->enqueue_err_count;
+ stats->dequeue_err_count += q_stats->dequeue_err_count;
+ }
+ rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
+}
+
+static void
+reset_stats_in_queues(struct rte_bbdev *dev)
+{
+ unsigned int q_id;
+ for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
+ struct rte_bbdev_stats *q_stats =
+ &dev->data->queues[q_id].queue_stats;
+
+ memset(q_stats, 0, sizeof(*q_stats));
+ }
+ rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
+}
+
+int
+rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
+{
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ if (stats == NULL) {
+ rte_bbdev_log(ERR, "NULL stats structure");
+ return -EINVAL;
+ }
+
+ memset(stats, 0, sizeof(*stats));
+ if (dev->dev_ops->stats_get != NULL)
+ dev->dev_ops->stats_get(dev, stats);
+ else
+ get_stats_from_queues(dev, stats);
+
+ rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
+ return 0;
+}
+
+int
+rte_bbdev_stats_reset(uint16_t dev_id)
+{
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+
+ if (dev->dev_ops->stats_reset != NULL)
+ dev->dev_ops->stats_reset(dev);
+ else
+ reset_stats_in_queues(dev);
+
+ rte_bbdev_log_debug("Reset stats of device %u", dev_id);
+ return 0;
+}
+
+int
+rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
+{
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
+
+ if (dev_info == NULL) {
+ rte_bbdev_log(ERR, "NULL dev info structure");
+ return -EINVAL;
+ }
+
+ /* Copy data maintained by device interface layer */
+ memset(dev_info, 0, sizeof(*dev_info));
+ dev_info->dev_name = dev->data->name;
+ dev_info->num_queues = dev->data->num_queues;
+ dev_info->bus = rte_bus_find_by_device(dev->device);
+ dev_info->socket_id = dev->data->socket_id;
+ dev_info->started = dev->data->started;
+
+ /* Copy data maintained by device driver layer */
+ dev->dev_ops->info_get(dev, &dev_info->drv);
+
+ rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
+ return 0;
+}
+
+int
+rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_queue_info *queue_info)
+{
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ VALID_QUEUE_OR_RET_ERR(queue_id, dev);
+
+ if (queue_info == NULL) {
+ rte_bbdev_log(ERR, "NULL queue info structure");
+ return -EINVAL;
+ }
+
+ /* Copy data to output */
+ memset(queue_info, 0, sizeof(*queue_info));
+ queue_info->conf = dev->data->queues[queue_id].conf;
+ queue_info->started = dev->data->queues[queue_id].started;
+
+ rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
+ queue_id, dev_id);
+ return 0;
+}
+
+/* Calculate size needed to store bbdev_op, depending on type */
+static unsigned int
+get_bbdev_op_size(enum rte_bbdev_op_type type)
+{
+ unsigned int result = 0;
+ switch (type) {
+ case RTE_BBDEV_OP_NONE:
+ result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
+ sizeof(struct rte_bbdev_enc_op));
+ break;
+ case RTE_BBDEV_OP_TURBO_DEC:
+ result = sizeof(struct rte_bbdev_dec_op);
+ break;
+ case RTE_BBDEV_OP_TURBO_ENC:
+ result = sizeof(struct rte_bbdev_enc_op);
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+/* Initialise a bbdev_op structure */
+static void
+bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
+ __rte_unused unsigned int n)
+{
+ enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;
+
+ if (type == RTE_BBDEV_OP_TURBO_DEC) {
+ struct rte_bbdev_dec_op *op = element;
+ memset(op, 0, mempool->elt_size);
+ op->mempool = mempool;
+ } else if (type == RTE_BBDEV_OP_TURBO_ENC) {
+ struct rte_bbdev_enc_op *op = element;
+ memset(op, 0, mempool->elt_size);
+ op->mempool = mempool;
+ }
+}
+
+struct rte_mempool *
+rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
+ unsigned int num_elements, unsigned int cache_size,
+ int socket_id)
+{
+ struct rte_bbdev_op_pool_private *priv;
+ struct rte_mempool *mp;
+ const char *op_type_str;
+
+ if (name == NULL) {
+ rte_bbdev_log(ERR, "NULL name for op pool");
+ return NULL;
+ }
+
+ if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
+ rte_bbdev_log(ERR,
+ "Invalid op type (%u), should be less than %u",
+ type, RTE_BBDEV_OP_TYPE_COUNT);
+ return NULL;
+ }
+
+ mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
+ cache_size, sizeof(struct rte_bbdev_op_pool_private),
+ NULL, NULL, bbdev_op_init, &type, socket_id, 0);
+ if (mp == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
+ name, num_elements, get_bbdev_op_size(type),
+ rte_strerror(rte_errno));
+ return NULL;
+ }
+
+ op_type_str = rte_bbdev_op_type_str(type);
+ if (op_type_str == NULL)
+ return NULL;
+
+ rte_bbdev_log_debug(
+ "Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
+ name, num_elements, op_type_str, cache_size, socket_id,
+ get_bbdev_op_size(type));
+
+ priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
+ priv->type = type;
+
+ return mp;
+}
+
+int
+rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
+ rte_bbdev_cb_fn cb_fn, void *cb_arg)
+{
+ struct rte_bbdev_callback *user_cb;
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ if (event >= RTE_BBDEV_EVENT_MAX) {
+ rte_bbdev_log(ERR,
+ "Invalid event type (%u), should be less than %u",
+ event, RTE_BBDEV_EVENT_MAX);
+ return -EINVAL;
+ }
+
+ if (cb_fn == NULL) {
+ rte_bbdev_log(ERR, "NULL callback function");
+ return -EINVAL;
+ }
+
+ rte_spinlock_lock(&rte_bbdev_cb_lock);
+
+ TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
+ if (user_cb->cb_fn == cb_fn &&
+ user_cb->cb_arg == cb_arg &&
+ user_cb->event == event)
+ break;
+ }
+
+ /* create a new callback. */
+ if (user_cb == NULL) {
+ user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+ sizeof(struct rte_bbdev_callback), 0);
+ if (user_cb != NULL) {
+ user_cb->cb_fn = cb_fn;
+ user_cb->cb_arg = cb_arg;
+ user_cb->event = event;
+ TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
+ }
+ }
+
+ rte_spinlock_unlock(&rte_bbdev_cb_lock);
+ return (user_cb == NULL) ? -ENOMEM : 0;
+}
+
+int
+rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
+ rte_bbdev_cb_fn cb_fn, void *cb_arg)
+{
+ int ret = 0;
+ struct rte_bbdev_callback *cb, *next;
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+
+ if (event >= RTE_BBDEV_EVENT_MAX) {
+ rte_bbdev_log(ERR,
+ "Invalid event type (%u), should be less than %u",
+ event, RTE_BBDEV_EVENT_MAX);
+ return -EINVAL;
+ }
+
+ if (cb_fn == NULL) {
+ rte_bbdev_log(ERR,
+ "NULL callback function cannot be unregistered");
+ return -EINVAL;
+ }
+
+ dev = &rte_bbdev_devices[dev_id];
+ rte_spinlock_lock(&rte_bbdev_cb_lock);
+
+ for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {
+
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->cb_fn != cb_fn || cb->event != event ||
+ (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
+ continue;
+
+ /* If this callback is not executing right now, remove it. */
+ if (cb->active == 0) {
+ TAILQ_REMOVE(&(dev->list_cbs), cb, next);
+ rte_free(cb);
+ } else
+ ret = -EAGAIN;
+ }
+
+ rte_spinlock_unlock(&rte_bbdev_cb_lock);
+ return ret;
+}
+
+void
+rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
+ enum rte_bbdev_event_type event, void *ret_param)
+{
+ struct rte_bbdev_callback *cb_lst;
+ struct rte_bbdev_callback dev_cb;
+
+ if (dev == NULL) {
+ rte_bbdev_log(ERR, "NULL device");
+ return;
+ }
+
+ if (dev->data == NULL) {
+ rte_bbdev_log(ERR, "NULL data structure");
+ return;
+ }
+
+ if (event >= RTE_BBDEV_EVENT_MAX) {
+ rte_bbdev_log(ERR,
+ "Invalid event type (%u), should be less than %u",
+ event, RTE_BBDEV_EVENT_MAX);
+ return;
+ }
+
+ rte_spinlock_lock(&rte_bbdev_cb_lock);
+ TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
+ if (cb_lst->cb_fn == NULL || cb_lst->event != event)
+ continue;
+ dev_cb = *cb_lst;
+ cb_lst->active = 1;
+ if (ret_param != NULL)
+ dev_cb.ret_param = ret_param;
+
+ rte_spinlock_unlock(&rte_bbdev_cb_lock);
+ dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
+ dev_cb.cb_arg, dev_cb.ret_param);
+ rte_spinlock_lock(&rte_bbdev_cb_lock);
+ cb_lst->active = 0;
+ }
+ rte_spinlock_unlock(&rte_bbdev_cb_lock);
+}
+
+int
+rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
+{
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+ VALID_QUEUE_OR_RET_ERR(queue_id, dev);
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+ VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
+ return dev->dev_ops->queue_intr_enable(dev, queue_id);
+}
+
+int
+rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
+{
+ struct rte_bbdev *dev = get_dev(dev_id);
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+ VALID_QUEUE_OR_RET_ERR(queue_id, dev);
+ VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
+ VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
+ return dev->dev_ops->queue_intr_disable(dev, queue_id);
+}
+
+int
+rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
+ void *data)
+{
+ uint32_t vec;
+ struct rte_bbdev *dev = get_dev(dev_id);
+ struct rte_intr_handle *intr_handle;
+ int ret;
+
+ VALID_DEV_OR_RET_ERR(dev, dev_id);
+ VALID_QUEUE_OR_RET_ERR(queue_id, dev);
+
+ intr_handle = dev->intr_handle;
+ if (!intr_handle || !intr_handle->intr_vec) {
+ rte_bbdev_log(ERR, "Device %u intr handle unset\n", dev_id);
+ return -ENOTSUP;
+ }
+
+ if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ rte_bbdev_log(ERR, "Device %u queue_id %u is too big\n",
+ dev_id, queue_id);
+ return -ENOTSUP;
+ }
+
+ vec = intr_handle->intr_vec[queue_id];
+ ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
+ if (ret && (ret != -EEXIST)) {
+ rte_bbdev_log(ERR,
+ "dev %u q %u int ctl error op %d epfd %d vec %u\n",
+ dev_id, queue_id, op, epfd, vec);
+ return ret;
+ }
+
+ return 0;
+}
+
+
+const char *
+rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
+{
+ static const char * const op_types[] = {
+ "RTE_BBDEV_OP_NONE",
+ "RTE_BBDEV_OP_TURBO_DEC",
+ "RTE_BBDEV_OP_TURBO_ENC",
+ };
+
+ if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
+ return op_types[op_type];
+
+ rte_bbdev_log(ERR, "Invalid operation type");
+ return NULL;
+}
+
+
+int bbdev_logtype;
+
+RTE_INIT(rte_bbdev_init_log);
+static void
+rte_bbdev_init_log(void)
+{
+ bbdev_logtype = rte_log_register("lib.bbdev");
+ if (bbdev_logtype >= 0)
+ rte_log_set_level(bbdev_logtype, RTE_LOG_NOTICE);
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_BBDEV_H_
+#define _RTE_BBDEV_H_
+
+/**
+ * @file rte_bbdev.h
+ *
+ * Wireless base band device abstraction APIs.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This API allows an application to discover, configure and use a device to
+ * process operations. An asynchronous API (enqueue, followed by later dequeue)
+ * is used for processing operations.
+ *
+ * The functions in this API are not thread-safe when called on the same
+ * target object (a device, or a queue on a device), with the exception that
+ * one thread can enqueue operations to a queue while another thread dequeues
+ * from the same queue.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_bus.h>
+#include <rte_cpuflags.h>
+#include <rte_memory.h>
+
+#include "rte_bbdev_op.h"
+
+#ifndef RTE_BBDEV_MAX_DEVS
+#define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
+#endif
+
+/** Flags indicating the current state of a BBDEV device */
+enum rte_bbdev_state {
+ RTE_BBDEV_UNUSED,
+ RTE_BBDEV_INITIALIZED
+};
+
+/**
+ * Get the total number of devices that have been successfully initialised.
+ *
+ * @return
+ * The total number of usable devices.
+ */
+uint16_t
+rte_bbdev_count(void);
+
+/**
+ * Check if a device is valid.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @return
+ * true if device ID is valid and device is attached, false otherwise.
+ */
+bool
+rte_bbdev_is_valid(uint16_t dev_id);
+
+/**
+ * Get the next enabled device.
+ *
+ * @param dev_id
+ * The current device
+ *
+ * @return
+ * - The next device, or
+ * - RTE_BBDEV_MAX_DEVS if none found
+ */
+uint16_t
+rte_bbdev_find_next(uint16_t dev_id);
+
+/** Iterate through all enabled devices */
+#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
+ i < RTE_BBDEV_MAX_DEVS; \
+ i = rte_bbdev_find_next(i))
+
+/**
+ * Set up device queues.
+ * This function must be called on a device before configuring its queues and
+ * starting the device. It can also be called when a device is in the stopped
+ * state. If any device queues have been configured, their configuration will
+ * be cleared by a call to this function.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param num_queues
+ * Number of queues to configure on device.
+ * @param socket_id
+ * ID of a socket which will be used to allocate memory.
+ *
+ * @return
+ * - 0 on success
+ * - -ENODEV if dev_id is invalid or the device is corrupted
+ * - -EINVAL if num_queues is invalid, 0 or greater than maximum
+ * - -EBUSY if the identified device has already started
+ * - -ENOMEM if unable to allocate memory
+ */
+int
+rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
+
+/**
+ * Enable interrupts.
+ * This function may be called before starting the device to enable the
+ * interrupts if they are available.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @return
+ * - 0 on success
+ * - -ENODEV if dev_id is invalid or the device is corrupted
+ * - -EBUSY if the identified device has already started
+ * - -ENOTSUP if the interrupts are not supported by the device
+ */
+int
+rte_bbdev_intr_enable(uint16_t dev_id);
+
+/** Device queue configuration structure */
+struct rte_bbdev_queue_conf {
+ int socket; /**< NUMA socket used for memory allocation */
+ uint32_t queue_size; /**< Size of queue */
+ uint8_t priority; /**< Queue priority */
+ bool deferred_start; /**< Do not start queue when device is started. */
+ enum rte_bbdev_op_type op_type; /**< Operation type */
+};
+
+/**
+ * Configure a queue on a device.
+ * This function can be called after device configuration, and before starting.
+ * It can also be called when the device or the queue is in the stopped state.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the queue.
+ * @param conf
+ * The queue configuration. If NULL, a default configuration will be used.
+ *
+ * @return
+ * - 0 on success
+ * - EINVAL if the identified queue size or priority are invalid
+ * - EBUSY if the identified queue or its device have already started
+ */
+int
+rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
+ const struct rte_bbdev_queue_conf *conf);
+
+/**
+ * Start a device.
+ * This is the last step needed before enqueueing operations is possible.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @return
+ * - 0 on success
+ * - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_start(uint16_t dev_id);
+
+/**
+ * Stop a device.
+ * The device can be reconfigured, and restarted after being stopped.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @return
+ * - 0 on success
+ */
+int
+rte_bbdev_stop(uint16_t dev_id);
+
+/**
+ * Close a device.
+ * The device cannot be restarted without reconfiguration!
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @return
+ * - 0 on success
+ */
+int
+rte_bbdev_close(uint16_t dev_id);
+
+/**
+ * Start a specified queue on a device.
+ * This is only needed if the queue has been stopped, or if the deferred_start
+ * flag has been set when configuring the queue.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the queue.
+ *
+ * @return
+ * - 0 on success
+ * - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
+
+/**
+ * Stop a specified queue on a device, to allow reconfiguration.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the queue.
+ *
+ * @return
+ * - 0 on success
+ * - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
+
+/** Device statistics. */
+struct rte_bbdev_stats {
+ uint64_t enqueued_count; /**< Count of all operations enqueued */
+ uint64_t dequeued_count; /**< Count of all operations dequeued */
+ /** Total error count on operations enqueued */
+ uint64_t enqueue_err_count;
+ /** Total error count on operations dequeued */
+ uint64_t dequeue_err_count;
+};
+
+/**
+ * Retrieve the general I/O statistics of a device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param stats
+ * Pointer to structure to where statistics will be copied. On error, this
+ * location may or may not have been modified.
+ *
+ * @return
+ * - 0 on success
+ * - -EINVAL if an invalid parameter pointer is provided
+ */
+int
+rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
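+
+/*
+ * Illustrative stats read (a sketch; assumes <inttypes.h> and a valid
+ * "dev_id"):
+ *
+ *   struct rte_bbdev_stats stats;
+ *
+ *   if (rte_bbdev_stats_get(dev_id, &stats) == 0)
+ *           printf("enqueued: %" PRIu64 " dequeued: %" PRIu64 "\n",
+ *                           stats.enqueued_count, stats.dequeued_count);
+ */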
+
+/**
+ * Reset the statistics of a device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @return
+ * - 0 on success
+ */
+int
+rte_bbdev_stats_reset(uint16_t dev_id);
+
+/** Device information supplied by the device's driver */
+struct rte_bbdev_driver_info {
+ /** Driver name */
+ const char *driver_name;
+
+ /** Maximum number of queues supported by the device */
+ unsigned int max_num_queues;
+ /** Queue size limit (queue size must also be power of 2) */
+ uint32_t queue_size_lim;
+ /** Set if device off-loads operation to hardware */
+ bool hardware_accelerated;
+ /** Max value supported by queue priority */
+ uint8_t max_queue_priority;
+ /** Set if device supports per-queue interrupts */
+ bool queue_intr_supported;
+ /** Minimum alignment of buffers, in bytes */
+ uint16_t min_alignment;
+ /** Default queue configuration used if none is supplied */
+ struct rte_bbdev_queue_conf default_queue_conf;
+ /** Device operation capabilities */
+ const struct rte_bbdev_op_cap *capabilities;
+ /** Device cpu_flag requirements */
+ const enum rte_cpu_flag_t *cpu_flag_reqs;
+};
+
+/** Macro used at end of bbdev PMD list */
+#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
+ { RTE_BBDEV_OP_NONE }
+
+/**
+ * Device information structure used by an application to discover a device's
+ * capabilities and current configuration
+ */
+struct rte_bbdev_info {
+ int socket_id; /**< NUMA socket that device is on */
+ const char *dev_name; /**< Unique device name */
+ const struct rte_bus *bus; /**< Bus information */
+ uint16_t num_queues; /**< Number of queues currently configured */
+ bool started; /**< Set if device is currently started */
+ struct rte_bbdev_driver_info drv; /**< Info from device driver */
+};
+
+/**
+ * Retrieve information about a device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param dev_info
+ * Pointer to structure to where information will be copied. On error, this
+ * location may or may not have been modified.
+ *
+ * @return
+ * - 0 on success
+ * - -EINVAL if an invalid parameter pointer is provided
+ */
+int
+rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
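+
+/*
+ * Illustrative capability discovery (a sketch): the driver-supplied
+ * capabilities array is terminated by RTE_BBDEV_OP_NONE, so it can be
+ * walked as below:
+ *
+ *   struct rte_bbdev_info info;
+ *   const struct rte_bbdev_op_cap *cap;
+ *
+ *   if (rte_bbdev_info_get(dev_id, &info) != 0)
+ *           return;
+ *   for (cap = info.drv.capabilities;
+ *                   cap->type != RTE_BBDEV_OP_NONE; cap++)
+ *           if (cap->type == RTE_BBDEV_OP_TURBO_ENC)
+ *                   printf("%s supports Turbo encode\n", info.dev_name);
+ */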
+
+/** Queue information */
+struct rte_bbdev_queue_info {
+ /** Current device configuration */
+ struct rte_bbdev_queue_conf conf;
+ /** Set if queue is currently started */
+ bool started;
+};
+
+/**
+ * Retrieve information about a specific queue on a device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the queue.
+ * @param queue_info
+ * Pointer to structure to where information will be copied. On error, this
+ * location may or may not have been modified.
+ *
+ * @return
+ * - 0 on success
+ * - -EINVAL if an invalid parameter pointer is provided
+ */
+int
+rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_queue_info *queue_info);
+
+/** @internal The data structure associated with each queue of a device. */
+struct rte_bbdev_queue_data {
+ void *queue_private; /**< Driver-specific per-queue data */
+ struct rte_bbdev_queue_conf conf; /**< Current configuration */
+ struct rte_bbdev_stats queue_stats; /**< Queue statistics */
+ bool started; /**< Queue state */
+};
+
+/** @internal Enqueue encode operations for processing on queue of a device. */
+typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
+ struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops,
+ uint16_t num);
+
+/** @internal Enqueue decode operations for processing on queue of a device. */
+typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
+ struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops,
+ uint16_t num);
+
+/** @internal Dequeue encode operations from a queue of a device. */
+typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
+ struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t num);
+
+/** @internal Dequeue decode operations from a queue of a device. */
+typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
+ struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t num);
+
+#define RTE_BBDEV_NAME_MAX_LEN 64 /**< Max length of device name */
+
+/**
+ * @internal The data associated with a device, with no function pointers.
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration. Drivers can access
+ * these fields, but should never write to them!
+ */
+struct rte_bbdev_data {
+ char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
+ void *dev_private; /**< Driver-specific private data */
+ uint16_t num_queues; /**< Number of currently configured queues */
+ struct rte_bbdev_queue_data *queues; /**< Queue structures */
+ uint16_t dev_id; /**< Device ID */
+ int socket_id; /**< NUMA socket that device is on */
+ bool started; /**< Device run-time state */
+ /** Counter of processes using the device */
+ rte_atomic16_t process_cnt;
+};
+
+/* Forward declarations */
+struct rte_bbdev_ops;
+struct rte_bbdev_callback;
+struct rte_intr_handle;
+
+/** Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
+
+/**
+ * @internal The data structure associated with a device. Drivers can access
+ * these fields, but should only write to the *_ops fields.
+ */
+struct __rte_cache_aligned rte_bbdev {
+ /** Enqueue encode function */
+ rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
+ /** Enqueue decode function */
+ rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
+ /** Dequeue encode function */
+ rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
+ /** Dequeue decode function */
+ rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
+ const struct rte_bbdev_ops *dev_ops; /**< Functions exported by PMD */
+ struct rte_bbdev_data *data; /**< Pointer to device data */
+ enum rte_bbdev_state state; /**< If device is currently used or not */
+ struct rte_device *device; /**< Backing device */
+ /** User application callback for interrupts if present */
+ struct rte_bbdev_cb_list list_cbs;
+ struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
+};
+
+/** @internal array of all devices */
+extern struct rte_bbdev rte_bbdev_devices[];
+
+/**
+ * Enqueue a burst of processed encode operations to a queue of the device.
+ * This function only enqueues as many operations as currently possible and
+ * does not block until @p num_ops entries in the queue are available.
+ * This function does not provide any error notification to avoid the
+ * corresponding overhead.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the queue.
+ * @param ops
+ * Pointer array containing operations to be enqueued. Must have at least
+ * @p num_ops entries.
+ * @param num_ops
+ * The maximum number of operations to enqueue.
+ *
+ * @return
+ * The number of operations actually enqueued (this is the number of processed
+ * entries in the @p ops array).
+ */
+static inline uint16_t
+rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_enc_op **ops, uint16_t num_ops)
+{
+ struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
+ struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
+ uint16_t n = dev->enqueue_enc_ops(q_data, ops, num_ops);
+
+ rte_bbdev_log_verbose("%u encode ops enqueued to dev%u,q%u.\n",
+ num_ops, dev_id, queue_id);
+
+ return n;
+}
+
+/**
+ * Enqueue a burst of processed decode operations to a queue of the device.
+ * This function only enqueues as many operations as currently possible and
+ * does not block until @p num_ops entries in the queue are available.
+ * This function does not provide any error notification to avoid the
+ * corresponding overhead.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the queue.
+ * @param ops
+ * Pointer array containing operations to be enqueued. Must have at least
+ * @p num_ops entries.
+ * @param num_ops
+ * The maximum number of operations to enqueue.
+ *
+ * @return
+ * The number of operations actually enqueued (this is the number of processed
+ * entries in the @p ops array).
+ */
+static inline uint16_t
+rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_dec_op **ops, uint16_t num_ops)
+{
+ struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
+ struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
+ uint16_t n = dev->enqueue_dec_ops(q_data, ops, num_ops);
+
+ rte_bbdev_log_verbose("%u decode ops enqueued to dev%u,q%u.\n",
+ num_ops, dev_id, queue_id);
+
+ return n;
+}
+
+/**
+ * Dequeue a burst of processed encode operations from a queue of the device.
+ * This function returns only the current contents of the queue, and does not
+ * block until @p num_ops entries are available.
+ * This function does not provide any error notification to avoid the
+ * corresponding overhead.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the queue.
+ * @param ops
+ * Pointer array where operations will be dequeued to. Must have at least
+ * @p num_ops entries.
+ * @param num_ops
+ * The maximum number of operations to dequeue.
+ *
+ * @return
+ * The number of operations actually dequeued (this is the number of entries
+ * copied into the @p ops array).
+ */
+static inline uint16_t
+rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_enc_op **ops, uint16_t num_ops)
+{
+ struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
+ struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
+ uint16_t n = dev->dequeue_enc_ops(q_data, ops, num_ops);
+
+ rte_bbdev_log_verbose("%u encode ops dequeued to dev%u,q%u\n",
+ n, dev_id, queue_id);
+
+ return n;
+}
+
+/**
+ * Dequeue a burst of processed decode operations from a queue of the device.
+ * This function returns only the current contents of the queue, and does not
+ * block until @p num_ops entries are available.
+ * This function does not provide any error notification to avoid the
+ * corresponding overhead.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the queue.
+ * @param ops
+ * Pointer array where operations will be dequeued to. Must have at least
+ * @p num_ops entries.
+ * @param num_ops
+ * The maximum number of operations to dequeue.
+ *
+ * @return
+ * The number of operations actually dequeued (this is the number of entries
+ * copied into the @p ops array).
+ */
+static inline uint16_t
+rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_dec_op **ops, uint16_t num_ops)
+{
+ struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
+ struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
+ uint16_t n = dev->dequeue_dec_ops(q_data, ops, num_ops);
+
+ rte_bbdev_log_verbose("%u decode ops dequeued to dev%u,q%u\n",
+ n, dev_id, queue_id);
+
+ return n;
+}
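+
+/*
+ * Illustrative datapath loop (a sketch; "ops" is assumed to hold num_ops
+ * populated encode operations and "ops_deq" provides room for the results).
+ * Both calls are non-blocking, so the application retries until every
+ * operation has passed through the device:
+ *
+ *   uint16_t enq = 0, deq = 0;
+ *
+ *   while (enq < num_ops)
+ *           enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
+ *                           &ops[enq], num_ops - enq);
+ *   while (deq < enq)
+ *           deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
+ *                           &ops_deq[deq], enq - deq);
+ */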
+
+/** Definitions of device event types */
+enum rte_bbdev_event_type {
+ RTE_BBDEV_EVENT_UNKNOWN, /**< unknown event type */
+ RTE_BBDEV_EVENT_ERROR, /**< error interrupt event */
+ RTE_BBDEV_EVENT_DEQUEUE, /**< dequeue event */
+ RTE_BBDEV_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Typedef for application callback function registered by application
+ * software for notification of device events
+ *
+ * @param dev_id
+ * Device identifier
+ * @param event
+ * Device event to register for notification of.
+ * @param cb_arg
+ * User specified parameter to be passed to user's callback function.
+ * @param ret_param
+ * To pass data back to user application.
+ */
+typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
+ enum rte_bbdev_event_type event, void *cb_arg,
+ void *ret_param);
+
+/**
+ * Register a callback function for specific device id. Multiple callbacks can
+ * be added and will be called in the order they are added when an event is
+ * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
+ *
+ * @param dev_id
+ * Device id.
+ * @param event
+ * The event that the callback will be registered for.
+ * @param cb_fn
+ * User supplied callback function to be called.
+ * @param cb_arg
+ * Pointer to parameter that will be passed to the callback.
+ *
+ * @return
+ * Zero on success, negative value on failure.
+ */
+int
+rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
+ rte_bbdev_cb_fn cb_fn, void *cb_arg);
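+
+/*
+ * Illustrative registration (a sketch; "event_cb" is a hypothetical
+ * application function matching the rte_bbdev_cb_fn prototype):
+ *
+ *   static void
+ *   event_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
+ *                   void *cb_arg, void *ret_param)
+ *   {
+ *           if (event == RTE_BBDEV_EVENT_ERROR)
+ *                   printf("Error event on dev %u\n", dev_id);
+ *   }
+ *
+ *   rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
+ *                   event_cb, NULL);
+ */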
+
+/**
+ * Unregister a callback function for specific device id.
+ *
+ * @param dev_id
+ * The device identifier.
+ * @param event
+ * The event that the callback will be unregistered for.
+ * @param cb_fn
+ * User supplied callback function to be unregistered.
+ * @param cb_arg
+ * Pointer to the parameter supplied when registering the callback.
+ * (void *)-1 means to remove all registered callbacks with the specified
+ * function address.
+ *
+ * @return
+ * - 0 on success
+ * - -EINVAL if an invalid parameter pointer is provided
+ * - -EAGAIN if the provided callback pointer does not exist
+ */
+int
+rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
+ rte_bbdev_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * Enable a one-shot interrupt on the next operation enqueued to a particular
+ * queue. The interrupt will be triggered when the operation is ready to be
+ * dequeued. To handle the interrupt, an epoll file descriptor must be
+ * registered using rte_bbdev_queue_intr_ctl(), and then an application
+ * thread/lcore can wait for the interrupt using rte_epoll_wait().
+ *
+ * @param dev_id
+ * The device identifier.
+ * @param queue_id
+ * The index of the queue.
+ *
+ * @return
+ * - 0 on success
+ * - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
+
+/**
+ * Disable a one-shot interrupt on the next operation enqueued to a particular
+ * queue (if it has been enabled).
+ *
+ * @param dev_id
+ * The device identifier.
+ * @param queue_id
+ * The index of the queue.
+ *
+ * @return
+ * - 0 on success
+ * - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
+
+/**
+ * Control interface for per-queue interrupts.
+ *
+ * @param dev_id
+ * The device identifier.
+ * @param queue_id
+ * The index of the queue.
+ * @param epfd
+ * Epoll file descriptor that will be associated with the interrupt source.
+ * If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
+ * file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
+ * be used when calling rte_epoll_wait()).
+ * @param op
+ * The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
+ * RTE_INTR_EVENT_DEL.
+ * @param data
+ * User context, that will be returned in the epdata.data field of the
+ * rte_epoll_event structure filled in by rte_epoll_wait().
+ *
+ * @return
+ * - 0 on success
+ * - -ENOTSUP if interrupts are not supported by the identified device
+ * - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
+ void *data);
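+
+/*
+ * Illustrative interrupt-driven dequeue (a sketch; return codes are not
+ * checked). The per-queue interrupt is one-shot, so it is re-armed before
+ * each burst of enqueues:
+ *
+ *   struct rte_epoll_event ev;
+ *
+ *   rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
+ *                   RTE_INTR_EVENT_ADD, NULL);
+ *   rte_bbdev_queue_intr_enable(dev_id, queue_id);
+ *   // ... enqueue operations ...
+ *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
+ *   // ... dequeue the completed operations ...
+ */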
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BBDEV_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_BBDEV_OP_H_
+#define _RTE_BBDEV_OP_H_
+
+/**
+ * @file rte_bbdev_op.h
+ *
+ * Defines wireless base band layer 1 operations and capabilities
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#define RTE_BBDEV_MAX_CODE_BLOCKS 64
+
+extern int bbdev_logtype;
+
+/**
+ * Helper macro for logging
+ *
+ * @param level
+ * Log level: EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, or DEBUG
+ * @param fmt
+ * The format string, as in printf(3).
+ * @param ...
+ * The variable arguments required by the format string.
+ *
+ * @return
+ * - 0 on success
+ * - Negative on error
+ */
+#define rte_bbdev_log(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)
+
+/**
+ * Helper macro for debug logging with extra source info
+ *
+ * @param fmt
+ * The format string, as in printf(3).
+ * @param ...
+ * The variable arguments required by the format string.
+ *
+ * @return
+ * - 0 on success
+ * - Negative on error
+ */
+#define rte_bbdev_log_debug(fmt, ...) \
+ rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
+ ##__VA_ARGS__)
+
+/**
+ * Helper macro for extra conditional logging from datapath
+ *
+ * @param fmt
+ * The format string, as in printf(3).
+ * @param ...
+ * The variable arguments required by the format string.
+ *
+ * @return
+ * - 0 on success
+ * - Negative on error
+ */
+#define rte_bbdev_log_verbose(fmt, ...) \
+ (void)((RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL) ? \
+ rte_log(RTE_LOG_DEBUG, \
+ bbdev_logtype, ": " fmt "\n", ##__VA_ARGS__) : 0)
+
+/** Flags for turbo decoder operation and capability structure */
+enum rte_bbdev_op_td_flag_bitmasks {
+ /**< If sub block de-interleaving is to be performed. */
+ RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE = (1ULL << 0),
+ /**< To use CRC Type 24B (otherwise use CRC Type 24A). */
+ RTE_BBDEV_TURBO_CRC_TYPE_24B = (1ULL << 1),
+ /**< If turbo equalization is to be performed. */
+ RTE_BBDEV_TURBO_EQUALIZER = (1ULL << 2),
+ /**< If set, saturate soft output to +/-127 */
+ RTE_BBDEV_TURBO_SOFT_OUT_SATURATE = (1ULL << 3),
+ /**< Set to 1 to start iteration from even, else odd; one iteration =
+ * max_iteration + 0.5
+ */
+ RTE_BBDEV_TURBO_HALF_ITERATION_EVEN = (1ULL << 4),
+ /**< If 0, TD stops after CRC matches; else if 1, runs to end of next
+ * odd iteration after CRC matches
+ */
+ RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH = (1ULL << 5),
+ /**< Set if soft output is required to be output */
+ RTE_BBDEV_TURBO_SOFT_OUTPUT = (1ULL << 6),
+ /**< Set to enable early termination mode */
+ RTE_BBDEV_TURBO_EARLY_TERMINATION = (1ULL << 7),
+ /**< Set if a device supports decoder dequeue interrupts */
+ RTE_BBDEV_TURBO_DEC_INTERRUPTS = (1ULL << 9),
+ /**< Set if positive LLR encoded input is supported. Positive LLR value
+ * represents the level of confidence for bit '1', and vice versa for
+ * bit '0'.
+ * This is mutually exclusive with RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN
+ * when used to formalize the input data format.
+ */
+ RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN = (1ULL << 10),
+ /**< Set if negative LLR encoded input is supported. Negative LLR value
+ * represents the level of confidence for bit '1', and vice versa for
+ * bit '0'.
+ * This is mutually exclusive with RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN
+ * when used to formalize the input data format.
+ */
+ RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN = (1ULL << 11),
+ /**< Set if positive LLR soft output is supported. Positive LLR value
+ * represents the level of confidence for bit '1', and vice versa for
+ * bit '0'.
+ * This is mutually exclusive with
+ * RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT when used to formalize
+ * the input data format.
+ */
+ RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT = (1ULL << 12),
+ /**< Set if negative LLR soft output is supported. Negative LLR value
+ * represents the level of confidence for bit '1', and vice versa for
+ * bit '0'.
+ * This is mutually exclusive with
+ * RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT when used to formalize the
+ * input data format.
+ */
+ RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT = (1ULL << 13),
+ /**< Set if driver supports flexible parallel MAP engine decoding. If
+ * not supported, num_maps (number of MAP engines) argument is unusable.
+ */
+ RTE_BBDEV_TURBO_MAP_DEC = (1ULL << 14),
+ /**< Set if a device supports scatter-gather functionality */
+ RTE_BBDEV_TURBO_DEC_SCATTER_GATHER = (1ULL << 15)
+};
+
+/** Flags for turbo encoder operation and capability structure */
+enum rte_bbdev_op_te_flag_bitmasks {
+ /**< Ignore rv_index and set K0 = 0 */
+ RTE_BBDEV_TURBO_RV_INDEX_BYPASS = (1ULL << 0),
+ /**< If rate matching is to be performed */
+ RTE_BBDEV_TURBO_RATE_MATCH = (1ULL << 1),
+ /**< This bit must be set to enable CRC-24B generation */
+ RTE_BBDEV_TURBO_CRC_24B_ATTACH = (1ULL << 2),
+ /**< This bit must be set to enable CRC-24A generation */
+ RTE_BBDEV_TURBO_CRC_24A_ATTACH = (1ULL << 3),
+ /**< Set if a device supports encoder dequeue interrupts */
+ RTE_BBDEV_TURBO_ENC_INTERRUPTS = (1ULL << 4),
+ /**< Set if a device supports scatter-gather functionality */
+ RTE_BBDEV_TURBO_ENC_SCATTER_GATHER = (1ULL << 5)
+};
+
+/**< Data input and output buffer for BBDEV operations */
+struct rte_bbdev_op_data {
+ /**< The mbuf data structure representing the data for BBDEV operation.
+ *
+ * This mbuf pointer can point to one Code Block (CB) data buffer or
+ * multiple CBs contiguously located next to each other.
+ * A Transport Block (TB) represents a whole piece of data that is
+ * divided into one or more CBs. The maximum number of CBs that can be
+ * contained in one TB is defined by RTE_BBDEV_MAX_CODE_BLOCKS.
+ *
+ * An mbuf data structure cannot represent more than one TB. The
+ * smallest piece of data that can be contained in one mbuf is one CB.
+ * An mbuf can include one contiguous CB, a subset of contiguous CBs
+ * belonging to one TB, or all contiguous CBs belonging to one TB.
+ *
+ * If a BBDEV PMD supports the extended capability "Scatter-Gather",
+ * then it is capable of collecting (gathering) non-contiguous
+ * (scattered) data from multiple locations in the memory.
+ * This capability is reported by the capability flags:
+ * - RTE_BBDEV_TURBO_ENC_SCATTER_GATHER and
+ * - RTE_BBDEV_TURBO_DEC_SCATTER_GATHER.
+ * Only if a BBDEV PMD supports this feature, chained mbuf data
+ * structures are accepted. A chained mbuf can represent one
+ * non-contiguous CB or multiple non-contiguous CBs.
+ * If a BBDEV PMD does not support this feature, it will assume the
+ * inbound mbuf data contains one segment.
+ *
+ * The output mbuf data though is always one segment, even if the input
+ * was a chained mbuf.
+ */
+ struct rte_mbuf *data;
+ /**< The starting point of the BBDEV (encode/decode) operation,
+ * in bytes.
+ *
+ * BBDEV starts to read data past this offset.
+ * In case of chained mbuf, this offset applies only to the first mbuf
+ * segment.
+ */
+ uint32_t offset;
+ /**< The total data length to be processed in one operation, in bytes.
+ *
+ * In case the mbuf data is representing one CB, this is the length of
+ * the CB undergoing the operation.
+ * If it's for multiple CBs, this is the total length of those CBs
+ * undergoing the operation.
+ * If it's for one TB, this is the total length of the TB under
+ * operation.
+ *
+ * In case of chained mbuf, this data length includes the lengths of the
+ * "scattered" data segments undergoing the operation.
+ */
+ uint32_t length;
+};
+
+struct rte_bbdev_op_dec_cb_params {
+ /**< The K size of the input CB, in bits [40:6144], as specified in
+ * 3GPP TS 36.212.
+ * This size is inclusive of CRC bits, regardless whether it was
+ * pre-calculated by the application or not.
+ */
+ uint16_t k;
+ /**< The E length of the CB rate matched LLR output, in bytes, as in
+ * 3GPP TS 36.212.
+ */
+ uint32_t e;
+};
+
+struct rte_bbdev_op_dec_tb_params {
+ /**< The K- size of the input CB, in bits [40:6144], that is in the
+ * Turbo operation when r < C-, as in 3GPP TS 36.212.
+ */
+ uint16_t k_neg;
+ /**< The K+ size of the input CB, in bits [40:6144], that is in the
+ * Turbo operation when r >= C-, as in 3GPP TS 36.212.
+ */
+ uint16_t k_pos;
+ /**< The number of CBs that have K- size, [0:63] */
+ uint8_t c_neg;
+ /**< The total number of CBs in the TB, [1:RTE_BBDEV_MAX_CODE_BLOCKS] */
+ uint8_t c;
+ /**< The number of CBs that uses Ea before switching to Eb, [0:63] */
+ uint8_t cab;
+ /**< The E size of the CB rate matched output to use in the Turbo
+ * operation when r < cab
+ */
+ uint32_t ea;
+ /**< The E size of the CB rate matched output to use in the Turbo
+ * operation when r >= cab
+ */
+ uint32_t eb;
+};
+
+/**< Operation structure for Turbo decode.
+ * An operation can be performed on one CB at a time ("CB-mode").
+ * An operation can also be performed on one or multiple CBs that logically
+ * belong to one TB ("TB-mode").
+ * The provided K size parameter of the CB is its size coming out of the
+ * decode operation.
+ * CRC24A/B check is requested by the application by setting the flag
+ * RTE_BBDEV_TURBO_CRC_TYPE_24B for CRC24B check or CRC24A otherwise.
+ * In TB-mode, BBDEV concatenates the decoded CBs one next to the other with
+ * relevant CRC24B in between.
+ *
+ * The input encoded CB data is the Virtual Circular Buffer data stream, wk,
+ * with the null padding included as described in 3GPP TS 36.212
+ * section 5.1.4.1.2 and shown in 3GPP TS 36.212 section 5.1.4.1 Figure 5.1.4-1.
+ * The size of the virtual circular buffer is 3*Kpi, where Kpi is the 32 byte
+ * aligned value of K, as specified in 3GPP TS 36.212 section 5.1.4.1.1.
+ *
+ * Each byte in the input circular buffer is the LLR value of each bit of the
+ * original CB.
+ *
+ * Hard output is a mandatory capability that all BBDEV PMDs support. This is
+ * the decoded CBs of K sizes (CRC24A/B is the last 24-bit in each decoded CB).
+ * Soft output is an optional capability for BBDEV PMDs. If supported, an LLR
+ * rate matched output is computed in the soft_output buffer structure.
+ *
+ * The output mbuf data structure is expected to be allocated by the
+ * application with enough room for the output data.
+ */
+struct rte_bbdev_op_turbo_dec {
+ /**< The Virtual Circular Buffer, wk, size 3*Kpi for each CB */
+ struct rte_bbdev_op_data input;
+ /**< The hard decisions buffer for the decoded output,
+ * size K for each CB
+ */
+ struct rte_bbdev_op_data hard_output;
+ /**< The soft LLR output buffer - optional */
+ struct rte_bbdev_op_data soft_output;
+
+ uint32_t op_flags; /**< Flags from rte_bbdev_op_td_flag_bitmasks */
+ uint8_t rv_index; /**< Rv index for rate matching [0:3] */
+ /**< The minimum number of iterations to perform in decoding all CBs in
+ * this operation - input
+ */
+ uint8_t iter_min:4;
+ /**< The maximum number of iterations to perform in decoding all CBs in
+ * this operation - input
+ */
+ uint8_t iter_max:4;
+ /**< The maximum number of iterations that were performed in decoding all
+ * CBs in this decode operation - output
+ */
+ uint8_t iter_count;
+ /**< 5 bit extrinsic scale (scale factor on extrinsic info) */
+ uint8_t ext_scale;
+ /**< Number of MAP engines to use in decode,
+ * must be power of 2 (or 0 to auto-select)
+ */
+ uint8_t num_maps;
+
+ uint8_t code_block_mode; /**< [0 - TB : 1 - CB] */
+ union {
+ /**< Struct which stores Code Block specific parameters */
+ struct rte_bbdev_op_dec_cb_params cb_params;
+ /**< Struct which stores Transport Block specific parameters */
+ struct rte_bbdev_op_dec_tb_params tb_params;
+ };
+};
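+
+/*
+ * Illustrative CB-mode decode setup (a sketch; "op", the mbufs "m_in" and
+ * "m_out", and the sizes "kpi", "k" and "e" are assumed to be prepared by
+ * the application):
+ *
+ *   op->turbo_dec.input.data = m_in;
+ *   op->turbo_dec.input.offset = 0;
+ *   op->turbo_dec.input.length = 3 * kpi;  // wk circular buffer, in bytes
+ *   op->turbo_dec.hard_output.data = m_out;
+ *   op->turbo_dec.code_block_mode = 1;     // CB-mode
+ *   op->turbo_dec.cb_params.k = k;
+ *   op->turbo_dec.cb_params.e = e;
+ *   op->turbo_dec.iter_max = 8;
+ */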
+
+struct rte_bbdev_op_enc_cb_params {
+ /**< The K size of the input CB, in bits [40:6144], as specified in
+ * 3GPP TS 36.212.
+ * This size is inclusive of CRC24A, regardless whether it was
+ * pre-calculated by the application or not.
+ */
+ uint16_t k;
+ /**< The E length of the CB rate matched output, in bits, as in
+ * 3GPP TS 36.212.
+ */
+ uint32_t e;
+ /**< The Ncb soft buffer size of the CB rate matched output [K:3*Kpi],
+ * in bits, as specified in 3GPP TS 36.212.
+ */
+ uint16_t ncb;
+};
+
+struct rte_bbdev_op_enc_tb_params {
+ /**< The K- size of the input CB, in bits [40:6144], that is in the
+ * Turbo operation when r < C-, as in 3GPP TS 36.212.
+ * This size is inclusive of CRC24B, regardless whether it was
+ * pre-calculated and appended by the application or not.
+ */
+ uint16_t k_neg;
+ /**< The K+ size of the input CB, in bits [40:6144], that is in the
+ * Turbo operation when r >= C-, as in 3GPP TS 36.212.
+ * This size is inclusive of CRC24B, regardless whether it was
+ * pre-calculated and appended by the application or not.
+ */
+ uint16_t k_pos;
+ /**< The number of CBs that have K- size, [0:63] */
+ uint8_t c_neg;
+ /**< The total number of CBs in the TB, [1:RTE_BBDEV_MAX_CODE_BLOCKS] */
+ uint8_t c;
+ /**< The number of CBs that uses Ea before switching to Eb, [0:63] */
+ uint8_t cab;
+ /**< The E size of the CB rate matched output to use in the Turbo
+ * operation when r < cab
+ */
+ uint32_t ea;
+ /**< The E size of the CB rate matched output to use in the Turbo
+ * operation when r >= cab
+ */
+ uint32_t eb;
+ /**< The Ncb soft buffer size for the rate matched CB that is used in
+ * the Turbo operation when r < C-, [K:3*Kpi]
+ */
+ uint16_t ncb_neg;
+ /**< The Ncb soft buffer size for the rate matched CB that is used in
+ * the Turbo operation when r >= C-, [K:3*Kpi]
+ */
+ uint16_t ncb_pos;
+ /**< The index of the first CB in the inbound mbuf data, default is 0 */
+ uint8_t r;
+};
+
+/**< Operation structure for Turbo encode.
+ * An operation can be performed on one CB at a time ("CB-mode").
+ * An operation can also be performed on one or multiple CBs that logically
+ * belong to one TB ("TB-mode").
+ *
+ * In CB-mode, CRC24A/B attachment is an optional operation. The K size
+ * parameter is not affected by CRC24A/B inclusion; it only affects the
+ * inbound mbuf data length. Not all BBDEV PMDs are capable of CRC24A/B
+ * calculation. The flags RTE_BBDEV_TURBO_CRC_24A_ATTACH and
+ * RTE_BBDEV_TURBO_CRC_24B_ATTACH inform the application of the relevant
+ * capability. These flags can be set in the op_flags parameter to instruct
+ * BBDEV to calculate and append CRC24A/B to the CB before proceeding with
+ * Turbo encoding.
+ *
+ * In TB-mode, CRC24A is assumed to be pre-calculated and appended to the
+ * inbound TB mbuf data buffer.
+ *
+ * The output mbuf data structure is expected to be allocated by the
+ * application with enough room for the output data.
+ */
+struct rte_bbdev_op_turbo_enc {
+ /**< The input CB or TB data */
+ struct rte_bbdev_op_data input;
+ /**< The rate matched CB or TB output buffer */
+ struct rte_bbdev_op_data output;
+
+ uint32_t op_flags; /**< Flags from rte_bbdev_op_te_flag_bitmasks */
+ uint8_t rv_index; /**< Rv index for rate matching [0:3] */
+
+ uint8_t code_block_mode; /**< [0 - TB : 1 - CB] */
+ union {
+ /**< Struct which stores Code Block specific parameters */
+ struct rte_bbdev_op_enc_cb_params cb_params;
+ /**< Struct which stores Transport Block specific parameters */
+ struct rte_bbdev_op_enc_tb_params tb_params;
+ };
+};
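+
+/*
+ * Illustrative CB-mode encode setup (a sketch; "op", the mbufs and the
+ * sizes "k", "e" and "ncb" are assumed to be prepared by the application;
+ * with CRC24A attachment requested, the inbound data is assumed to exclude
+ * the 24 CRC bits):
+ *
+ *   op->turbo_enc.input.data = m_in;
+ *   op->turbo_enc.input.offset = 0;
+ *   op->turbo_enc.input.length = (k - 24) / 8;
+ *   op->turbo_enc.output.data = m_out;
+ *   op->turbo_enc.op_flags = RTE_BBDEV_TURBO_CRC_24A_ATTACH |
+ *                   RTE_BBDEV_TURBO_RATE_MATCH;
+ *   op->turbo_enc.code_block_mode = 1;  // CB-mode
+ *   op->turbo_enc.cb_params.k = k;
+ *   op->turbo_enc.cb_params.e = e;
+ *   op->turbo_enc.cb_params.ncb = ncb;
+ */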
+
+/**< List of the capabilities for the Turbo Decoder */
+struct rte_bbdev_op_cap_turbo_dec {
+ /**< Flags from rte_bbdev_op_td_flag_bitmasks */
+ uint32_t capability_flags;
+ uint8_t num_buffers_src; /**< Num input code block buffers */
+ /**< Num hard output code block buffers */
+ uint8_t num_buffers_hard_out;
+ /**< Num soft output code block buffers if supported by the driver */
+ uint8_t num_buffers_soft_out;
+};
+
+/**< List of the capabilities for the Turbo Encoder */
+struct rte_bbdev_op_cap_turbo_enc {
+ /**< Flags from rte_bbdev_op_te_flag_bitmasks */
+ uint32_t capability_flags;
+ uint8_t num_buffers_src; /**< Num input code block buffers */
+ uint8_t num_buffers_dst; /**< Num output code block buffers */
+};
+
+/** Different operation types supported by the device */
+enum rte_bbdev_op_type {
+ RTE_BBDEV_OP_NONE, /**< Dummy operation that does nothing */
+ RTE_BBDEV_OP_TURBO_DEC, /**< Turbo decode */
+ RTE_BBDEV_OP_TURBO_ENC, /**< Turbo encode */
+ RTE_BBDEV_OP_TYPE_COUNT, /**< Count of different op types */
+};
+
+/**< Bit indexes of possible errors reported through status field */
+enum {
+ RTE_BBDEV_DRV_ERROR,
+ RTE_BBDEV_DATA_ERROR,
+ RTE_BBDEV_CRC_ERROR,
+};
+
+/**< Structure specifying a single encode operation */
+struct rte_bbdev_enc_op {
+ int status; /**< Status of operation that was performed */
+ struct rte_mempool *mempool; /**< Mempool which op instance is in */
+ void *opaque_data; /**< Opaque pointer for user data */
+ /**< Contains encoder specific parameters */
+ struct rte_bbdev_op_turbo_enc turbo_enc;
+};
+
+/**< Structure specifying a single decode operation */
+struct rte_bbdev_dec_op {
+ int status; /**< Status of operation that was performed */
+ struct rte_mempool *mempool; /**< Mempool which op instance is in */
+ void *opaque_data; /**< Opaque pointer for user data */
+ /**< Contains decoder specific parameters */
+ struct rte_bbdev_op_turbo_dec turbo_dec;
+};
+
+/**< Operation capabilities supported by a device */
+struct rte_bbdev_op_cap {
+ enum rte_bbdev_op_type type; /**< Type of operation */
+ union {
+ struct rte_bbdev_op_cap_turbo_dec turbo_dec;
+ struct rte_bbdev_op_cap_turbo_enc turbo_enc;
+ } cap; /**< Operation-type specific capabilities */
+};
+
+/**< @internal Private data structure stored with operation pool. */
+struct rte_bbdev_op_pool_private {
+ enum rte_bbdev_op_type type; /**< Type of operations in a pool */
+};
+
+/**
+ * Converts queue operation type from enum to string
+ *
+ * @param op_type
+ * Operation type as enum
+ *
+ * @return
+ * Operation type as string or NULL if op_type is invalid
+ *
+ */
+const char *
+rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type);
+
+/**
+ * Creates a bbdev operation mempool
+ *
+ * @param name
+ * Pool name.
+ * @param type
+ * Operation type, use RTE_BBDEV_OP_NONE for a pool which supports all
+ * operation types.
+ * @param num_elements
+ * Number of elements in the pool.
+ * @param cache_size
+ * Number of elements to cache on an lcore, see rte_mempool_create() for
+ * further details about cache size.
+ * @param socket_id
+ * Socket to allocate memory on.
+ *
+ * @return
+ * - Pointer to a mempool on success,
+ * - NULL pointer on failure.
+ */
+struct rte_mempool *
+rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
+ unsigned int num_elements, unsigned int cache_size,
+ int socket_id);
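+
+/*
+ * Illustrative pool creation (a sketch; the name and sizes are arbitrary):
+ *
+ *   struct rte_mempool *mp = rte_bbdev_op_pool_create("bbdev_enc_pool",
+ *                   RTE_BBDEV_OP_TURBO_ENC, 2048, 128, rte_socket_id());
+ *   if (mp == NULL)
+ *           rte_exit(EXIT_FAILURE, "Op pool creation failed\n");
+ */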
+
+/**
+ * Bulk allocate encode operations from a mempool with parameter defaults reset.
+ *
+ * @param mempool
+ * Operation mempool, created by rte_bbdev_op_pool_create().
+ * @param ops
+ * Output array to place allocated operations
+ * @param num_ops
+ * Number of operations to allocate
+ *
+ * @return
+ * - 0 on success
+ * - -EINVAL if an invalid mempool is provided
+ */
+static inline int
+rte_bbdev_enc_op_alloc_bulk(struct rte_mempool *mempool,
+ struct rte_bbdev_enc_op **ops, uint16_t num_ops)
+{
+ struct rte_bbdev_op_pool_private *priv;
+ int ret;
+
+ /* Check type */
+ priv = (struct rte_bbdev_op_pool_private *)
+ rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != RTE_BBDEV_OP_TURBO_ENC))
+ return -EINVAL;
+
+ /* Get elements */
+ ret = rte_mempool_get_bulk(mempool, (void **)ops, num_ops);
+ if (unlikely(ret < 0))
+ return ret;
+
+ rte_bbdev_log_verbose("%u encode ops allocated from %s\n",
+ num_ops, mempool->name);
+
+ return 0;
+}
+
+/**
+ * Bulk allocate decode operations from a mempool with parameter defaults reset.
+ *
+ * @param mempool
+ * Operation mempool, created by rte_bbdev_op_pool_create().
+ * @param ops
+ * Output array to place allocated operations
+ * @param num_ops
+ * Number of operations to allocate
+ *
+ * @return
+ * - 0 on success
+ * - -EINVAL if an invalid mempool is provided
+ */
+static inline int
+rte_bbdev_dec_op_alloc_bulk(struct rte_mempool *mempool,
+ struct rte_bbdev_dec_op **ops, uint16_t num_ops)
+{
+ struct rte_bbdev_op_pool_private *priv;
+ int ret;
+
+ /* Check type */
+ priv = (struct rte_bbdev_op_pool_private *)
+ rte_mempool_get_priv(mempool);
+ if (unlikely(priv->type != RTE_BBDEV_OP_TURBO_DEC))
+ return -EINVAL;
+
+ /* Get elements */
+ ret = rte_mempool_get_bulk(mempool, (void **)ops, num_ops);
+ if (unlikely(ret < 0))
+ return ret;
+
+ rte_bbdev_log_verbose("%u encode ops allocated from %s\n",
+ num_ops, mempool->name);
+
+ return 0;
+}
+
+/**
+ * Free decode operation structures that were allocated by
+ * rte_bbdev_dec_op_alloc_bulk().
+ * All structures must belong to the same mempool.
+ *
+ * @param ops
+ * Operation structures
+ * @param num_ops
+ * Number of structures
+ */
+static inline void
+rte_bbdev_dec_op_free_bulk(struct rte_bbdev_dec_op **ops, unsigned int num_ops)
+{
+ if (num_ops > 0) {
+ rte_mempool_put_bulk(ops[0]->mempool, (void **)ops, num_ops);
+ rte_bbdev_log_verbose("%u decode ops freed to %s\n", num_ops,
+ ops[0]->mempool->name);
+ }
+}
+
+/**
+ * Free encode operation structures that were allocated by
+ * rte_bbdev_enc_op_alloc_bulk().
+ * All structures must belong to the same mempool.
+ *
+ * @param ops
+ * Operation structures
+ * @param num_ops
+ * Number of structures
+ */
+static inline void
+rte_bbdev_enc_op_free_bulk(struct rte_bbdev_enc_op **ops, unsigned int num_ops)
+{
+ if (num_ops > 0) {
+ rte_mempool_put_bulk(ops[0]->mempool, (void **)ops, num_ops);
+ rte_bbdev_log_verbose("%u encode ops freed to %s\n", num_ops,
+ ops[0]->mempool->name);
+ }
+}
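+
+/*
+ * Illustrative alloc/free pairing (a sketch; "mp" is an encode op pool and
+ * BURST_SIZE is a hypothetical application constant):
+ *
+ *   struct rte_bbdev_enc_op *ops[BURST_SIZE];
+ *
+ *   if (rte_bbdev_enc_op_alloc_bulk(mp, ops, BURST_SIZE) == 0) {
+ *           // ... fill in and process the operations ...
+ *           rte_bbdev_enc_op_free_bulk(ops, BURST_SIZE);
+ *   }
+ */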
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BBDEV_OP_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_BBDEV_PMD_H_
+#define _RTE_BBDEV_PMD_H_
+
+/**
+ * @file rte_bbdev_pmd.h
+ *
+ * Wireless base band driver-facing APIs.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This API provides the mechanism for device drivers to register with the
+ * bbdev interface. User applications should not use this API.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_log.h>
+
+#include "rte_bbdev.h"
+
+/** Suggested value for SW based devices */
+#define RTE_BBDEV_DEFAULT_MAX_NB_QUEUES RTE_MAX_LCORE
+
+/** Suggested queue size limit for SW based devices */
+#define RTE_BBDEV_QUEUE_SIZE_LIMIT 16384
+
+/**
+ * @internal
+ * Allocates a new slot for a bbdev and returns the pointer to that slot
+ * for the driver to use.
+ *
+ * @param name
+ * Unique identifier name for each bbdev device
+ *
+ * @return
+ * - Slot in the rte_bbdev array for a new device;
+ */
+struct rte_bbdev *
+rte_bbdev_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified bbdev.
+ *
+ * @param bbdev
+ * The *bbdev* pointer is the address of the *rte_bbdev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+int
+rte_bbdev_release(struct rte_bbdev *bbdev);
+
+/**
+ * Get the device structure for a named device.
+ *
+ * @param name
+ * Name of the device
+ *
+ * @return
+ * - The device structure pointer, or
+ * - NULL otherwise
+ *
+ */
+struct rte_bbdev *
+rte_bbdev_get_named_dev(const char *name);
+
+/**
+ * Definitions of all functions exported by a driver through the generic
+ * structure of type *rte_bbdev_ops* supplied in the *rte_bbdev* structure
+ * associated with a device.
+ */
+
+/** @internal Function used to configure device memory. */
+typedef int (*rte_bbdev_setup_queues_t)(struct rte_bbdev *dev,
+ uint16_t num_queues, int socket_id);
+
+/** @internal Function used to configure interrupts for a device. */
+typedef int (*rte_bbdev_intr_enable_t)(struct rte_bbdev *dev);
+
+/** @internal Function to allocate and configure a device queue. */
+typedef int (*rte_bbdev_queue_setup_t)(struct rte_bbdev *dev,
+ uint16_t queue_id, const struct rte_bbdev_queue_conf *conf);
+
+/**
+ * @internal
+ * Function to release memory resources allocated for a device queue.
+ */
+typedef int (*rte_bbdev_queue_release_t)(struct rte_bbdev *dev,
+ uint16_t queue_id);
+
+/** @internal Function to start a configured device. */
+typedef int (*rte_bbdev_start_t)(struct rte_bbdev *dev);
+
+/** @internal Function to stop a device. */
+typedef void (*rte_bbdev_stop_t)(struct rte_bbdev *dev);
+
+/** @internal Function to close a device. */
+typedef int (*rte_bbdev_close_t)(struct rte_bbdev *dev);
+
+/** @internal Function to start a device queue. */
+typedef int (*rte_bbdev_queue_start_t)(struct rte_bbdev *dev,
+ uint16_t queue_id);
+
+/** @internal Function to stop a device queue. */
+typedef int (*rte_bbdev_queue_stop_t)(struct rte_bbdev *dev, uint16_t queue_id);
+
+/** @internal Function to read stats from a device. */
+typedef void (*rte_bbdev_stats_get_t)(struct rte_bbdev *dev,
+ struct rte_bbdev_stats *stats);
+
+/** @internal Function to reset stats on a device. */
+typedef void (*rte_bbdev_stats_reset_t)(struct rte_bbdev *dev);
+
+/** @internal Function to retrieve specific information of a device. */
+typedef void (*rte_bbdev_info_get_t)(struct rte_bbdev *dev,
+ struct rte_bbdev_driver_info *dev_info);
+
+/**
+ * @internal
+ * Function to enable interrupt for next op on a queue of a device.
+ */
+typedef int (*rte_bbdev_queue_intr_enable_t)(struct rte_bbdev *dev,
+ uint16_t queue_id);
+
+/**
+ * @internal
+ * Function to disable interrupt for next op on a queue of a device.
+ */
+typedef int (*rte_bbdev_queue_intr_disable_t)(struct rte_bbdev *dev,
+ uint16_t queue_id);
+
+/**
+ * Operations implemented by drivers. Fields marked as "Required" must be
+ * provided by a driver for a device to have basic functionality. "Optional"
+ * fields are for non-vital operations.
+ */
+struct rte_bbdev_ops {
+ /** Allocate and configure device memory. Optional. */
+ rte_bbdev_setup_queues_t setup_queues;
+ /** Configure interrupts. Optional. */
+ rte_bbdev_intr_enable_t intr_enable;
+ /** Start device. Optional. */
+ rte_bbdev_start_t start;
+ /** Stop device. Optional. */
+ rte_bbdev_stop_t stop;
+ /** Close device. Optional. */
+ rte_bbdev_close_t close;
+
+ /** Get device info. Required. */
+ rte_bbdev_info_get_t info_get;
+ /** Get device statistics. Optional. */
+ rte_bbdev_stats_get_t stats_get;
+ /** Reset device statistics. Optional. */
+ rte_bbdev_stats_reset_t stats_reset;
+
+ /** Set up a device queue. Required. */
+ rte_bbdev_queue_setup_t queue_setup;
+ /** Release a queue. Required. */
+ rte_bbdev_queue_release_t queue_release;
+ /** Start a queue. Optional. */
+ rte_bbdev_queue_start_t queue_start;
+ /** Stop a queue. Optional. */
+ rte_bbdev_queue_stop_t queue_stop;
+
+ /** Enable queue interrupt. Optional. */
+ rte_bbdev_queue_intr_enable_t queue_intr_enable;
+ /** Disable queue interrupt. Optional. */
+ rte_bbdev_queue_intr_disable_t queue_intr_disable;
+};
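+
+/*
+ * Illustrative ops table for a minimal software PMD (a sketch; the "sw_*"
+ * handlers are hypothetical driver functions). A driver assigns this table,
+ * together with its enqueue/dequeue functions, to the rte_bbdev structure
+ * returned by rte_bbdev_allocate():
+ *
+ *   static const struct rte_bbdev_ops sw_bbdev_ops = {
+ *           .info_get = sw_info_get,
+ *           .queue_setup = sw_queue_setup,
+ *           .queue_release = sw_queue_release,
+ *   };
+ */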
+
+/**
+ * Executes all the user application registered callbacks for the specific
+ * device and event type.
+ *
+ * @param dev
+ * Pointer to the device structure.
+ * @param event
+ * Event type.
+ * @param ret_param
+ * To pass data back to user application.
+ */
+void
+rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
+ enum rte_bbdev_event_type event, void *ret_param);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BBDEV_PMD_H_ */
--- /dev/null
+EXPERIMENTAL {
+ global:
+
+ bbdev_logtype;
+ rte_bbdev_allocate;
+ rte_bbdev_callback_register;
+ rte_bbdev_callback_unregister;
+ rte_bbdev_close;
+ rte_bbdev_count;
+ rte_bbdev_dequeue_dec_ops;
+ rte_bbdev_dequeue_enc_ops;
+ rte_bbdev_devices;
+ rte_bbdev_enqueue_dec_ops;
+ rte_bbdev_enqueue_enc_ops;
+ rte_bbdev_find_next;
+ rte_bbdev_get_named_dev;
+ rte_bbdev_info_get;
+ rte_bbdev_intr_enable;
+ rte_bbdev_is_valid;
+ rte_bbdev_op_pool_create;
+ rte_bbdev_op_type_str;
+ rte_bbdev_pmd_callback_process;
+ rte_bbdev_queue_configure;
+ rte_bbdev_queue_info_get;
+ rte_bbdev_queue_intr_ctl;
+ rte_bbdev_queue_intr_disable;
+ rte_bbdev_queue_intr_enable;
+ rte_bbdev_queue_start;
+ rte_bbdev_queue_stop;
+ rte_bbdev_release;
+ rte_bbdev_setup_queues;
+ rte_bbdev_start;
+ rte_bbdev_stats_get;
+ rte_bbdev_stats_reset;
+ rte_bbdev_stop;
+
+ local: *;
+};
_LDLIBS-$(CONFIG_RTE_LIBRTE_MBUF) += -lrte_mbuf
_LDLIBS-$(CONFIG_RTE_LIBRTE_NET) += -lrte_net
_LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER) += -lrte_ethdev
+_LDLIBS-$(CONFIG_RTE_LIBRTE_BBDEV) += -lrte_bbdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += -lrte_cryptodev
_LDLIBS-$(CONFIG_RTE_LIBRTE_SECURITY) += -lrte_security
_LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += -lrte_eventdev