This patch adds the control plane API for dmadev.
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
- A device name used to designate the DMA device in console messages, for
administration or debugging purposes.
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dma_info_get`` API
+can be used to retrieve the device information and supported features.
+
+Silent mode is a special device capability: when it is enabled, the
+application is not required to invoke the dequeue APIs.
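+
+For example, an application can query the device capabilities and enable
+silent mode only when the device reports support for it (a minimal sketch,
+error handling omitted):
+
+.. code-block:: c
+
+   struct rte_dma_info info;
+   struct rte_dma_conf conf = { .nb_vchans = 1 };
+
+   rte_dma_info_get(dev_id, &info);
+   if (info.dev_capa & RTE_DMA_CAPA_SILENT)
+       conf.enable_silent = true;
+   rte_dma_configure(dev_id, &conf);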
* Added a DMA device framework for management and provision of
hardware and software DMA devices.
+ * Added generic API which supports a number of different DMA
+   operations.
* **Added new RSS offload types for IPv4/L4 checksum in RSS flow.**
if (dev == NULL)
return -EINVAL;
+ if (dev->state == RTE_DMA_DEV_READY)
+ return rte_dma_close(dev->dev_id);
+
dma_release(dev);
return 0;
}
return count;
}
+
+int
+rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
+{
+ const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+ int ret;
+
+ if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+ memset(dev_info, 0, sizeof(struct rte_dma_info));
+ ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+ sizeof(struct rte_dma_info));
+ if (ret != 0)
+ return ret;
+
+ dev_info->dev_name = dev->dev_name;
+ dev_info->numa_node = dev->device->numa_node;
+ dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+
+ return 0;
+}
+
+int
+rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
+{
+ struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+ struct rte_dma_info dev_info;
+ int ret;
+
+ if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
+ return -EINVAL;
+
+ if (dev->dev_started != 0) {
+ RTE_DMA_LOG(ERR,
+ "Device %d must be stopped to allow configuration",
+ dev_id);
+ return -EBUSY;
+ }
+
+ ret = rte_dma_info_get(dev_id, &dev_info);
+ if (ret != 0) {
+ RTE_DMA_LOG(ERR, "Device %d get device info failed", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_vchans == 0) {
+ RTE_DMA_LOG(ERR,
+ "Device %d configure zero vchans", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_vchans > dev_info.max_vchans) {
+ RTE_DMA_LOG(ERR,
+ "Device %d configure too many vchans", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->enable_silent &&
+ !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
+ RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+ ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+ sizeof(struct rte_dma_conf));
+ if (ret == 0)
+ memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+
+ return ret;
+}
+
+int
+rte_dma_start(int16_t dev_id)
+{
+ struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+ int ret;
+
+ if (!rte_dma_is_valid(dev_id))
+ return -EINVAL;
+
+ if (dev->dev_conf.nb_vchans == 0) {
+ RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+ return -EINVAL;
+ }
+
+ if (dev->dev_started != 0) {
+ RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
+ return 0;
+ }
+
+ if (dev->dev_ops->dev_start == NULL)
+ goto mark_started;
+
+ ret = (*dev->dev_ops->dev_start)(dev);
+ if (ret != 0)
+ return ret;
+
+mark_started:
+ dev->dev_started = 1;
+ return 0;
+}
+
+int
+rte_dma_stop(int16_t dev_id)
+{
+ struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+ int ret;
+
+ if (!rte_dma_is_valid(dev_id))
+ return -EINVAL;
+
+ if (dev->dev_started == 0) {
+ RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
+ return 0;
+ }
+
+ if (dev->dev_ops->dev_stop == NULL)
+ goto mark_stopped;
+
+ ret = (*dev->dev_ops->dev_stop)(dev);
+ if (ret != 0)
+ return ret;
+
+mark_stopped:
+ dev->dev_started = 0;
+ return 0;
+}
+
+int
+rte_dma_close(int16_t dev_id)
+{
+ struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+ int ret;
+
+ if (!rte_dma_is_valid(dev_id))
+ return -EINVAL;
+
+ /* Device must be stopped before it can be closed */
+ if (dev->dev_started == 1) {
+ RTE_DMA_LOG(ERR,
+ "Device %d must be stopped before closing", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+ ret = (*dev->dev_ops->dev_close)(dev);
+ if (ret == 0)
+ dma_release(dev);
+
+ return ret;
+}
+
+int
+rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+ const struct rte_dma_vchan_conf *conf)
+{
+ struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+ struct rte_dma_info dev_info;
+ bool src_is_dev, dst_is_dev;
+ int ret;
+
+ if (!rte_dma_is_valid(dev_id) || conf == NULL)
+ return -EINVAL;
+
+ if (dev->dev_started != 0) {
+ RTE_DMA_LOG(ERR,
+ "Device %d must be stopped to allow configuration",
+ dev_id);
+ return -EBUSY;
+ }
+
+ ret = rte_dma_info_get(dev_id, &dev_info);
+ if (ret != 0) {
+ RTE_DMA_LOG(ERR, "Device %d get device info failed", dev_id);
+ return -EINVAL;
+ }
+ if (dev->dev_conf.nb_vchans == 0) {
+ RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+ return -EINVAL;
+ }
+ if (vchan >= dev_info.nb_vchans) {
+ RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+ conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+ conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+ conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+ RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+ !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
+ RTE_DMA_LOG(ERR,
+ "Device %d don't support mem2mem transfer", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+ !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
+ RTE_DMA_LOG(ERR,
+ "Device %d don't support mem2dev transfer", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+ !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
+ RTE_DMA_LOG(ERR,
+ "Device %d don't support dev2mem transfer", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+ !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
+ RTE_DMA_LOG(ERR,
+ "Device %d don't support dev2dev transfer", dev_id);
+ return -EINVAL;
+ }
+ if (conf->nb_desc < dev_info.min_desc ||
+ conf->nb_desc > dev_info.max_desc) {
+ RTE_DMA_LOG(ERR,
+ "Device %d number of descriptors invalid", dev_id);
+ return -EINVAL;
+ }
+ src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+ conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+ if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
+ (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
+ RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
+ return -EINVAL;
+ }
+ dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+ conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+ if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
+ (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
+ RTE_DMA_LOG(ERR,
+ "Device %d destination port type invalid", dev_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+ return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+ sizeof(struct rte_dma_vchan_conf));
+}
+
+int
+rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
+{
+ const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+ if (!rte_dma_is_valid(dev_id) || stats == NULL)
+ return -EINVAL;
+
+ if (vchan >= dev->dev_conf.nb_vchans &&
+ vchan != RTE_DMA_ALL_VCHAN) {
+ RTE_DMA_LOG(ERR,
+ "Device %d vchan %u out of range", dev_id, vchan);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+ memset(stats, 0, sizeof(struct rte_dma_stats));
+ return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+ sizeof(struct rte_dma_stats));
+}
+
+int
+rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
+{
+ struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+ if (!rte_dma_is_valid(dev_id))
+ return -EINVAL;
+
+ if (vchan >= dev->dev_conf.nb_vchans &&
+ vchan != RTE_DMA_ALL_VCHAN) {
+ RTE_DMA_LOG(ERR,
+ "Device %d vchan %u out of range", dev_id, vchan);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+ return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dma_capability_name(uint64_t capability)
+{
+ static const struct {
+ uint64_t capability;
+ const char *name;
+ } capa_names[] = {
+ { RTE_DMA_CAPA_MEM_TO_MEM, "mem2mem" },
+ { RTE_DMA_CAPA_MEM_TO_DEV, "mem2dev" },
+ { RTE_DMA_CAPA_DEV_TO_MEM, "dev2mem" },
+ { RTE_DMA_CAPA_DEV_TO_DEV, "dev2dev" },
+ { RTE_DMA_CAPA_SVA, "sva" },
+ { RTE_DMA_CAPA_SILENT, "silent" },
+ { RTE_DMA_CAPA_OPS_COPY, "copy" },
+ { RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
+ { RTE_DMA_CAPA_OPS_FILL, "fill" },
+ };
+
+ const char *name = "unknown";
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(capa_names); i++) {
+ if (capability == capa_names[i].capability) {
+ name = capa_names[i].name;
+ break;
+ }
+ }
+
+ return name;
+}
+
+static void
+dma_dump_capability(FILE *f, uint64_t dev_capa)
+{
+ uint64_t capa;
+
+ (void)fprintf(f, " dev_capa: 0x%" PRIx64 " -", dev_capa);
+ while (dev_capa > 0) {
+ capa = 1ull << __builtin_ctzll(dev_capa);
+ (void)fprintf(f, " %s", dma_capability_name(capa));
+ dev_capa &= ~capa;
+ }
+ (void)fprintf(f, "\n");
+}
+
+int
+rte_dma_dump(int16_t dev_id, FILE *f)
+{
+ const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+ struct rte_dma_info dev_info;
+ int ret;
+
+ if (!rte_dma_is_valid(dev_id) || f == NULL)
+ return -EINVAL;
+
+ ret = rte_dma_info_get(dev_id, &dev_info);
+ if (ret != 0) {
+ RTE_DMA_LOG(ERR, "Device %d get device info failed", dev_id);
+ return -EINVAL;
+ }
+
+ (void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
+ dev->dev_id,
+ dev->dev_name,
+ dev->dev_started ? "started" : "stopped");
+ dma_dump_capability(f, dev_info.dev_capa);
+ (void)fprintf(f, " max_vchans_supported: %u\n", dev_info.max_vchans);
+ (void)fprintf(f, " nb_vchans_configured: %u\n", dev_info.nb_vchans);
+ (void)fprintf(f, " silent_mode: %s\n",
+ dev->dev_conf.enable_silent ? "on" : "off");
+
+ if (dev->dev_ops->dev_dump != NULL)
+ return (*dev->dev_ops->dev_dump)(dev, f);
+
+ return 0;
+}
* This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
* and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
*
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ * - rte_dma_configure()
+ * - rte_dma_vchan_setup()
+ * - rte_dma_start()
+ *
+ * Then, the application can invoke dataplane functions to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
+ * rte_dma_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dma_start() again. The dataplane functions should not
+ * be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the rte_dma_close()
+ * function.
+ *
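 * A minimal lifecycle sketch (error handling omitted; the configuration
 * values are illustrative only and must respect the limits reported by
 * rte_dma_info_get()):
 * @code
 *     struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
 *     struct rte_dma_vchan_conf vchan_conf = {
 *             .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *             .nb_desc = 128,
 *     };
 *
 *     rte_dma_configure(dev_id, &dev_conf);
 *     rte_dma_vchan_setup(dev_id, 0, &vchan_conf);
 *     rte_dma_start(dev_id);
 *     // dataplane stage: enqueue and complete jobs here
 *     rte_dma_stop(dev_id);
 *     rte_dma_close(dev_id);
 * @endcode
 *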
+ * Regarding MT-safety, all the functions of the dmadev API implemented by a
+ * PMD are lock-free functions which are assumed not to be invoked in parallel
+ * on different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
*/
#include <stdint.h>
__rte_experimental
uint16_t rte_dma_count_avail(void);
+/**@{@name DMA capability
+ * @see struct rte_dma_info::dev_capa
+ */
+/** Support memory-to-memory transfer. */
+#define RTE_DMA_CAPA_MEM_TO_MEM RTE_BIT64(0)
+/** Support memory-to-device transfer. */
+#define RTE_DMA_CAPA_MEM_TO_DEV RTE_BIT64(1)
+/** Support device-to-memory transfer. */
+#define RTE_DMA_CAPA_DEV_TO_MEM RTE_BIT64(2)
+/** Support device-to-device transfer. */
+#define RTE_DMA_CAPA_DEV_TO_DEV RTE_BIT64(3)
+/** Support SVA which could use VA as DMA address.
+ * If the device supports SVA, then the application could pass any VA address
+ * as the DMA address, e.g. memory from rte_malloc(), rte_memzone(), malloc or
+ * stack memory.
+ * If the device doesn't support SVA, then the application should pass IOVA
+ * addresses, e.g. from rte_malloc() or rte_memzone().
+ */
+#define RTE_DMA_CAPA_SVA RTE_BIT64(4)
+/** Support work in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dma_completed*() APIs.
+ * @see struct rte_dma_conf::enable_silent
+ */
+#define RTE_DMA_CAPA_SILENT RTE_BIT64(5)
+/** Support copy operation.
+ * This capability starts at bit index 32, leaving a gap between the
+ * normal capability bits and the operation capability bits.
+ */
+#define RTE_DMA_CAPA_OPS_COPY RTE_BIT64(32)
+/** Support scatter-gather list copy operation. */
+#define RTE_DMA_CAPA_OPS_COPY_SG RTE_BIT64(33)
+/** Support fill operation. */
+#define RTE_DMA_CAPA_OPS_FILL RTE_BIT64(34)
+/**@}*/
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ *
+ * @see rte_dma_info_get
+ */
+struct rte_dma_info {
+ const char *dev_name; /**< Unique device name. */
+ /** Device capabilities (RTE_DMA_CAPA_*). */
+ uint64_t dev_capa;
+ /** Maximum number of virtual DMA channels supported. */
+ uint16_t max_vchans;
+ /** Maximum allowed number of virtual DMA channel descriptors. */
+ uint16_t max_desc;
+ /** Minimum allowed number of virtual DMA channel descriptors. */
+ uint16_t min_desc;
+ /** Maximum number of source or destination scatter-gather entries
+ * supported.
+ * If the device does not support the COPY_SG capability, this value can
+ * be zero.
+ * If the device supports the COPY_SG capability, then the rte_dma_copy_sg()
+ * parameters nb_src/nb_dst should not exceed this value.
+ */
+ uint16_t max_sges;
+ /** NUMA node connection, -1 if unknown. */
+ int16_t numa_node;
+ /** Number of virtual DMA channels configured. */
+ uint16_t nb_vchans;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param[out] dev_info
+ * A pointer to a structure of type *rte_dma_info* to be filled with the
+ * information of the device.
+ *
+ * @return
+ * 0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
+
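+/* Example (illustrative): retrieve the device information before
+ * configuration, e.g. to clamp the requested number of vchans:
+ *
+ *    struct rte_dma_info info;
+ *    uint16_t nb_vchans = 4;
+ *
+ *    rte_dma_info_get(dev_id, &info);
+ *    if (nb_vchans > info.max_vchans)
+ *            nb_vchans = info.max_vchans;
+ */
+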
+/**
+ * A structure used to configure a DMA device.
+ *
+ * @see rte_dma_configure
+ */
+struct rte_dma_conf {
+ /** The number of virtual DMA channels to set up for the DMA device.
+ * This value cannot be greater than the field 'max_vchans' of struct
+ * rte_dma_info obtained from rte_dma_info_get().
+ */
+ uint16_t nb_vchans;
+ /** Indicates whether to enable silent mode.
+ * false: default mode, true: silent mode.
+ * This value can be set to true only when the SILENT capability is
+ * supported.
+ *
+ * @see RTE_DMA_CAPA_SILENT
+ */
+ bool enable_silent;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ * @param dev_conf
+ * The DMA device configuration structure encapsulated into rte_dma_conf
+ * object.
+ *
+ * @return
+ * 0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @return
+ * 0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_start(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dma_start().
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @return
+ * 0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stop(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @return
+ * 0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_close(int16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+enum rte_dma_direction {
+ /** DMA transfer direction - from memory to memory.
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+ RTE_DMA_DIR_MEM_TO_MEM,
+ /** DMA transfer direction - from memory to device.
+ * In a typical scenario, the SoCs are installed on host servers as
+ * iNICs through the PCIe interface. In this case, the SoC works in
+ * EP (endpoint) mode and could initiate a DMA move request from memory
+ * (i.e. SoC memory) to device (i.e. host memory).
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+ RTE_DMA_DIR_MEM_TO_DEV,
+ /** DMA transfer direction - from device to memory.
+ * In a typical scenario, the SoCs are installed on host servers as
+ * iNICs through the PCIe interface. In this case, the SoC works in
+ * EP (endpoint) mode and could initiate a DMA move request from device
+ * (i.e. host memory) to memory (i.e. SoC memory).
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+ RTE_DMA_DIR_DEV_TO_MEM,
+ /** DMA transfer direction - from device to device.
+ * In a typical scenario, the SoCs are installed on host servers as
+ * iNICs through the PCIe interface. In this case, the SoC works in
+ * EP (endpoint) mode and could initiate a DMA move request from device
+ * (i.e. host memory) to device (i.e. another host's memory).
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+ RTE_DMA_DIR_DEV_TO_DEV,
+};
+
+/**
+ * DMA access port type defines.
+ *
+ * @see struct rte_dma_port_param::port_type
+ */
+enum rte_dma_port_type {
+ RTE_DMA_PORT_NONE,
+ RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dma_vchan_conf::src_port
+ * @see struct rte_dma_vchan_conf::dst_port
+ */
+struct rte_dma_port_param {
+ /** The device access port type.
+ *
+ * @see enum rte_dma_port_type
+ */
+ enum rte_dma_port_type port_type;
+ RTE_STD_C11
+ union {
+ /** PCIe access port parameters.
+ *
+ * The following model shows SoC's PCIe module connects to
+ * multiple PCIe hosts and multiple endpoints. The PCIe module
+ * has an integrated DMA controller.
+ *
+ * If the DMA controller wants to access the memory of host A, the
+ * request can be initiated by PF-1 of PCIe Core0, or by VF-0 of
+ * PF-0 of PCIe Core0.
+ *
+ * \code{.unparsed}
+ * System Bus
+ * | ----------PCIe module----------
+ * | Bus
+ * | Interface
+ * | ----- ------------------
+ * | | | | PCIe Core0 |
+ * | | | | | -----------
+ * | | | | PF-0 -- VF-0 | | Host A |
+ * | | |--------| |- VF-1 |--------| Root |
+ * | | | | PF-1 | | Complex |
+ * | | | | PF-2 | -----------
+ * | | | ------------------
+ * | | |
+ * | | | ------------------
+ * | | | | PCIe Core1 |
+ * | | | | | -----------
+ * | | | | PF-0 -- VF-0 | | Host B |
+ * |-----| |--------| PF-1 -- VF-0 |--------| Root |
+ * | | | | |- VF-1 | | Complex |
+ * | | | | PF-2 | -----------
+ * | | | ------------------
+ * | | |
+ * | | | ------------------
+ * | |DMA| | | ------
+ * | | | | |--------| EP |
+ * | | |--------| PCIe Core2 | ------
+ * | | | | | ------
+ * | | | | |--------| EP |
+ * | | | | | ------
+ * | ----- ------------------
+ *
+ * \endcode
+ *
+ * @note If some fields cannot be supported by the
+ * hardware/driver, then the driver ignores those fields.
+ * Please check driver-specific documentation for limitations
+ * and capabilities.
+ */
+ __extension__
+ struct {
+ uint64_t coreid : 4; /**< PCIe core id used. */
+ uint64_t pfid : 8; /**< PF id used. */
+ uint64_t vfen : 1; /**< VF enable bit. */
+ uint64_t vfid : 16; /**< VF id used. */
+ /** The pasid field in TLP packet. */
+ uint64_t pasid : 20;
+ /** The attributes field in TLP packet. */
+ uint64_t attr : 3;
+ /** The processing hint field in TLP packet. */
+ uint64_t ph : 2;
+ /** The steering tag field in TLP packet. */
+ uint64_t st : 16;
+ } pcie;
+ };
+ uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ *
+ * @see rte_dma_vchan_setup
+ */
+struct rte_dma_vchan_conf {
+ /** Transfer direction
+ *
+ * @see enum rte_dma_direction
+ */
+ enum rte_dma_direction direction;
+ /** Number of descriptors for the virtual DMA channel. */
+ uint16_t nb_desc;
+ /** 1) Used to describe the device access port parameters in the
+ * device-to-memory transfer scenario.
+ * 2) Used to describe the source device access port parameters in the
+ * device-to-device transfer scenario.
+ *
+ * @see struct rte_dma_port_param
+ */
+ struct rte_dma_port_param src_port;
+ /** 1) Used to describe the device access port parameters in the
+ * memory-to-device transfer scenario.
+ * 2) Used to describe the destination device access port parameters
+ * in the device-to-device transfer scenario.
+ *
+ * @see struct rte_dma_port_param
+ */
+ struct rte_dma_port_param dst_port;
+};
+
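+/* Example (illustrative values): a virtual DMA channel for
+ * device-to-memory transfers over PCIe, assuming the device reports
+ * RTE_DMA_CAPA_DEV_TO_MEM:
+ *
+ *    struct rte_dma_vchan_conf conf = {
+ *            .direction = RTE_DMA_DIR_DEV_TO_MEM,
+ *            .nb_desc = 256,
+ *            .src_port = {
+ *                    .port_type = RTE_DMA_PORT_PCIE,
+ *                    .pcie = { .coreid = 0, .pfid = 1, },
+ *            },
+ *    };
+ */
+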
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param vchan
+ * The identifier of virtual DMA channel. The value must be in the range
+ * [0, nb_vchans - 1] previously supplied to rte_dma_configure().
+ * @param conf
+ * The virtual DMA channel configuration structure encapsulated into
+ * rte_dma_vchan_conf object.
+ *
+ * @return
+ * 0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+ const struct rte_dma_vchan_conf *conf);
+
+/**
+ * A structure used to retrieve statistics.
+ *
+ * @see rte_dma_stats_get
+ */
+struct rte_dma_stats {
+ /** Count of operations which were submitted to hardware. */
+ uint64_t submitted;
+ /** Count of operations which were completed, including successful and
+ * failed completions.
+ */
+ uint64_t completed;
+ /** Count of operations which failed to complete. */
+ uint64_t errors;
+};
+
+/**
+ * Special ID, which is used to represent all virtual DMA channels.
+ *
+ * @see rte_dma_stats_get
+ * @see rte_dma_stats_reset
+ */
+#define RTE_DMA_ALL_VCHAN 0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param vchan
+ * The identifier of the virtual DMA channel.
+ * If equal to RTE_DMA_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ * The basic statistics structure encapsulated into rte_dma_stats
+ * object.
+ *
+ * @return
+ * 0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
+ struct rte_dma_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param vchan
+ * The identifier of the virtual DMA channel.
+ * If equal to RTE_DMA_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ * 0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
+
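+/* Example (illustrative): read, print and clear the aggregate statistics
+ * of all virtual DMA channels of a device:
+ *
+ *    struct rte_dma_stats stats;
+ *
+ *    if (rte_dma_stats_get(dev_id, RTE_DMA_ALL_VCHAN, &stats) == 0)
+ *            printf("submitted %" PRIu64 " completed %" PRIu64
+ *                   " errors %" PRIu64 "\n",
+ *                   stats.submitted, stats.completed, stats.errors);
+ *    rte_dma_stats_reset(dev_id, RTE_DMA_ALL_VCHAN);
+ */
+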
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param f
+ * The file to write the output to.
+ *
+ * @return
+ * 0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dump(int16_t dev_id, FILE *f);
+
#ifdef __cplusplus
}
#endif
extern "C" {
#endif
+struct rte_dma_dev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
+ struct rte_dma_info *dev_info,
+ uint32_t info_sz);
+
+/** @internal Used to configure a device. */
+typedef int (*rte_dma_configure_t)(struct rte_dma_dev *dev,
+ const struct rte_dma_conf *dev_conf,
+ uint32_t conf_sz);
+
+/** @internal Used to start a configured device. */
+typedef int (*rte_dma_start_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*rte_dma_stop_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*rte_dma_close_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*rte_dma_vchan_setup_t)(struct rte_dma_dev *dev, uint16_t vchan,
+ const struct rte_dma_vchan_conf *conf,
+ uint32_t conf_sz);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*rte_dma_stats_get_t)(const struct rte_dma_dev *dev,
+ uint16_t vchan, struct rte_dma_stats *stats,
+ uint32_t stats_sz);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
+
+/**
+ * DMA device operations function pointer table.
+ *
+ * @see struct rte_dma_dev::dev_ops
+ */
+struct rte_dma_dev_ops {
+ rte_dma_info_get_t dev_info_get;
+ rte_dma_configure_t dev_configure;
+ rte_dma_start_t dev_start;
+ rte_dma_stop_t dev_stop;
+ rte_dma_close_t dev_close;
+
+ rte_dma_vchan_setup_t vchan_setup;
+
+ rte_dma_stats_get_t stats_get;
+ rte_dma_stats_reset_t stats_reset;
+
+ rte_dma_dump_t dev_dump;
+};
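+
+/* Example (hypothetical PMD named "skeleton"): a driver typically provides
+ * a static ops table and points rte_dma_dev::dev_ops at it during probe:
+ *
+ *    static const struct rte_dma_dev_ops skeleton_dma_ops = {
+ *            .dev_info_get = skeleton_info_get,
+ *            .dev_configure = skeleton_configure,
+ *            .dev_start = skeleton_start,
+ *            .dev_stop = skeleton_stop,
+ *            .dev_close = skeleton_close,
+ *            .vchan_setup = skeleton_vchan_setup,
+ *            .stats_get = skeleton_stats_get,
+ *            .stats_reset = skeleton_stats_reset,
+ *            .dev_dump = skeleton_dump,
+ *    };
+ */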
/**
* Possible states of a DMA device.
*
void *dev_private; /**< PMD-specific private data. */
/** Device info which supplied during device initialization. */
struct rte_device *device;
+ /** Functions implemented by PMD. */
+ const struct rte_dma_dev_ops *dev_ops;
+ struct rte_dma_conf dev_conf; /**< DMA device configuration. */
enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+ __extension__
+ uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
uint64_t reserved[2]; /**< Reserved for future fields. */
} __rte_cache_aligned;
EXPERIMENTAL {
global:
+ rte_dma_close;
+ rte_dma_configure;
rte_dma_count_avail;
rte_dma_dev_max;
+ rte_dma_dump;
rte_dma_get_dev_id_by_name;
+ rte_dma_info_get;
rte_dma_is_valid;
+ rte_dma_start;
+ rte_dma_stats_get;
+ rte_dma_stats_reset;
+ rte_dma_stop;
+ rte_dma_vchan_setup;
local: *;
};