From 2f7d42c6e11a4fb448b1a176680463d49635b397 Mon Sep 17 00:00:00 2001
From: Kevin Laatz
Date: Wed, 20 Oct 2021 16:30:04 +0000
Subject: [PATCH] dma/idxd: add configure and info

Add functions for device configuration. The info_get function is
included here since it is useful for checking that configuration
succeeded.

Documentation is also updated to describe device configuration usage.

Signed-off-by: Bruce Richardson
Signed-off-by: Kevin Laatz
Reviewed-by: Conor Walsh
Reviewed-by: Chengwen Feng
---
 doc/guides/dmadevs/idxd.rst      | 15 +++++++
 drivers/dma/idxd/idxd_bus.c      |  3 ++
 drivers/dma/idxd/idxd_common.c   | 71 ++++++++++++++++++++++++++++++++
 drivers/dma/idxd/idxd_internal.h |  6 +++
 drivers/dma/idxd/idxd_pci.c      |  3 ++
 5 files changed, 98 insertions(+)

diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
index ce33e2857a..62ffd39ee0 100644
--- a/doc/guides/dmadevs/idxd.rst
+++ b/doc/guides/dmadevs/idxd.rst
@@ -120,3 +120,18 @@ use a subset of configured queues.
 Once probed successfully, irrespective of kernel driver, the device will appear
 as a ``dmadev``, that is a "DMA device type" inside DPDK, and can be
 accessed using APIs from the ``rte_dmadev`` library.
+
+Using IDXD DMAdev Devices
+-------------------------
+
+The devices are accessed from an application using the dmadev API.
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+IDXD configuration requirements:
+
+* ``ring_size`` must be a power of two, between 64 and 4096.
+* Only one ``vchan`` is supported per device (work queue).
+* IDXD devices do not support silent mode.
+* The transfer direction must be set to ``RTE_DMA_DIR_MEM_TO_MEM`` to copy from memory to memory.
diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index 2d5490b2df..971fe34b88 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -97,6 +97,9 @@ idxd_dev_close(struct rte_dma_dev *dev)
 static const struct rte_dma_dev_ops idxd_bus_ops = {
 	.dev_close = idxd_dev_close,
 	.dev_dump = idxd_dump,
+	.dev_configure = idxd_configure,
+	.vchan_setup = idxd_vchan_setup,
+	.dev_info_get = idxd_info_get,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 46598c368c..70d094e3a2 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -39,6 +39,77 @@ idxd_dump(const struct rte_dma_dev *dev, FILE *f)
 	return 0;
 }
 
+int
+idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
+{
+	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+
+	if (size < sizeof(*info))
+		return -EINVAL;
+
+	*info = (struct rte_dma_info) {
+		.dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_HANDLES_ERRORS |
+				RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_FILL,
+		.max_vchans = 1,
+		.max_desc = 4096,
+		.min_desc = 64,
+	};
+	if (idxd->sva_support)
+		info->dev_capa |= RTE_DMA_CAPA_SVA;
+	return 0;
+}
+
+int
+idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
+		uint32_t conf_sz)
+{
+	if (sizeof(struct rte_dma_conf) != conf_sz)
+		return -EINVAL;
+
+	if (dev_conf->nb_vchans != 1)
+		return -EINVAL;
+	return 0;
+}
+
+int
+idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
+{
+	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+	uint16_t max_desc = qconf->nb_desc;
+
+	if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
+		return -EINVAL;
+
+	idxd->qcfg = *qconf;
+
+	if (!rte_is_power_of_2(max_desc))
+		max_desc = rte_align32pow2(max_desc);
+	IDXD_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
+	idxd->desc_ring_mask = max_desc - 1;
+	idxd->qcfg.nb_desc = max_desc;
+
+	/* in case we are reconfiguring a device, free any existing memory */
+	rte_free(idxd->desc_ring);
+
+	/* allocate the descriptor ring at 2x size as batches can't wrap */
+	idxd->desc_ring = rte_zmalloc(NULL, sizeof(*idxd->desc_ring) * max_desc * 2, 0);
+	if (idxd->desc_ring == NULL)
+		return -ENOMEM;
+	idxd->desc_iova = rte_mem_virt2iova(idxd->desc_ring);
+
+	idxd->batch_idx_read = 0;
+	idxd->batch_idx_write = 0;
+	idxd->batch_start = 0;
+	idxd->batch_size = 0;
+	idxd->ids_returned = 0;
+	idxd->ids_avail = 0;
+
+	memset(idxd->batch_comp_ring, 0, sizeof(*idxd->batch_comp_ring) *
+			(idxd->max_batches + 1));
+	return 0;
+}
+
 int
 idxd_dmadev_create(const char *name, struct rte_device *dev,
 		const struct idxd_dmadev *base_idxd,
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index 5e253fdfbc..1dbe31abcd 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -81,5 +81,11 @@ struct idxd_dmadev {
 int idxd_dmadev_create(const char *name, struct rte_device *dev,
 		const struct idxd_dmadev *base_idxd, const struct rte_dma_dev_ops *ops);
 int idxd_dump(const struct rte_dma_dev *dev, FILE *f);
+int idxd_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *dev_conf,
+		uint32_t conf_sz);
+int idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
+		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz);
+int idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
+		uint32_t size);
 
 #endif /* _IDXD_INTERNAL_H_ */
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 0b3a6ee4bc..c9e193a11d 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -85,6 +85,9 @@ idxd_pci_dev_close(struct rte_dma_dev *dev)
 static const struct rte_dma_dev_ops idxd_pci_ops = {
 	.dev_close = idxd_pci_dev_close,
 	.dev_dump = idxd_dump,
+	.dev_configure = idxd_configure,
+	.vchan_setup = idxd_vchan_setup,
+	.dev_info_get = idxd_info_get,
 };
 
 /* each portal uses 4 x 4k pages */
-- 
2.20.1
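
As an illustration of the configuration requirements documented above, below is
a minimal sketch (not part of the patch) of how an application might configure
one of these devices through the dmadev API once it has been probed. The device
id value and the ring size of 1024 are assumptions for the example, and error
handling is reduced to early returns.

#include <rte_dmadev.h>

/*
 * Example only: configure an already-probed IDXD dmadev following the
 * requirements in the documentation added by this patch.
 */
static int
setup_idxd_dmadev(int16_t dev_id)
{
	struct rte_dma_info info;
	const struct rte_dma_conf dev_conf = {
		.nb_vchans = 1,	/* IDXD supports exactly one vchan per device */
		/* .enable_silent left false: silent mode is unsupported */
	};
	const struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,	/* power of two, within [64, 4096] */
	};

	/* info_get can be used to sanity-check limits before configuring */
	if (rte_dma_info_get(dev_id, &info) < 0)
		return -1;
	if (qconf.nb_desc < info.min_desc || qconf.nb_desc > info.max_desc)
		return -1;

	if (rte_dma_configure(dev_id, &dev_conf) < 0)
		return -1;
	if (rte_dma_vchan_setup(dev_id, 0, &qconf) < 0)
		return -1;
	return rte_dma_start(dev_id);
}

Note that rte_eal_init() must already have probed the device before this runs;
once rte_dma_start() succeeds, copies can be enqueued with rte_dma_copy() and
completions gathered with rte_dma_completed().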
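
On the implementation side, idxd_vchan_setup rounds nb_desc up to the next
power of two so that ring indexes can wrap with a simple AND against
desc_ring_mask rather than a modulo. A standalone sketch of that idiom,
using illustrative names not taken from the driver:

#include <stdint.h>
#include <rte_common.h>	/* rte_is_power_of_2, rte_align32pow2 */

/* Round a requested ring size up to a power of two and derive the
 * wrap mask, as idxd_vchan_setup does for desc_ring_mask.
 */
static uint16_t
make_ring_mask(uint16_t nb_desc)
{
	uint32_t size = nb_desc;

	if (!rte_is_power_of_2(size))
		size = rte_align32pow2(size);	/* e.g. 1000 -> 1024 */
	return size - 1;
}

/* Advance a ring index; the mask wraps it back to 0 at ring size. */
static inline uint16_t
ring_next(uint16_t idx, uint16_t ring_mask)
{
	return (idx + 1) & ring_mask;
}

This is why the documentation requires ring_size to be a power of two: the
driver stores only the mask and never performs a division on the fast path.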