Once probed successfully, irrespective of kernel driver, the device will appear as a ``dmadev``,
that is a "DMA device type" inside DPDK, and can be accessed using APIs from the
``rte_dmadev`` library.
+
+Using IDXD DMAdev Devices
+--------------------------
+
+The devices are accessed from an application through the dmadev API.
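For illustration (not part of the patch), a minimal sketch of querying such a device
through the dmadev API is shown below; the ``print_dma_capabilities()`` helper is
hypothetical and assumes the application already holds the device id from probing::

   #include <stdio.h>
   #include <rte_dmadev.h>

   /* Hypothetical helper: print basic capabilities of an already-probed dmadev. */
   static int
   print_dma_capabilities(int16_t dev_id)
   {
       struct rte_dma_info info;

       if (rte_dma_info_get(dev_id, &info) != 0) {
           fprintf(stderr, "Cannot get info for DMA device %d\n", dev_id);
           return -1;
       }
       /* an IDXD device reports a single vchan and a 64-4096 descriptor range */
       printf("DMA dev %d: max_vchans=%u, descriptors %u-%u, SVA %ssupported\n",
               dev_id, info.max_vchans, info.min_desc, info.max_desc,
               (info.dev_capa & RTE_DMA_CAPA_SVA) ? "" : "not ");
       return 0;
   }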
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~~
+
+IDXD configuration requirements (see the example following this list):
+
+* ``ring_size`` must be a power of two, between 64 and 4096.
+* Only one ``vchan`` is supported per device (that is, per work queue).
+* IDXD devices do not support silent mode.
+* The transfer direction must be set to ``RTE_DMA_DIR_MEM_TO_MEM``, as only memory-to-memory transfers are supported.
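A minimal configuration sketch respecting these constraints is shown below (illustrative
only, not part of the patch); the ``configure_idxd_dmadev()`` helper, the ring size of
1024 and the error handling are assumptions::

   #include <rte_dmadev.h>

   /* Hypothetical helper: configure an IDXD dmadev for mem-to-mem copies. */
   static int
   configure_idxd_dmadev(int16_t dev_id)
   {
       /* only a single vchan is supported; silent mode is left disabled */
       struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
       /* ring size must be a power of two between 64 and 4096 */
       struct rte_dma_vchan_conf vchan_conf = {
           .direction = RTE_DMA_DIR_MEM_TO_MEM,
           .nb_desc = 1024,
       };

       if (rte_dma_configure(dev_id, &dev_conf) != 0)
           return -1;
       if (rte_dma_vchan_setup(dev_id, 0, &vchan_conf) != 0)
           return -1;
       return rte_dma_start(dev_id);
   }

After ``rte_dma_start()`` returns successfully, the device is ready to accept copy
operations through the dmadev datapath API.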
return 0;
}
+int
+idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
+{
+	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+
+	if (size < sizeof(*info))
+		return -EINVAL;
+
+	/* fixed capabilities: mem-to-mem copy and fill with error handling,
+	 * a single vchan and a 64-4096 descriptor ring */
+	*info = (struct rte_dma_info) {
+			.dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_HANDLES_ERRORS |
+				RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_FILL,
+			.max_vchans = 1,
+			.max_desc = 4096,
+			.min_desc = 64,
+	};
+	/* shared virtual addressing is only reported when the hardware supports it */
+	if (idxd->sva_support)
+		info->dev_capa |= RTE_DMA_CAPA_SVA;
+	return 0;
+}
+
+int
+idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
+		uint32_t conf_sz)
+{
+	if (sizeof(struct rte_dma_conf) != conf_sz)
+		return -EINVAL;
+
+	/* only a single vchan (work queue) per device is supported */
+	if (dev_conf->nb_vchans != 1)
+		return -EINVAL;
+	return 0;
+}
+
+int
+idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
+{
+	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+	uint16_t max_desc = qconf->nb_desc;
+
+	if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
+		return -EINVAL;
+
+	idxd->qcfg = *qconf;
+
+	if (!rte_is_power_of_2(max_desc))
+		max_desc = rte_align32pow2(max_desc);
+	IDXD_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
+	idxd->desc_ring_mask = max_desc - 1;
+	idxd->qcfg.nb_desc = max_desc;
+
+	/* in case we are reconfiguring a device, free any existing memory */
+	rte_free(idxd->desc_ring);
+
+	/* allocate the descriptor ring at 2x size as batches can't wrap */
+	idxd->desc_ring = rte_zmalloc(NULL, sizeof(*idxd->desc_ring) * max_desc * 2, 0);
+	if (idxd->desc_ring == NULL)
+		return -ENOMEM;
+	idxd->desc_iova = rte_mem_virt2iova(idxd->desc_ring);
+
+	idxd->batch_idx_read = 0;
+	idxd->batch_idx_write = 0;
+	idxd->batch_start = 0;
+	idxd->batch_size = 0;
+	idxd->ids_returned = 0;
+	idxd->ids_avail = 0;
+
+	memset(idxd->batch_comp_ring, 0, sizeof(*idxd->batch_comp_ring) *
+			(idxd->max_batches + 1));
+	return 0;
+}
+
int
idxd_dmadev_create(const char *name, struct rte_device *dev,
const struct idxd_dmadev *base_idxd,
int idxd_dmadev_create(const char *name, struct rte_device *dev,
const struct idxd_dmadev *base_idxd, const struct rte_dma_dev_ops *ops);
int idxd_dump(const struct rte_dma_dev *dev, FILE *f);
+int idxd_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *dev_conf,
+		uint32_t conf_sz);
+int idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
+		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz);
+int idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
+		uint32_t size);
#endif /* _IDXD_INTERNAL_H_ */