- If IOVA table is not specified, IOVA addresses will be assumed to be
unavailable
- Other processes must attach to the memory area before they can use it
-* Perform DMA mapping with ``rte_vfio_dma_map`` if needed
+* Perform DMA mapping with ``rte_dev_dma_map`` if needed (see the sketch after this list)
* Use the memory area in your application
* If memory area is no longer needed, it can be unregistered
- If the area was mapped for DMA, unmapping must be performed before
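
A minimal sketch of the flow above, assuming an IOVA-as-VA setup, a
page-aligned buffer, and a ``dev`` pointer the application already holds
(how it is obtained is outside this patch); the 4K page size and NULL
IOVA table are assumptions for the sketch::

    #include <rte_dev.h>
    #include <rte_memory.h>
    #include <rte_errno.h>

    static int
    use_external_memory(struct rte_device *dev, void *addr, size_t len)
    {
        /* Register the area with DPDK; NULL IOVA table, 4K page size. */
        if (rte_extmem_register(addr, len, NULL, 0, 4096) != 0)
            return -rte_errno;

        /* Map for DMA; the bus falls back to VFIO when the driver
         * provides no dma_map callback. */
        if (rte_dev_dma_map(dev, addr, (uintptr_t)addr, len) != 0) {
            rte_extmem_unregister(addr, len);
            return -rte_errno;
        }

        /* ... use the memory area in the application ... */

        /* Unmap before unregistering, per the rule above. */
        rte_dev_dma_unmap(dev, addr, (uintptr_t)addr, len);
        rte_extmem_unregister(addr, len);
        return 0;
    }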
return ret;
}
+static int
+pci_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);
+
+ if (!pdev || !pdev->driver) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ if (pdev->driver->dma_map)
+ return pdev->driver->dma_map(pdev, addr, iova, len);
+	/*
+	 * In case the driver does not provide any specific mapping,
+	 * try to fall back to VFIO.
+	 */
+ if (pdev->kdrv == RTE_KDRV_VFIO)
+ return rte_vfio_container_dma_map
+ (RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
+ iova, len);
+ rte_errno = ENOTSUP;
+ return -1;
+}
+
+static int
+pci_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);
+
+ if (!pdev || !pdev->driver) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ if (pdev->driver->dma_unmap)
+ return pdev->driver->dma_unmap(pdev, addr, iova, len);
+	/*
+	 * In case the driver does not provide any specific mapping,
+	 * try to fall back to VFIO.
+	 */
+ if (pdev->kdrv == RTE_KDRV_VFIO)
+ return rte_vfio_container_dma_unmap
+ (RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
+ iova, len);
+ rte_errno = ENOTSUP;
+ return -1;
+}
+
struct rte_pci_bus rte_pci_bus = {
.bus = {
.scan = rte_pci_scan,
.plug = pci_plug,
.unplug = pci_unplug,
.parse = pci_parse,
+ .dma_map = pci_dma_map,
+ .dma_unmap = pci_dma_unmap,
.get_iommu_class = rte_pci_get_iommu_class,
.dev_iterate = rte_pci_dev_iterate,
.hot_unplug_handler = pci_hot_unplug_handler,
*/
typedef int (pci_remove_t)(struct rte_pci_device *);
+/**
+ * Driver-specific DMA mapping. After a successful call, the device
+ * will be able to read/write from/to this segment.
+ *
+ * @param dev
+ * Pointer to the PCI device.
+ * @param addr
+ * Starting virtual address of memory to be mapped.
+ * @param iova
+ * Starting IOVA address of memory to be mapped.
+ * @param len
+ * Length of memory segment being mapped.
+ * @return
+ * - 0 On success.
+ * - Negative value and rte_errno is set otherwise.
+ */
+typedef int (pci_dma_map_t)(struct rte_pci_device *dev, void *addr,
+ uint64_t iova, size_t len);
+
+/**
+ * Driver-specific DMA un-mapping. After a successful call, the device
+ * will not be able to read/write from/to this segment.
+ *
+ * @param dev
+ * Pointer to the PCI device.
+ * @param addr
+ * Starting virtual address of memory to be unmapped.
+ * @param iova
+ * Starting IOVA address of memory to be unmapped.
+ * @param len
+ * Length of memory segment being unmapped.
+ * @return
+ * - 0 On success.
+ * - Negative value and rte_errno is set otherwise.
+ */
+typedef int (pci_dma_unmap_t)(struct rte_pci_device *dev, void *addr,
+ uint64_t iova, size_t len);
+
/**
* A structure describing a PCI driver.
*/
struct rte_pci_bus *bus; /**< PCI bus reference. */
pci_probe_t *probe; /**< Device Probe function. */
pci_remove_t *remove; /**< Device Remove function. */
+	pci_dma_map_t *dma_map;            /**< Device DMA map function. */
+	pci_dma_unmap_t *dma_unmap;        /**< Device DMA unmap function. */
const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
uint32_t drv_flags; /**< Flags RTE_PCI_DRV_*. */
};
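
As an illustration of these new hooks, a PMD can now supply its own
mapping routine instead of relying on the VFIO fallback. The sketch
below is hypothetical: ``my_pmd_register_mr``/``my_pmd_drop_mr`` stand
in for whatever device-specific work (e.g. memory-region programming) a
real driver would do, and the probe/remove/id_table symbols are assumed
to exist elsewhere in the driver::

    static int
    my_pmd_dma_map(struct rte_pci_device *pdev, void *addr,
                   uint64_t iova, size_t len)
    {
        /* Hypothetical helper: make [addr, addr + len) at `iova`
         * accessible to the device. */
        return my_pmd_register_mr(pdev, addr, iova, len);
    }

    static int
    my_pmd_dma_unmap(struct rte_pci_device *pdev, void *addr,
                     uint64_t iova, size_t len)
    {
        /* Hypothetical helper: revoke the device's access. */
        return my_pmd_drop_mr(pdev, addr, iova, len);
    }

    static struct rte_pci_driver my_pmd_driver = {
        .id_table = my_pmd_id_table,
        .probe = my_pmd_probe,
        .remove = my_pmd_remove,
        .dma_map = my_pmd_dma_map,     /* new fields from this patch */
        .dma_unmap = my_pmd_dma_unmap,
    };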
free(cls_str);
return it->device;
}
+
+int
+rte_dev_dma_map(struct rte_device *dev, void *addr, uint64_t iova,
+ size_t len)
+{
+ if (dev->bus->dma_map == NULL || len == 0) {
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+ /* Memory must be registered through rte_extmem_* APIs */
+ if (rte_mem_virt2memseg_list(addr) == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ return dev->bus->dma_map(dev, addr, iova, len);
+}
+
+int
+rte_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
+ size_t len)
+{
+ if (dev->bus->dma_unmap == NULL || len == 0) {
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+ /* Memory must be registered through rte_extmem_* APIs */
+ if (rte_mem_virt2memseg_list(addr) == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ return dev->bus->dma_unmap(dev, addr, iova, len);
+}
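
Note the validation order in both wrappers: a bus without the callback
(or a zero length) fails with ENOTSUP, while memory that was never
registered fails with EINVAL. For example, a plain ``malloc()`` buffer
is rejected before the bus is ever consulted (``handle_error`` is a
hypothetical application helper)::

    void *buf = malloc(len);

    /* buf is not backed by any memseg list, so this fails and
     * rte_errno is set to EINVAL. */
    if (rte_dev_dma_map(dev, buf, (uintptr_t)buf, len) < 0)
        handle_error(rte_errno);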
*/
typedef int (*rte_bus_parse_t)(const char *name, void *addr);
+/**
+ * Device level DMA map function.
+ * After a successful call, the memory segment will be mapped to the
+ * given device.
+ *
+ * @param dev
+ * Device pointer.
+ * @param addr
+ * Virtual address to map.
+ * @param iova
+ * IOVA address to map.
+ * @param len
+ * Length of the memory segment being mapped.
+ *
+ * @return
+ * 0 if mapping was successful.
+ * Negative value and rte_errno is set otherwise.
+ */
+typedef int (*rte_dev_dma_map_t)(struct rte_device *dev, void *addr,
+ uint64_t iova, size_t len);
+
+/**
+ * Device level DMA unmap function.
+ * After a successful call, the memory segment will no longer be
+ * accessible by the given device.
+ *
+ * @param dev
+ * Device pointer.
+ * @param addr
+ * Virtual address to unmap.
+ * @param iova
+ * IOVA address to unmap.
+ * @param len
+ *   Length of the memory segment being unmapped.
+ *
+ * @return
+ * 0 if un-mapping was successful.
+ * Negative value and rte_errno is set otherwise.
+ */
+typedef int (*rte_dev_dma_unmap_t)(struct rte_device *dev, void *addr,
+ uint64_t iova, size_t len);
+
/**
* Implement a specific hot-unplug handler, which is responsible for
* handle the failure when device be hot-unplugged. When the event of
rte_bus_plug_t plug; /**< Probe single device for drivers */
rte_bus_unplug_t unplug; /**< Remove single device from driver */
rte_bus_parse_t parse; /**< Parse a device name */
+ rte_dev_dma_map_t dma_map; /**< DMA map for device in the bus */
+ rte_dev_dma_unmap_t dma_unmap; /**< DMA unmap for device in the bus */
struct rte_bus_conf conf; /**< Bus configuration */
rte_bus_get_iommu_class_t get_iommu_class; /**< Get iommu class */
rte_dev_iterate_t dev_iterate; /**< Device iterator. */
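
The pci_dma_map()/pci_dma_unmap() handlers added earlier in this patch
are the only implementations of these two bus operations so far; buses
that leave the fields NULL simply cause rte_dev_dma_map() and
rte_dev_dma_unmap() to fail with ENOTSUP.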
int __rte_experimental
rte_dev_hotplug_handle_disable(void);
+/**
+ * Device level DMA map function.
+ * After a successful call, the memory segment will be mapped to the
+ * given device.
+ *
+ * @note Memory must be registered in advance using rte_extmem_* APIs.
+ *
+ * @param dev
+ * Device pointer.
+ * @param addr
+ * Virtual address to map.
+ * @param iova
+ * IOVA address to map.
+ * @param len
+ * Length of the memory segment being mapped.
+ *
+ * @return
+ * 0 if mapping was successful.
+ * Negative value and rte_errno is set otherwise.
+ */
+int __rte_experimental
+rte_dev_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len);
+
+/**
+ * Device level DMA unmap function.
+ * After a successful call, the memory segment will no longer be
+ * accessible by the given device.
+ *
+ * @note Memory must be registered in advance using rte_extmem_* APIs.
+ *
+ * @param dev
+ * Device pointer.
+ * @param addr
+ * Virtual address to unmap.
+ * @param iova
+ * IOVA address to unmap.
+ * @param len
+ *   Length of the memory segment being unmapped.
+ *
+ * @return
+ * 0 if un-mapping was successful.
+ * Negative value and rte_errno is set otherwise.
+ */
+int __rte_experimental
+rte_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
+ size_t len);
+
#endif /* _RTE_DEV_H_ */
rte_class_unregister;
rte_ctrl_thread_create;
rte_delay_us_sleep;
+ rte_dev_dma_map;
+ rte_dev_dma_unmap;
rte_dev_event_callback_process;
rte_dev_event_callback_register;
rte_dev_event_callback_unregister;