rte_vfio_clear_group(vfio_group_fd);
return -1;
} else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
- RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", dev_addr);
+ RTE_LOG(ERR, EAL, " %s VFIO group is not viable! "
+ "Not all devices in IOMMU group bound to VFIO or unbound\n",
+ dev_addr);
close(vfio_group_fd);
rte_vfio_clear_group(vfio_group_fd);
return -1;
ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
if (ret) {
- RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
- errno, strerror(errno));
+ /**
+ * In case the mapping was already done EEXIST will be
+ * returned from kernel.
+ */
+ if (errno == EEXIST) {
+ RTE_LOG(DEBUG, EAL,
+ " Memory segment is already mapped,"
+ " skipping\n");
+ } else {
+ RTE_LOG(ERR, EAL,
+ " cannot set up DMA remapping,"
+ " error %i (%s)\n",
+ errno, strerror(errno));
return -1;
+ }
}
} else {
memset(&dma_unmap, 0, sizeof(dma_unmap));
ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
if (ret) {
- RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
- errno, strerror(errno));
+ /**
+ * In case the mapping was already done EBUSY will be
+ * returned from kernel.
+ */
+ if (errno == EBUSY) {
+ RTE_LOG(DEBUG, EAL,
+ " Memory segment is already mapped,"
+ " skipping\n");
+ } else {
+ RTE_LOG(ERR, EAL,
+ " cannot set up DMA remapping,"
+ " error %i (%s)\n", errno,
+ strerror(errno));
return -1;
+ }
}
} else {
return vfio_cfgs[i].vfio_container_fd;
}
-int __rte_experimental
+int
rte_vfio_container_destroy(int container_fd)
{
struct vfio_config *vfio_cfg;