X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fbus%2Ffslmc%2Ffslmc_vfio.c;h=c8373e627ae3e33a763483ac109501244f49d479;hb=fb15afa295af939a7c9143a8641ac29a338409f1;hp=247b2a4a196c09f0721b6c56542916b95575a792;hpb=2b5fa25708cfe6594ba755faf864eef4efce4aad;p=dpdk.git diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c index 247b2a4a19..c8373e627a 100644 --- a/drivers/bus/fslmc/fslmc_vfio.c +++ b/drivers/bus/fslmc/fslmc_vfio.c @@ -22,7 +22,7 @@ #include #include -#include +#include #include #include #include @@ -40,20 +40,26 @@ #include "portal/dpaa2_hw_pvt.h" #include "portal/dpaa2_hw_dpio.h" -/** Pathname of FSL-MC devices directory. */ -#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices" - #define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */ /* Number of VFIO containers & groups with in */ static struct fslmc_vfio_group vfio_group; static struct fslmc_vfio_container vfio_container; static int container_device_fd; -static char *fslmc_container; +char *fslmc_container; static int fslmc_iommu_type; static uint32_t *msi_intr_vaddr; void *(*rte_mcp_ptr_list); +void * +dpaa2_get_mcp_ptr(int portal_idx) +{ + if (rte_mcp_ptr_list) + return rte_mcp_ptr_list[portal_idx]; + else + return NULL; +} + static struct rte_dpaa2_object_list dpaa2_obj_list = TAILQ_HEAD_INITIALIZER(dpaa2_obj_list); @@ -384,8 +390,9 @@ rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size) dma_map.vaddr = vaddr; dma_map.iova = iova; - DPAA2_BUS_DEBUG("VFIO dmamap 0x%llx:0x%llx, size 0x%llx\n", - dma_map.vaddr, dma_map.iova, dma_map.size); + DPAA2_BUS_DEBUG("VFIOdmamap 0x%"PRIx64":0x%"PRIx64",size 0x%"PRIx64"\n", + (uint64_t)dma_map.vaddr, (uint64_t)dma_map.iova, + (uint64_t)dma_map.size); ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map); if (ret) { @@ -434,28 +441,139 @@ int rte_fslmc_vfio_dmamap(void) return 0; } -static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj) +static int +fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr, + int *vfio_dev_fd, struct vfio_device_info *device_info) +{ + struct vfio_group_status group_status = { + .argsz = sizeof(group_status) + }; + int vfio_group_fd, vfio_container_fd, iommu_group_no, ret; + + /* get group number */ + ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_no); + if (ret < 0) + return -1; + + /* get the actual group fd */ + vfio_group_fd = rte_vfio_get_group_fd(iommu_group_no); + if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT) + return -1; + + /* + * if vfio_group_fd == -ENOENT, that means the device + * isn't managed by VFIO + */ + if (vfio_group_fd == -ENOENT) { + RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n", + dev_addr); + return 1; + } + + /* Opens main vfio file descriptor which represents the "container" */ + vfio_container_fd = rte_vfio_get_container_fd(); + if (vfio_container_fd < 0) { + DPAA2_BUS_ERR("Failed to open VFIO container"); + return -errno; + } + + /* check if the group is viable */ + ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status); + if (ret) { + DPAA2_BUS_ERR(" %s cannot get group status, " + "error %i (%s)\n", dev_addr, + errno, strerror(errno)); + close(vfio_group_fd); + rte_vfio_clear_group(vfio_group_fd); + return -1; + } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) { + DPAA2_BUS_ERR(" %s VFIO group is not viable!\n", dev_addr); + close(vfio_group_fd); + rte_vfio_clear_group(vfio_group_fd); + return -1; + } + /* At this point, we know that this group is 
viable (meaning, + * all devices are either bound to VFIO or not bound to anything) + */ + + /* check if group does not have a container yet */ + if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) { + + /* add group to a container */ + ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER, + &vfio_container_fd); + if (ret) { + DPAA2_BUS_ERR(" %s cannot add VFIO group to container, " + "error %i (%s)\n", dev_addr, + errno, strerror(errno)); + close(vfio_group_fd); + close(vfio_container_fd); + rte_vfio_clear_group(vfio_group_fd); + return -1; + } + + /* + * set an IOMMU type for container + * + */ + if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, + fslmc_iommu_type)) { + ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, + fslmc_iommu_type); + if (ret) { + DPAA2_BUS_ERR("Failed to setup VFIO iommu"); + close(vfio_group_fd); + close(vfio_container_fd); + return -errno; + } + } else { + DPAA2_BUS_ERR("No supported IOMMU available"); + close(vfio_group_fd); + close(vfio_container_fd); + return -EINVAL; + } + } + + /* get a file descriptor for the device */ + *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr); + if (*vfio_dev_fd < 0) { + /* if we cannot get a device fd, this implies a problem with + * the VFIO group or the container not having IOMMU configured. + */ + + DPAA2_BUS_WARN("Getting a vfio_dev_fd for %s failed", dev_addr); + close(vfio_group_fd); + close(vfio_container_fd); + rte_vfio_clear_group(vfio_group_fd); + return -1; + } + + /* test and setup the device */ + ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info); + if (ret) { + DPAA2_BUS_ERR(" %s cannot get device info, error %i (%s)", + dev_addr, errno, strerror(errno)); + close(*vfio_dev_fd); + close(vfio_group_fd); + close(vfio_container_fd); + rte_vfio_clear_group(vfio_group_fd); + return -1; + } + + return 0; +} + +static intptr_t vfio_map_mcp_obj(const char *mcp_obj) { intptr_t v_addr = (intptr_t)MAP_FAILED; int32_t ret, mc_fd; + struct vfio_group_status status = { .argsz = sizeof(status) }; struct vfio_device_info d_info = { .argsz = sizeof(d_info) }; struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; - /* getting the mcp object's fd*/ - mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj); - if (mc_fd < 0) { - DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d", - mcp_obj, group->fd); - return v_addr; - } - - /* getting device info*/ - ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info); - if (ret < 0) { - DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO"); - goto MC_FAILURE; - } + fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, mcp_obj, + &mc_fd, &d_info); /* getting device region info*/ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); @@ -588,19 +706,8 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev) struct vfio_device_info device_info = { .argsz = sizeof(device_info) }; struct rte_dpaa2_object *object = NULL; - dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, - dev->device.name); - if (dev_fd <= 0) { - DPAA2_BUS_ERR("Unable to obtain device FD for device:%s", - dev->device.name); - return -1; - } - - if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) { - DPAA2_BUS_ERR("Unable to obtain information for device:%s", - dev->device.name); - return -1; - } + fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, dev->device.name, + &dev_fd, &device_info); switch (dev->dev_type) { case DPAA2_ETH: @@ -611,6 +718,7 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev) case DPAA2_IO: case DPAA2_CI: case DPAA2_BPOOL: + case DPAA2_DPRTC: case 
DPAA2_MUX: TAILQ_FOREACH(object, &dpaa2_obj_list, next) { if (dev->dev_type == object->dev_type) @@ -634,25 +742,17 @@ fslmc_process_mcp(struct rte_dpaa2_device *dev) { int ret; intptr_t v_addr; - char *dev_name = NULL; struct fsl_mc_io dpmng = {0}; struct mc_version mc_ver_info = {0}; - rte_mcp_ptr_list = malloc(sizeof(void *) * 1); + rte_mcp_ptr_list = malloc(sizeof(void *) * (MC_PORTAL_INDEX + 1)); if (!rte_mcp_ptr_list) { DPAA2_BUS_ERR("Unable to allocate MC portal memory"); ret = -ENOMEM; goto cleanup; } - dev_name = strdup(dev->device.name); - if (!dev_name) { - DPAA2_BUS_ERR("Unable to allocate MC device name memory"); - ret = -ENOMEM; - goto cleanup; - } - - v_addr = vfio_map_mcp_obj(&vfio_group, dev_name); + v_addr = vfio_map_mcp_obj(dev->device.name); if (v_addr == (intptr_t)MAP_FAILED) { DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno); ret = -1; @@ -666,7 +766,7 @@ fslmc_process_mcp(struct rte_dpaa2_device *dev) * required. */ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { - rte_mcp_ptr_list[0] = (void *)v_addr; + rte_mcp_ptr_list[MC_PORTAL_INDEX] = (void *)v_addr; return 0; } @@ -686,15 +786,11 @@ fslmc_process_mcp(struct rte_dpaa2_device *dev) ret = -1; goto cleanup; } - rte_mcp_ptr_list[0] = (void *)v_addr; + rte_mcp_ptr_list[MC_PORTAL_INDEX] = (void *)v_addr; - free(dev_name); return 0; cleanup: - if (dev_name) - free(dev_name); - if (rte_mcp_ptr_list) { free(rte_mcp_ptr_list); rte_mcp_ptr_list = NULL; @@ -709,26 +805,56 @@ fslmc_vfio_process_group(void) int ret; int found_mportal = 0; struct rte_dpaa2_device *dev, *dev_temp; + bool is_dpmcp_in_blocklist = false, is_dpio_in_blocklist = false; + int dpmcp_count = 0, dpio_count = 0, current_device; + + TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) { + if (dev->dev_type == DPAA2_MPORTAL) { + dpmcp_count++; + if (dev->device.devargs && + dev->device.devargs->policy == RTE_DEV_BLOCKED) + is_dpmcp_in_blocklist = true; + } + if (dev->dev_type == DPAA2_IO) { + dpio_count++; + if (dev->device.devargs && + dev->device.devargs->policy == RTE_DEV_BLOCKED) + is_dpio_in_blocklist = true; + } + } /* Search the MCP as that should be initialized first. 
*/ + current_device = 0; TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) { if (dev->dev_type == DPAA2_MPORTAL) { + current_device++; if (dev->device.devargs && - dev->device.devargs->policy == RTE_DEV_BLACKLISTED) { - DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping", + dev->device.devargs->policy == RTE_DEV_BLOCKED) { + DPAA2_BUS_LOG(DEBUG, "%s Blocked, skipping", dev->device.name); TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next); continue; } - ret = fslmc_process_mcp(dev); - if (ret) { - DPAA2_BUS_ERR("Unable to map MC Portal"); - return -1; + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + !is_dpmcp_in_blocklist) { + if (dpmcp_count == 1 || + current_device != dpmcp_count) { + TAILQ_REMOVE(&rte_fslmc_bus.device_list, + dev, next); + continue; + } } - if (!found_mportal) + + if (!found_mportal) { + ret = fslmc_process_mcp(dev); + if (ret) { + DPAA2_BUS_ERR("Unable to map MC Portal"); + return -1; + } found_mportal = 1; + } TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next); free(dev); @@ -745,14 +871,25 @@ fslmc_vfio_process_group(void) return -1; } + current_device = 0; TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) { + if (dev->dev_type == DPAA2_IO) + current_device++; if (dev->device.devargs && - dev->device.devargs->policy == RTE_DEV_BLACKLISTED) { - DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping", + dev->device.devargs->policy == RTE_DEV_BLOCKED) { + DPAA2_BUS_LOG(DEBUG, "%s Blocked, skipping", dev->device.name); TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next); continue; } + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + dev->dev_type != DPAA2_ETH && + dev->dev_type != DPAA2_CRYPTO && + dev->dev_type != DPAA2_QDMA && + dev->dev_type != DPAA2_IO) { + TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next); + continue; + } switch (dev->dev_type) { case DPAA2_ETH: case DPAA2_CRYPTO: @@ -767,6 +904,7 @@ fslmc_vfio_process_group(void) case DPAA2_CON: case DPAA2_CI: case DPAA2_BPOOL: + case DPAA2_DPRTC: case DPAA2_MUX: /* IN case of secondary processes, all control objects * like dpbp, dpcon, dpci are not initialized/required @@ -788,6 +926,21 @@ fslmc_vfio_process_group(void) break; case DPAA2_IO: + if (!is_dpio_in_blocklist && dpio_count > 1) { + if (rte_eal_process_type() == RTE_PROC_SECONDARY + && current_device != dpio_count) { + TAILQ_REMOVE(&rte_fslmc_bus.device_list, + dev, next); + break; + } + if (rte_eal_process_type() == RTE_PROC_PRIMARY + && current_device == dpio_count) { + TAILQ_REMOVE(&rte_fslmc_bus.device_list, + dev, next); + break; + } + } + ret = fslmc_process_iodevices(dev); if (ret) { DPAA2_BUS_DEBUG("Dev (%s) init failed",
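
As a usage note for the new dpaa2_get_mcp_ptr() accessor added above: callers that previously dereferenced rte_mcp_ptr_list[] directly can go through the getter, which also covers the case where the list has not been allocated yet. A minimal sketch, assuming MC_PORTAL_INDEX is the slot that fslmc_process_mcp() fills (the value 0 below is only illustrative) and that the accessor is declared in a bus header:

#include <stdio.h>

/* Normally declared in a bus header; repeated here so the sketch is
 * self-contained. */
extern void *dpaa2_get_mcp_ptr(int portal_idx);
#define MC_PORTAL_INDEX 0	/* illustrative value */

static int use_mc_portal(void)
{
	void *mcp = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);

	if (!mcp) {
		/* rte_mcp_ptr_list was not set up: the bus scan has not run,
		 * or the MC portal device was blocked in this process. */
		fprintf(stderr, "MC portal not available\n");
		return -1;
	}
	/* ... use mcp as the fsl_mc_io register base for MC commands ... */
	return 0;
}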
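
The rte_fslmc_vfio_mem_dmamap() hunk (with the PRIx64 format fixes) boils down to a single VFIO_IOMMU_MAP_DMA ioctl on the container fd. A standalone sketch of that call, with container_fd, vaddr, iova and size assumed to be supplied by the caller:

#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <inttypes.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int map_for_dma(int container_fd, uint64_t vaddr, uint64_t iova,
		       uint64_t size)
{
	struct vfio_iommu_type1_dma_map dma_map;

	memset(&dma_map, 0, sizeof(dma_map));
	dma_map.argsz = sizeof(dma_map);
	dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	dma_map.vaddr = vaddr;
	dma_map.iova = iova;
	dma_map.size = size;

	/* The kernel ABI fields are __u64, hence the uint64_t casts and
	 * PRIx64 specifiers, matching the debug log fixed in the patch. */
	printf("dmamap 0x%" PRIx64 ":0x%" PRIx64 ", size 0x%" PRIx64 "\n",
	       (uint64_t)dma_map.vaddr, (uint64_t)dma_map.iova,
	       (uint64_t)dma_map.size);

	if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &dma_map))
		return -errno;
	return 0;
}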
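
fslmc_vfio_setup_device() replaces the open-coded VFIO_GROUP_GET_DEVICE_FD/VFIO_DEVICE_GET_INFO calls in vfio_map_mcp_obj() and fslmc_process_iodevices() with the full group/container/device sequence. The sketch below walks the same sequence with raw ioctls instead of the EAL helpers (rte_vfio_get_group_num(), rte_vfio_get_group_fd(), rte_vfio_get_container_fd()); the group path and IOMMU type are placeholders, and error-path cleanup is trimmed for brevity:

#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int vfio_open_device(const char *group_path, const char *dev_addr,
			    int iommu_type)
{
	struct vfio_group_status status = { .argsz = sizeof(status) };
	struct vfio_device_info dinfo = { .argsz = sizeof(dinfo) };
	int container, group, device;

	container = open("/dev/vfio/vfio", O_RDWR);
	group = open(group_path, O_RDWR);	/* e.g. "/dev/vfio/<group-no>" */
	if (container < 0 || group < 0)
		return -errno;

	/* the group must be viable: every device in it is bound to VFIO
	 * (or bound to nothing at all) */
	if (ioctl(group, VFIO_GROUP_GET_STATUS, &status) ||
	    !(status.flags & VFIO_GROUP_FLAGS_VIABLE))
		return -1;

	/* attach the group to a container and pick an IOMMU backend,
	 * unless another process already set this group's container */
	if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
		if (ioctl(group, VFIO_GROUP_SET_CONTAINER, &container))
			return -errno;
		if (ioctl(container, VFIO_CHECK_EXTENSION, iommu_type) <= 0 ||
		    ioctl(container, VFIO_SET_IOMMU, iommu_type))
			return -errno;
	}

	/* finally, get a device fd and query its regions/interrupts */
	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
	if (device < 0 || ioctl(device, VFIO_DEVICE_GET_INFO, &dinfo))
		return -errno;

	printf("%s: %u regions, %u irqs\n", dev_addr,
	       dinfo.num_regions, dinfo.num_irqs);
	return device;
}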
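
The TAILQ_FOREACH() dispatch over dpaa2_obj_list in fslmc_process_iodevices() matches against objects that individual drivers register with the bus. A rough registration sketch follows; the field names and the create-callback prototype are assumptions based on rte_fslmc.h rather than anything in this patch, so check them against the tree:

/* Sketch: hooking an object type (DPAA2_CON as an example) into
 * dpaa2_obj_list so fslmc_process_iodevices() can find it by dev_type.
 * Field names and callback signature are assumptions, not from this patch.
 */
#include <rte_common.h>
#include <rte_fslmc.h>

static int
my_dpcon_create(int vdev_fd __rte_unused,
		struct vfio_device_info *obj_info __rte_unused,
		int dpcon_id)
{
	/* open/configure the dpcon object identified by dpcon_id */
	return 0;
}

static struct rte_dpaa2_object my_dpcon_obj = {
	.dev_type = DPAA2_CON,
	.create = my_dpcon_create,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpcon, my_dpcon_obj);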
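
The DPAA2_IO handling in the second loop distributes DPIO portals between the primary and a secondary process: when no dpio is blocklisted and more than one exists, the primary skips the last dpio and a secondary keeps only that last one. The selection rule as a small self-contained sketch (keep_dpio() is a made-up helper, not part of the patch):

#include <stdbool.h>

/* current_device counts the dpio objects seen so far (1-based);
 * dpio_count is the total found in the earlier counting pass.
 * The rule only applies when no dpio is blocklisted. */
static bool keep_dpio(bool is_primary, int current_device, int dpio_count)
{
	if (dpio_count <= 1)
		return true;		/* nothing to share out */
	if (is_primary)
		return current_device != dpio_count;	/* all but the last */
	return current_device == dpio_count;		/* secondary: last only */
}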