/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#include <eal_filesystem.h>
#include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
-/** Pathname of FSL-MC devices directory. */
-#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
-
#define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */
/* The VFIO container and group used by the fsl-mc bus */
static struct fslmc_vfio_group vfio_group;
static struct fslmc_vfio_container vfio_container;
static int container_device_fd;
-static char *g_container;
+char *fslmc_container;
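+/* IOMMU type for the VFIO container, selected at scan time:
+ * RTE_VFIO_NOIOMMU when no-IOMMU mode is enabled, else VFIO_TYPE1_IOMMU.
+ */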
+static int fslmc_iommu_type;
static uint32_t *msi_intr_vaddr;
void *(*rte_mcp_ptr_list);
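+/* Return the MC portal pointer at the given index, or NULL if the
+ * portal list has not been populated yet.
+ */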
+void *
+dpaa2_get_mcp_ptr(int portal_idx)
+{
+ if (rte_mcp_ptr_list)
+ return rte_mcp_ptr_list[portal_idx];
+ else
+ return NULL;
+}
+
static struct rte_dpaa2_object_list dpaa2_obj_list =
TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
static int
fslmc_get_container_group(int *groupid)
{
int ret;
char *container;
- if (!g_container) {
+ if (!fslmc_container) {
container = getenv("DPRC");
if (container == NULL) {
- DPAA2_BUS_INFO("DPAA2: DPRC not available");
+ DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
return -EINVAL;
}

if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
DPAA2_BUS_ERR("Invalid container name: %s", container);
return -1;
}
- g_container = strdup(container);
- if (!g_container) {
+ fslmc_container = strdup(container);
+ if (!fslmc_container) {
DPAA2_BUS_ERR("Mem alloc failure; Container name");
return -ENOMEM;
}
}
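+ /* Pick the IOMMU type once; no-IOMMU mode bypasses VFIO DMA mapping */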
+ fslmc_iommu_type = (rte_vfio_noiommu_is_enabled() == 1) ?
+ RTE_VFIO_NOIOMMU : VFIO_TYPE1_IOMMU;
+
/* get group number */
ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
- g_container, groupid);
+ fslmc_container, groupid);
if (ret <= 0) {
- DPAA2_BUS_ERR("Unable to find %s IOMMU group", g_container);
+ DPAA2_BUS_ERR("Unable to find %s IOMMU group", fslmc_container);
return -1;
}
DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
- g_container, *groupid);
+ fslmc_container, *groupid);
return 0;
}
}
/* Check whether the selected IOMMU type is supported */
- if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
+ if (ioctl(fd, VFIO_CHECK_EXTENSION, fslmc_iommu_type)) {
/* Connect group to container */
ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
if (ret) {
return -errno;
}
- ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
+ ret = ioctl(fd, VFIO_SET_IOMMU, fslmc_iommu_type);
if (ret) {
DPAA2_BUS_ERR("Failed to setup VFIO iommu");
close(fd);
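+ /* Map one 4 KB page at offset 0x6030000 of the container device;
+ * presumably the GIC ITS translator page used to generate MSIs.
+ */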
vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
if (vaddr == MAP_FAILED) {
- DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno);
+ DPAA2_BUS_INFO("Unable to map region (errno = %d)", errno);
return -errno;
}
static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
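+/* Memory event callback: walk each contiguous chunk of the notified
+ * region and DMA-map it on alloc, or unmap it on dealloc.
+ */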
static void
-fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len)
+fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
+ void *arg __rte_unused)
{
struct rte_memseg_list *msl;
struct rte_memseg *ms;
"alloc" : "dealloc",
va, virt_addr, iova_addr, map_len);
+ /* iova_addr may be set to RTE_BAD_IOVA */
+ if (iova_addr == RTE_BAD_IOVA) {
+ DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
+ cur_len += map_len;
+ continue;
+ }
+
if (type == RTE_MEM_EVENT_ALLOC)
ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
else
};
int ret;
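+ /* In no-IOMMU mode there is no VFIO DMA mapping to set up: the
+ * device then uses physical addresses directly.
+ */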
+ if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
+ DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
+ return 0;
+ }
+
dma_map.size = len;
dma_map.vaddr = vaddr;
return -1;
}
- DPAA2_BUS_DEBUG("--> Map address: %llX, size: 0x%llX",
- dma_map.vaddr, dma_map.size);
+ DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
if (ret) {
DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
};
int ret;
+ if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
+ DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
+ return 0;
+ }
+
dma_unmap.size = len;
dma_unmap.iova = vaddr;
return -1;
}
- DPAA2_BUS_DEBUG("--> Unmap address: %llX, size: 0x%llX",
- dma_unmap.iova, dma_unmap.size);
+ DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
if (ret) {
DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
static int
fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
- const struct rte_memseg *ms, void *arg)
+ const struct rte_memseg *ms, void *arg)
{
int *n_segs = arg;
int ret;
+ /* if IOVA address is invalid, skip */
+ if (ms->iova == RTE_BAD_IOVA)
+ return 0;
+
ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
if (ret)
DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
return ret;
}
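+/* DMA-map a caller-provided region (vaddr -> iova, size bytes) into the
+ * VFIO container, e.g. for externally allocated memory; a no-op in
+ * no-IOMMU mode.
+ */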
+int
+rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size)
+{
+ int ret;
+ struct fslmc_vfio_group *group;
+ struct vfio_iommu_type1_dma_map dma_map = {
+ .argsz = sizeof(struct vfio_iommu_type1_dma_map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ };
+
+ if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
+ DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
+ return 0;
+ }
+
+ /* Set up the DMA mapping through the IOMMU */
+ group = &vfio_group;
+ if (!group->container) {
+ DPAA2_BUS_ERR("Container is not connected");
+ return -1;
+ }
+
+ dma_map.size = size;
+ dma_map.vaddr = vaddr;
+ dma_map.iova = iova;
+
+ DPAA2_BUS_DEBUG("VFIOdmamap 0x%"PRIx64":0x%"PRIx64",size 0x%"PRIx64"\n",
+ (uint64_t)dma_map.vaddr, (uint64_t)dma_map.iova,
+ (uint64_t)dma_map.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
+ &dma_map);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)",
+ errno);
+ return ret;
+ }
+
+ return 0;
+}
+
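+/* DMA-map every existing memory segment and register a callback so that
+ * hotplugged segments are mapped and unmapped as they come and go.
+ */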
int rte_fslmc_vfio_dmamap(void)
{
int i = 0, ret;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
/* Lock before parsing and registering callback to memory subsystem */
- rte_rwlock_read_lock(mem_lock);
+ rte_mcfg_mem_read_lock();
if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
- rte_rwlock_read_unlock(mem_lock);
+ rte_mcfg_mem_read_unlock();
return -1;
}
ret = rte_mem_event_callback_register("fslmc_memevent_clb",
- fslmc_memevent_cb);
+ fslmc_memevent_cb, NULL);
if (ret && rte_errno == ENOTSUP)
DPAA2_BUS_DEBUG("Memory event callbacks not supported");
else if (ret)
/* Existing segments have been mapped and memory callback for hotplug
* has been installed.
*/
- rte_rwlock_read_unlock(mem_lock);
+ rte_mcfg_mem_read_unlock();
+
+ return 0;
+}
+
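+/* Open the VFIO group and container for the device at sysfs_base/dev_addr,
+ * bind the group to the container, set the IOMMU type on first use, and
+ * return the device fd and info; this mirrors the generic EAL VFIO flow.
+ */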
+static int
+fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
+ int *vfio_dev_fd, struct vfio_device_info *device_info)
+{
+ struct vfio_group_status group_status = {
+ .argsz = sizeof(group_status)
+ };
+ int vfio_group_fd, vfio_container_fd, iommu_group_no, ret;
+
+ /* get group number */
+ ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_no);
+ if (ret < 0)
+ return -1;
+
+ /* get the actual group fd */
+ vfio_group_fd = rte_vfio_get_group_fd(iommu_group_no);
+ if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
+ return -1;
+
+ /*
+ * if vfio_group_fd == -ENOENT, that means the device
+ * isn't managed by VFIO
+ */
+ if (vfio_group_fd == -ENOENT) {
+ DPAA2_BUS_WARN("%s not managed by VFIO driver, skipping",
+ dev_addr);
+ return 1;
+ }
+
+ /* Opens main vfio file descriptor which represents the "container" */
+ vfio_container_fd = rte_vfio_get_container_fd();
+ if (vfio_container_fd < 0) {
+ DPAA2_BUS_ERR("Failed to open VFIO container");
+ return -errno;
+ }
+
+ /* check if the group is viable */
+ ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
+ if (ret) {
+ DPAA2_BUS_ERR(" %s cannot get group status, "
+ "error %i (%s)\n", dev_addr,
+ errno, strerror(errno));
+ close(vfio_group_fd);
+ rte_vfio_clear_group(vfio_group_fd);
+ return -1;
+ } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
+ DPAA2_BUS_ERR(" %s VFIO group is not viable!\n", dev_addr);
+ close(vfio_group_fd);
+ rte_vfio_clear_group(vfio_group_fd);
+ return -1;
+ }
+ /* At this point, we know that this group is viable (meaning,
+ * all devices are either bound to VFIO or not bound to anything)
+ */
+
+ /* check if group does not have a container yet */
+ if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
+
+ /* add group to a container */
+ ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
+ &vfio_container_fd);
+ if (ret) {
+ DPAA2_BUS_ERR(" %s cannot add VFIO group to container, "
+ "error %i (%s)\n", dev_addr,
+ errno, strerror(errno));
+ close(vfio_group_fd);
+ close(vfio_container_fd);
+ rte_vfio_clear_group(vfio_group_fd);
+ return -1;
+ }
+
+ /* Set an IOMMU type for the container */
+ if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
+ fslmc_iommu_type)) {
+ ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
+ fslmc_iommu_type);
+ if (ret) {
+ DPAA2_BUS_ERR("Failed to setup VFIO iommu");
+ close(vfio_group_fd);
+ close(vfio_container_fd);
+ rte_vfio_clear_group(vfio_group_fd);
+ return -errno;
+ }
+ } else {
+ DPAA2_BUS_ERR("No supported IOMMU available");
+ close(vfio_group_fd);
+ close(vfio_container_fd);
+ rte_vfio_clear_group(vfio_group_fd);
+ return -EINVAL;
+ }
+ }
+
+ /* get a file descriptor for the device */
+ *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
+ if (*vfio_dev_fd < 0) {
+ /* if we cannot get a device fd, this implies a problem with
+ * the VFIO group or the container not having IOMMU configured.
+ */
+
+ DPAA2_BUS_WARN("Getting a vfio_dev_fd for %s failed", dev_addr);
+ close(vfio_group_fd);
+ close(vfio_container_fd);
+ rte_vfio_clear_group(vfio_group_fd);
+ return -1;
+ }
+
+ /* test and setup the device */
+ ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
+ if (ret) {
+ DPAA2_BUS_ERR(" %s cannot get device info, error %i (%s)",
+ dev_addr, errno, strerror(errno));
+ close(*vfio_dev_fd);
+ close(vfio_group_fd);
+ close(vfio_container_fd);
+ rte_vfio_clear_group(vfio_group_fd);
+ return -1;
+ }
return 0;
}
-static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
+static intptr_t vfio_map_mcp_obj(const char *mcp_obj)
{
intptr_t v_addr = (intptr_t)MAP_FAILED;
int32_t ret, mc_fd;
+ struct vfio_group_status status = { .argsz = sizeof(status) };
struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
- /* getting the mcp object's fd*/
- mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
- if (mc_fd < 0) {
- DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d",
- mcp_obj, group->fd);
- return v_addr;
- }
-
- /* getting device info*/
- ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
- if (ret < 0) {
- DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO");
- goto MC_FAILURE;
- }
+ ret = fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, mcp_obj,
+ &mc_fd, &d_info);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO setup failed for %s", mcp_obj);
+ return v_addr;
+ }
/* get the device region info */
ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info);
goto MC_FAILURE;
}
- DPAA2_BUS_DEBUG("Region offset = %llx , region size = %llx",
- reg_info.offset, reg_info.size);
-
v_addr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
mc_fd, reg_info.offset);
struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
struct rte_dpaa2_object *object = NULL;
- dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD,
- dev->device.name);
- if (dev_fd <= 0) {
- DPAA2_BUS_ERR("Unable to obtain device FD for device:%s",
- dev->device.name);
- return -1;
- }
-
- if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) {
- DPAA2_BUS_ERR("Unable to obtain information for device:%s",
- dev->device.name);
- return -1;
- }
+ if (fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, dev->device.name,
+ &dev_fd, &device_info))
+ return -1;
switch (dev->dev_type) {
case DPAA2_ETH:
case DPAA2_IO:
case DPAA2_CI:
case DPAA2_BPOOL:
+ case DPAA2_DPRTC:
+ case DPAA2_MUX:
TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
if (dev->dev_type == object->dev_type)
object->create(dev_fd, &device_info,
break;
}
- DPAA2_BUS_DEBUG("Device (%s) abstracted from VFIO",
- dev->device.name);
+ DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO",
+ dev->device.name);
return 0;
}
static int
fslmc_process_mcp(struct rte_dpaa2_device *dev)
{
+ int ret;
intptr_t v_addr;
- char *dev_name;
struct fsl_mc_io dpmng = {0};
struct mc_version mc_ver_info = {0};
- rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
+ rte_mcp_ptr_list = malloc(sizeof(void *) * (MC_PORTAL_INDEX + 1));
if (!rte_mcp_ptr_list) {
DPAA2_BUS_ERR("Unable to allocate MC portal memory");
- return -ENOMEM;
- }
-
- dev_name = strdup(dev->device.name);
- if (!dev_name) {
- DPAA2_BUS_ERR("Unable to allocate MC device name memory");
- free(rte_mcp_ptr_list);
- rte_mcp_ptr_list = NULL;
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto cleanup;
}
- v_addr = vfio_map_mcp_obj(&vfio_group, dev_name);
+ v_addr = vfio_map_mcp_obj(dev->device.name);
if (v_addr == (intptr_t)MAP_FAILED) {
DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno);
- free(rte_mcp_ptr_list);
- rte_mcp_ptr_list = NULL;
- return -1;
+ ret = -1;
+ goto cleanup;
}
/* check the MC version compatibility */
dpmng.regs = (void *)v_addr;
+
+ /* In case of secondary processes, MC version check is no longer
+ * required.
+ */
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ rte_mcp_ptr_list[MC_PORTAL_INDEX] = (void *)v_addr;
+ return 0;
+ }
+
if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) {
DPAA2_BUS_ERR("Unable to obtain MC version");
- return -1;
+ ret = -1;
+ goto cleanup;
}
if ((mc_ver_info.major != MC_VER_MAJOR) ||
MC_VER_MAJOR, MC_VER_MINOR,
mc_ver_info.major, mc_ver_info.minor,
mc_ver_info.revision);
+ ret = -1;
+ goto cleanup;
+ }
+ rte_mcp_ptr_list[MC_PORTAL_INDEX] = (void *)v_addr;
+
+ return 0;
+
+cleanup:
+ if (rte_mcp_ptr_list) {
free(rte_mcp_ptr_list);
rte_mcp_ptr_list = NULL;
- return -1;
}
- rte_mcp_ptr_list[0] = (void *)v_addr;
- return 0;
+ return ret;
}
int
int ret;
int found_mportal = 0;
struct rte_dpaa2_device *dev, *dev_temp;
+ bool is_dpmcp_in_blocklist = false, is_dpio_in_blocklist = false;
+ int dpmcp_count = 0, dpio_count = 0, current_device;
+
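+ /* First pass: count dpmcp and dpio objects and note whether any are
+ * blocklisted, so the portals can be divided between the primary and
+ * secondary processes below.
+ */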
+ TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ if (dev->dev_type == DPAA2_MPORTAL) {
+ dpmcp_count++;
+ if (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLOCKED)
+ is_dpmcp_in_blocklist = true;
+ }
+ if (dev->dev_type == DPAA2_IO) {
+ dpio_count++;
+ if (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLOCKED)
+ is_dpio_in_blocklist = true;
+ }
+ }
/* Search the MCP as that should be initialized first. */
+ current_device = 0;
TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
if (dev->dev_type == DPAA2_MPORTAL) {
- ret = fslmc_process_mcp(dev);
- if (ret) {
- DPAA2_BUS_ERR("Unable to map MC Portal");
- return -1;
+ current_device++;
+ if (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLOCKED) {
+ DPAA2_BUS_LOG(DEBUG, "%s Blocked, skipping",
+ dev->device.name);
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list,
+ dev, next);
+ continue;
}
- if (!found_mportal)
+
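+ /* Without an explicit blocklist, a secondary process
+ * keeps only the last dpmcp; with a single dpmcp it
+ * keeps none, since the primary already owns it.
+ */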
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ !is_dpmcp_in_blocklist) {
+ if (dpmcp_count == 1 ||
+ current_device != dpmcp_count) {
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list,
+ dev, next);
+ continue;
+ }
+ }
+
+ if (!found_mportal) {
+ ret = fslmc_process_mcp(dev);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to map MC Portal");
+ return -1;
+ }
found_mportal = 1;
+ }
TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
free(dev);
}
}

/* Cannot continue if there is not even a single mportal */
if (!found_mportal) {
DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
return -1;
}
+ current_device = 0;
TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
- if (!dev)
- break;
-
+ if (dev->dev_type == DPAA2_IO)
+ current_device++;
+ if (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLOCKED) {
+ DPAA2_BUS_LOG(DEBUG, "%s Blocked, skipping",
+ dev->device.name);
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ continue;
+ }
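+ /* Secondary processes set up only datapath objects (eth,
+ * crypto, qdma, io); all other objects are owned by the
+ * primary.
+ */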
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ dev->dev_type != DPAA2_ETH &&
+ dev->dev_type != DPAA2_CRYPTO &&
+ dev->dev_type != DPAA2_QDMA &&
+ dev->dev_type != DPAA2_IO) {
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ continue;
+ }
switch (dev->dev_type) {
case DPAA2_ETH:
case DPAA2_CRYPTO:
+ case DPAA2_QDMA:
ret = fslmc_process_iodevices(dev);
if (ret) {
DPAA2_BUS_DEBUG("Dev (%s) init failed",
}
break;
case DPAA2_CON:
- case DPAA2_IO:
case DPAA2_CI:
case DPAA2_BPOOL:
+ case DPAA2_DPRTC:
+ case DPAA2_MUX:
+ /* In case of secondary processes, control objects
+ * like dpbp, dpcon and dpci are not initialized or
+ * required - all of these are assumed to have been
+ * initialized and made available by the primary.
+ */
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ continue;
+
/* Call the object creation routine and remove the
* device entry from device list
*/
return -1;
}
- /* This device is not required to be in the DPDK
- * exposed device list.
- */
- TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
- free(dev);
- dev = NULL;
+ break;
+ case DPAA2_IO:
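+ /* With multiple unblocked dpio objects, the last one
+ * is reserved for secondary processes: the primary
+ * skips it and a secondary keeps only it.
+ */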
+ if (!is_dpio_in_blocklist && dpio_count > 1) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY
+ && current_device != dpio_count) {
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list,
+ dev, next);
+ break;
+ }
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY
+ && current_device == dpio_count) {
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list,
+ dev, next);
+ break;
+ }
+ }
+
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ DPAA2_BUS_DEBUG("Dev (%s) init failed",
+ dev->device.name);
+ return -1;
+ }
+
break;
case DPAA2_UNKNOWN:
default:
}
/* Get Device information */
- ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container);
+ ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, fslmc_container);
if (ret < 0) {
DPAA2_BUS_ERR("Error getting device %s fd from group %d",
- g_container, vfio_group.groupid);
+ fslmc_container, vfio_group.groupid);
close(vfio_group.fd);
rte_vfio_clear_group(vfio_group.fd);
return ret;