dpdk.git: drivers/bus/fslmc/fslmc_vfio.c
index 0c048dc..970969d 100644
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016 NXP
+ *   Copyright 2016-2019 NXP
  *
  */
 
@@ -30,6 +30,7 @@
 #include <rte_kvargs.h>
 #include <rte_dev.h>
 #include <rte_bus.h>
+#include <rte_eal_memconfig.h>
 
 #include "rte_fslmc.h"
 #include "fslmc_vfio.h"
 #include "portal/dpaa2_hw_pvt.h"
 #include "portal/dpaa2_hw_dpio.h"
 
-/** Pathname of FSL-MC devices directory. */
-#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
-
 #define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */
 
 /* Number of VFIO containers & groups within */
 static struct fslmc_vfio_group vfio_group;
 static struct fslmc_vfio_container vfio_container;
 static int container_device_fd;
-static char *g_container;
+char *fslmc_container;
+static int fslmc_iommu_type;
 static uint32_t *msi_intr_vaddr;
 void *(*rte_mcp_ptr_list);
-static int is_dma_done;
 
 static struct rte_dpaa2_object_list dpaa2_obj_list =
        TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
@@ -71,10 +69,10 @@ fslmc_get_container_group(int *groupid)
        int ret;
        char *container;
 
-       if (!g_container) {
+       if (!fslmc_container) {
                container = getenv("DPRC");
                if (container == NULL) {
-                       DPAA2_BUS_INFO("DPAA2: DPRC not available");
+                       DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
                        return -EINVAL;
                }
 
@@ -83,22 +81,26 @@ fslmc_get_container_group(int *groupid)
                        return -1;
                }
 
-               g_container = strdup(container);
-               if (!g_container) {
+               fslmc_container = strdup(container);
+               if (!fslmc_container) {
                        DPAA2_BUS_ERR("Mem alloc failure; Container name");
                        return -ENOMEM;
                }
        }
 
+       fslmc_iommu_type = (rte_vfio_noiommu_is_enabled() == 1) ?
+               RTE_VFIO_NOIOMMU : VFIO_TYPE1_IOMMU;
+
        /* get group number */
-       ret = vfio_get_group_no(SYSFS_FSL_MC_DEVICES, g_container, groupid);
+       ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
+                                    fslmc_container, groupid);
        if (ret <= 0) {
-               DPAA2_BUS_ERR("Unable to find %s IOMMU group", g_container);
+               DPAA2_BUS_ERR("Unable to find %s IOMMU group", fslmc_container);
                return -1;
        }
 
        DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
-                       g_container, *groupid);
+                       fslmc_container, *groupid);
 
        return 0;
 }
@@ -124,14 +126,14 @@ vfio_connect_container(void)
        }
 
        /* Opens main vfio file descriptor which represents the "container" */
-       fd = vfio_get_container_fd();
+       fd = rte_vfio_get_container_fd();
        if (fd < 0) {
                DPAA2_BUS_ERR("Failed to open VFIO container");
                return -errno;
        }
 
        /* Check whether support for the selected IOMMU type is present */
-       if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
+       if (ioctl(fd, VFIO_CHECK_EXTENSION, fslmc_iommu_type)) {
                /* Connect group to container */
                ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
                if (ret) {
@@ -140,7 +142,7 @@ vfio_connect_container(void)
                        return -errno;
                }
 
-               ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
+               ret = ioctl(fd, VFIO_SET_IOMMU, fslmc_iommu_type);
                if (ret) {
                        DPAA2_BUS_ERR("Failed to setup VFIO iommu");
                        close(fd);
@@ -175,7 +177,7 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
        vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
                PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
        if (vaddr == MAP_FAILED) {
-               DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno);
+               DPAA2_BUS_INFO("Unable to map region (errno = %d)", errno);
                return -errno;
        }
 
@@ -189,10 +191,70 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
        return -errno;
 }
 
+static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+
+static void
+fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
+               void *arg __rte_unused)
+{
+       struct rte_memseg_list *msl;
+       struct rte_memseg *ms;
+       size_t cur_len = 0, map_len = 0;
+       uint64_t virt_addr;
+       rte_iova_t iova_addr;
+       int ret;
+
+       msl = rte_mem_virt2memseg_list(addr);
+
+       while (cur_len < len) {
+               const void *va = RTE_PTR_ADD(addr, cur_len);
+
+               ms = rte_mem_virt2memseg(va, msl);
+               iova_addr = ms->iova;
+               virt_addr = ms->addr_64;
+               map_len = ms->len;
+
+               DPAA2_BUS_DEBUG("Request for %s, va=%p, "
+                               "virt_addr=0x%" PRIx64 ", "
+                               "iova=0x%" PRIx64 ", map_len=%zu",
+                               type == RTE_MEM_EVENT_ALLOC ?
+                                       "alloc" : "dealloc",
+                               va, virt_addr, iova_addr, map_len);
+
+               /* iova_addr may be set to RTE_BAD_IOVA */
+               if (iova_addr == RTE_BAD_IOVA) {
+                       DPAA2_BUS_DEBUG("Segment has invalid iova, skipping");
+                       cur_len += map_len;
+                       continue;
+               }
+
+               if (type == RTE_MEM_EVENT_ALLOC)
+                       ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
+               else
+                       ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
+
+               if (ret != 0) {
+                       DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
+                                       "Map=%d, addr=%p, len=%zu, err:(%d)",
+                                       type, va, map_len, ret);
+                       return;
+               }
+
+               cur_len += map_len;
+       }
+
+       if (type == RTE_MEM_EVENT_ALLOC)
+               DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
+                               addr, len);
+       else
+               DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
+                               addr, len);
+}
+
 static int
-fslmc_vfio_map(const struct rte_memseg *ms, void *arg)
+fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
 {
-       int *n_segs = arg;
        struct fslmc_vfio_group *group;
        struct vfio_iommu_type1_dma_map dma_map = {
                .argsz = sizeof(struct vfio_iommu_type1_dma_map),
@@ -200,10 +262,16 @@ fslmc_vfio_map(const struct rte_memseg *ms, void *arg)
        };
        int ret;
 
-       dma_map.size = ms->len;
-       dma_map.vaddr = ms->addr_64;
+       if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
+               DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
+               return 0;
+       }
+
+       dma_map.size = len;
+       dma_map.vaddr = vaddr;
+
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-       dma_map.iova = ms->iova;
+       dma_map.iova = iovaddr;
 #else
        dma_map.iova = dma_map.vaddr;
 #endif
@@ -216,42 +284,138 @@ fslmc_vfio_map(const struct rte_memseg *ms, void *arg)
                return -1;
        }
 
-       DPAA2_BUS_DEBUG("-->Initial SHM Virtual ADDR %llX",
-                       dma_map.vaddr);
-       DPAA2_BUS_DEBUG("-----> DMA size 0x%llX", dma_map.size);
-       ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
-                       &dma_map);
+       DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
+                       (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
+       ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
        if (ret) {
                DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
                                errno);
                return -1;
        }
-       (*n_segs)++;
+
        return 0;
 }
 
-int rte_fslmc_vfio_dmamap(void)
+static int
+fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
+{
+       struct fslmc_vfio_group *group;
+       struct vfio_iommu_type1_dma_unmap dma_unmap = {
+               .argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
+               .flags = 0,
+       };
+       int ret;
+
+       if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
+               DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
+               return 0;
+       }
+
+       dma_unmap.size = len;
+       dma_unmap.iova = vaddr;
+
+       /* SET DMA MAP for IOMMU */
+       group = &vfio_group;
+
+       if (!group->container) {
+               DPAA2_BUS_ERR("Container is not connected");
+               return -1;
+       }
+
+       DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
+                       (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
+       ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
+       if (ret) {
+               DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
+                               errno);
+               return -1;
+       }
+
+       return 0;
+}
+
+static int
+fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
+               const struct rte_memseg *ms, void *arg)
 {
-       const struct rte_memseg *memseg;
-       int i = 0;
+       int *n_segs = arg;
+       int ret;
 
-       if (is_dma_done)
+       /* if IOVA address is invalid, skip */
+       if (ms->iova == RTE_BAD_IOVA)
                return 0;
 
-       memseg = rte_eal_get_physmem_layout();
-       if (memseg == NULL) {
-               DPAA2_BUS_ERR("Cannot get physical layout");
-               return -ENODEV;
+       ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
+       if (ret)
+               DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
+                               ms->addr, ms->len);
+       else
+               (*n_segs)++;
+
+       return ret;
+}
+
+int
+rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size)
+{
+       int ret;
+       struct fslmc_vfio_group *group;
+       struct vfio_iommu_type1_dma_map dma_map = {
+               .argsz = sizeof(struct vfio_iommu_type1_dma_map),
+               .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+       };
+
+       if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
+               DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
+               return 0;
        }
 
-       if (rte_memseg_walk(fslmc_vfio_map, &i) < 0)
+       /* SET DMA MAP for IOMMU */
+       group = &vfio_group;
+       if (!group->container) {
+               DPAA2_BUS_ERR("Container is not connected");
                return -1;
+       }
+
+       dma_map.size = size;
+       dma_map.vaddr = vaddr;
+       dma_map.iova = iova;
+
+       DPAA2_BUS_DEBUG("VFIO dmamap 0x%"PRIx64":0x%"PRIx64", size 0x%"PRIx64,
+                       (uint64_t)dma_map.vaddr, (uint64_t)dma_map.iova,
+                       (uint64_t)dma_map.size);
+       ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
+                   &dma_map);
+       if (ret) {
+               DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)",
+                       errno);
+               return ret;
+       }
+
+       return 0;
+}
+
+int rte_fslmc_vfio_dmamap(void)
+{
+       int i = 0, ret;
 
-       /* Verifying that at least single segment is available */
-       if (i <= 0) {
-               DPAA2_BUS_ERR("No Segments found for VFIO Mapping");
+       /* Lock before parsing and registering callback to memory subsystem */
+       rte_mcfg_mem_read_lock();
+
+       if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
+               rte_mcfg_mem_read_unlock();
                return -1;
        }
+
+       ret = rte_mem_event_callback_register("fslmc_memevent_clb",
+                       fslmc_memevent_cb, NULL);
+       if (ret && rte_errno == ENOTSUP)
+               DPAA2_BUS_DEBUG("Memory event callbacks not supported");
+       else if (ret)
+               DPAA2_BUS_DEBUG("Unable to install memory handler");
+       else
+               DPAA2_BUS_DEBUG("Installed memory callback handler");
+
        DPAA2_BUS_DEBUG("Total %d segments found.", i);
 
        /* TODO - This is a W.A. as VFIO currently does not add the mapping of
@@ -260,33 +424,144 @@ int rte_fslmc_vfio_dmamap(void)
         */
        vfio_map_irq_region(&vfio_group);
 
-       is_dma_done = 1;
+       /* Existing segments have been mapped and memory callback for hotplug
+        * has been installed.
+        */
+       rte_mcfg_mem_read_unlock();
 
        return 0;
 }
 
-static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
+static int
+fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
+               int *vfio_dev_fd, struct vfio_device_info *device_info)
+{
+       struct vfio_group_status group_status = {
+                       .argsz = sizeof(group_status)
+       };
+       int vfio_group_fd, vfio_container_fd, iommu_group_no, ret;
+
+       /* get group number */
+       ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_no);
+       if (ret < 0)
+               return -1;
+
+       /* get the actual group fd */
+       vfio_group_fd = rte_vfio_get_group_fd(iommu_group_no);
+       if (vfio_group_fd < 0)
+               return -1;
+
+       /* if group_fd == 0, that means the device isn't managed by VFIO */
+       if (vfio_group_fd == 0) {
+               RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
+                               dev_addr);
+               return 1;
+       }
+
+       /* Opens main vfio file descriptor which represents the "container" */
+       vfio_container_fd = rte_vfio_get_container_fd();
+       if (vfio_container_fd < 0) {
+               DPAA2_BUS_ERR("Failed to open VFIO container");
+               return -errno;
+       }
+
+       /* check if the group is viable */
+       ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
+       if (ret) {
+               DPAA2_BUS_ERR("  %s cannot get group status, "
+                               "error %i (%s)", dev_addr,
+                               errno, strerror(errno));
+               close(vfio_group_fd);
+               rte_vfio_clear_group(vfio_group_fd);
+               return -1;
+       } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
+               DPAA2_BUS_ERR("  %s VFIO group is not viable!", dev_addr);
+               close(vfio_group_fd);
+               rte_vfio_clear_group(vfio_group_fd);
+               return -1;
+       }
+       /* At this point, we know that this group is viable (meaning,
+        * all devices are either bound to VFIO or not bound to anything)
+        */
+
+       /* check if group does not have a container yet */
+       if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
+
+               /* add group to a container */
+               ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
+                               &vfio_container_fd);
+               if (ret) {
+                       DPAA2_BUS_ERR("  %s cannot add VFIO group to container, "
+                                       "error %i (%s)", dev_addr,
+                                       errno, strerror(errno));
+                       close(vfio_group_fd);
+                       close(vfio_container_fd);
+                       rte_vfio_clear_group(vfio_group_fd);
+                       return -1;
+               }
+
+               /* Set an IOMMU type for the container */
+               if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
+                         fslmc_iommu_type)) {
+                       ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
+                                   fslmc_iommu_type);
+                       if (ret) {
+                               DPAA2_BUS_ERR("Failed to setup VFIO iommu");
+                               close(vfio_group_fd);
+                               close(vfio_container_fd);
+                               return -errno;
+                       }
+               } else {
+                       DPAA2_BUS_ERR("No supported IOMMU available");
+                       close(vfio_group_fd);
+                       close(vfio_container_fd);
+                       return -EINVAL;
+               }
+       }
+
+       /* get a file descriptor for the device */
+       *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
+       if (*vfio_dev_fd < 0) {
+               /* if we cannot get a device fd, this implies a problem with
+                * the VFIO group or the container not having IOMMU configured.
+                */
+
+               DPAA2_BUS_WARN("Getting a vfio_dev_fd for %s failed", dev_addr);
+               close(vfio_group_fd);
+               close(vfio_container_fd);
+               rte_vfio_clear_group(vfio_group_fd);
+               return -1;
+       }
+
+       /* test and setup the device */
+       ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
+       if (ret) {
+               DPAA2_BUS_ERR("  %s cannot get device info, error %i (%s)",
+                               dev_addr, errno, strerror(errno));
+               close(*vfio_dev_fd);
+               close(vfio_group_fd);
+               close(vfio_container_fd);
+               rte_vfio_clear_group(vfio_group_fd);
+               return -1;
+       }
+
+       return 0;
+}
+
+static intptr_t vfio_map_mcp_obj(const char *mcp_obj)
 {
        intptr_t v_addr = (intptr_t)MAP_FAILED;
        int32_t ret, mc_fd;
+       struct vfio_group_status status = { .argsz = sizeof(status) };
 
        struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
        struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
 
-       /* getting the mcp object's fd*/
-       mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
-       if (mc_fd < 0) {
-               DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d",
-                             mcp_obj, group->fd);
-               return v_addr;
-       }
-
-       /* getting device info*/
-       ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
-       if (ret < 0) {
-               DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO");
-               goto MC_FAILURE;
-       }
+       ret = fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, mcp_obj,
+                       &mc_fd, &d_info);
+       if (ret) {
+               DPAA2_BUS_ERR("VFIO setup failed for MC portal %s", mcp_obj);
+               return v_addr;
+       }
 
        /* getting device region info*/
        ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
@@ -295,9 +570,6 @@ static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
                goto MC_FAILURE;
        }
 
-       DPAA2_BUS_DEBUG("Region offset = %llx  , region size = %llx",
-                       reg_info.offset, reg_info.size);
-
        v_addr = (size_t)mmap(NULL, reg_info.size,
                PROT_WRITE | PROT_READ, MAP_SHARED,
                mc_fd, reg_info.offset);
@@ -422,19 +694,8 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
        struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
        struct rte_dpaa2_object *object = NULL;
 
-       dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD,
-                      dev->device.name);
-       if (dev_fd <= 0) {
-               DPAA2_BUS_ERR("Unable to obtain device FD for device:%s",
-                             dev->device.name);
-               return -1;
-       }
-
-       if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) {
-               DPAA2_BUS_ERR("Unable to obtain information for device:%s",
-                             dev->device.name);
-               return -1;
-       }
+       if (fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, dev->device.name,
+                       &dev_fd, &device_info))
+               return -1;
 
        switch (dev->dev_type) {
        case DPAA2_ETH:
@@ -445,6 +706,8 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
        case DPAA2_IO:
        case DPAA2_CI:
        case DPAA2_BPOOL:
+       case DPAA2_DPRTC:
+       case DPAA2_MUX:
                TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
                        if (dev->dev_type == object->dev_type)
                                object->create(dev_fd, &device_info,
@@ -457,46 +720,56 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
                break;
        }
 
-       DPAA2_BUS_DEBUG("Device (%s) abstracted from VFIO",
-                       dev->device.name);
+       DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO",
+                     dev->device.name);
        return 0;
 }
 
 static int
 fslmc_process_mcp(struct rte_dpaa2_device *dev)
 {
+       int ret;
        intptr_t v_addr;
-       char *dev_name;
+       char *dev_name = NULL;
        struct fsl_mc_io dpmng  = {0};
        struct mc_version mc_ver_info = {0};
 
        rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
        if (!rte_mcp_ptr_list) {
                DPAA2_BUS_ERR("Unable to allocate MC portal memory");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto cleanup;
        }
 
        dev_name = strdup(dev->device.name);
        if (!dev_name) {
                DPAA2_BUS_ERR("Unable to allocate MC device name memory");
-               free(rte_mcp_ptr_list);
-               rte_mcp_ptr_list = NULL;
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto cleanup;
        }
 
-       v_addr = vfio_map_mcp_obj(&vfio_group, dev_name);
+       v_addr = vfio_map_mcp_obj(dev->device.name);
        if (v_addr == (intptr_t)MAP_FAILED) {
                DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno);
-               free(rte_mcp_ptr_list);
-               rte_mcp_ptr_list = NULL;
-               return -1;
+               ret = -1;
+               goto cleanup;
        }
 
        /* check the MC version compatibility */
        dpmng.regs = (void *)v_addr;
+
+       /* In case of secondary processes, MC version check is no longer
+        * required.
+        */
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+               rte_mcp_ptr_list[0] = (void *)v_addr;
+               free(dev_name);
+               return 0;
+       }
+
        if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) {
                DPAA2_BUS_ERR("Unable to obtain MC version");
-               return -1;
+               ret = -1;
+               goto cleanup;
        }
 
        if ((mc_ver_info.major != MC_VER_MAJOR) ||
@@ -506,13 +779,24 @@ fslmc_process_mcp(struct rte_dpaa2_device *dev)
                              MC_VER_MAJOR, MC_VER_MINOR,
                              mc_ver_info.major, mc_ver_info.minor,
                              mc_ver_info.revision);
-               free(rte_mcp_ptr_list);
-               rte_mcp_ptr_list = NULL;
-               return -1;
+               ret = -1;
+               goto cleanup;
        }
        rte_mcp_ptr_list[0] = (void *)v_addr;
 
+       free(dev_name);
        return 0;
+
+cleanup:
+       if (dev_name)
+               free(dev_name);
+
+       if (rte_mcp_ptr_list) {
+               free(rte_mcp_ptr_list);
+               rte_mcp_ptr_list = NULL;
+       }
+
+       return ret;
 }
 
 int
@@ -525,6 +809,15 @@ fslmc_vfio_process_group(void)
        /* Search the MCP as that should be initialized first. */
        TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
                if (dev->dev_type == DPAA2_MPORTAL) {
+                       if (dev->device.devargs &&
+                           dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
+                               DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
+                                             dev->device.name);
+                               TAILQ_REMOVE(&rte_fslmc_bus.device_list,
+                                               dev, next);
+                               continue;
+                       }
+
                        ret = fslmc_process_mcp(dev);
                        if (ret) {
                                DPAA2_BUS_ERR("Unable to map MC Portal");
@@ -549,12 +842,17 @@ fslmc_vfio_process_group(void)
        }
 
        TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
-               if (!dev)
-                       break;
-
+               if (dev->device.devargs &&
+                   dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
+                       DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
+                                     dev->device.name);
+                       TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+                       continue;
+               }
                switch (dev->dev_type) {
                case DPAA2_ETH:
                case DPAA2_CRYPTO:
+               case DPAA2_QDMA:
                        ret = fslmc_process_iodevices(dev);
                        if (ret) {
                                DPAA2_BUS_DEBUG("Dev (%s) init failed",
@@ -563,9 +861,18 @@ fslmc_vfio_process_group(void)
                        }
                        break;
                case DPAA2_CON:
-               case DPAA2_IO:
                case DPAA2_CI:
                case DPAA2_BPOOL:
+               case DPAA2_DPRTC:
+               case DPAA2_MUX:
+                       /* In case of secondary processes, control objects
+                        * like dpbp, dpcon and dpci are not initialized here;
+                        * they are assumed to be created and made available
+                        * by the primary process.
+                        */
+                       if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+                               continue;
+
                        /* Call the object creation routine and remove the
                         * device entry from device list
                         */
@@ -576,12 +883,15 @@ fslmc_vfio_process_group(void)
                                return -1;
                        }
 
-                       /* This device is not required to be in the DPDK
-                        * exposed device list.
-                        */
-                       TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
-                       free(dev);
-                       dev = NULL;
+                       break;
+               case DPAA2_IO:
+                       ret = fslmc_process_iodevices(dev);
+                       if (ret) {
+                               DPAA2_BUS_DEBUG("Dev (%s) init failed",
+                                               dev->device.name);
+                               return -1;
+                       }
+
                        break;
                case DPAA2_UNKNOWN:
                default:
@@ -621,7 +931,7 @@ fslmc_vfio_setup_group(void)
        }
 
        /* Get the actual group fd */
-       ret = vfio_get_group_fd(groupid);
+       ret = rte_vfio_get_group_fd(groupid);
        if (ret < 0)
                return ret;
        vfio_group.fd = ret;
@@ -659,10 +969,10 @@ fslmc_vfio_setup_group(void)
        }
 
        /* Get Device information */
-       ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container);
+       ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, fslmc_container);
        if (ret < 0) {
                DPAA2_BUS_ERR("Error getting device %s fd from group %d",
-                             g_container, vfio_group.groupid);
+                             fslmc_container, vfio_group.groupid);
                close(vfio_group.fd);
                rte_vfio_clear_group(vfio_group.fd);
                return ret;