1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2019 NXP
10 #include <sys/types.h>
15 #include <sys/ioctl.h>
21 #include <sys/eventfd.h>
23 #include <eal_filesystem.h>
25 #include <rte_ethdev_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_memcpy.h>
28 #include <rte_string_fns.h>
29 #include <rte_cycles.h>
30 #include <rte_kvargs.h>
33 #include <rte_eal_memconfig.h>
35 #include "rte_fslmc.h"
36 #include "fslmc_vfio.h"
37 #include "fslmc_logs.h"
38 #include <mc/fsl_dpmng.h>
40 #include "portal/dpaa2_hw_pvt.h"
41 #include "portal/dpaa2_hw_dpio.h"
43 #define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */
45 /* Number of VFIO containers & groups within this process */
46 static struct fslmc_vfio_group vfio_group;
47 static struct fslmc_vfio_container vfio_container;
48 static int container_device_fd;
49 char *fslmc_container;
50 static int fslmc_iommu_type;
51 static uint32_t *msi_intr_vaddr;
52 void *(*rte_mcp_ptr_list);
/*
 * Return the MC (Management Complex) portal pointer stored at @portal_idx
 * in the global rte_mcp_ptr_list table.
 * NOTE(review): no NULL check of rte_mcp_ptr_list and no bounds check of
 * portal_idx are visible here -- this chunk appears truncated (return type
 * and braces missing); confirm guards against the full source.
 */
55 dpaa2_get_mcp_ptr(int portal_idx)
58 return rte_mcp_ptr_list[portal_idx];
63 static struct rte_dpaa2_object_list dpaa2_obj_list =
64 TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
66 /* Register an fslmc-bus-based DPAA2 driver */
/*
 * Register a DPAA2 object driver with the fslmc bus by appending it to the
 * global dpaa2_obj_list; entries are later matched by dev_type when devices
 * are processed (see fslmc_process_iodevices()).
 * NOTE(review): no duplicate-registration check is visible in this chunk.
 */
68 rte_fslmc_object_register(struct rte_dpaa2_object *object)
72 TAILQ_INSERT_TAIL(&dpaa2_obj_list, object, next);
/*
 * Resolve the DPRC container name and its VFIO IOMMU group number.
 *
 * On first call, reads the container name from the DPRC environment
 * variable, validates its length against FSLMC_CONTAINER_MAX_LEN, and
 * caches a strdup()'d copy in the global fslmc_container. Also selects
 * fslmc_iommu_type (NOIOMMU vs TYPE1) based on rte_vfio_noiommu_is_enabled().
 * The group id is written through @groupid.
 * NOTE(review): chunk is truncated -- error-path returns and closing braces
 * are not visible; verify each failure branch returns an error code.
 */
76 fslmc_get_container_group(int *groupid)
81 if (!fslmc_container) {
82 container = getenv("DPRC");
83 if (container == NULL) {
84 DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
/* Reject names that would not fit the dprc.XX format limit. */
88 if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
89 DPAA2_BUS_ERR("Invalid container name: %s", container);
/* Cached copy owned by this module; freed nowhere visible in this chunk. */
93 fslmc_container = strdup(container);
94 if (!fslmc_container) {
95 DPAA2_BUS_ERR("Mem alloc failure; Container name");
100 fslmc_iommu_type = (rte_vfio_noiommu_is_enabled() == 1) ?
101 RTE_VFIO_NOIOMMU : VFIO_TYPE1_IOMMU;
103 /* get group number */
104 ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
105 fslmc_container, groupid);
107 DPAA2_BUS_ERR("Unable to find %s IOMMU group", fslmc_container);
111 DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
112 fslmc_container, *groupid);
/*
 * Attach the global vfio_group to a VFIO container.
 *
 * Reuses the existing container fd when VFIO_GROUP_SET_CONTAINER succeeds
 * with the cached vfio_container.fd; otherwise opens a fresh container via
 * rte_vfio_get_container_fd(), checks that the selected IOMMU type
 * (fslmc_iommu_type) is supported, binds the group to it and sets the IOMMU.
 * On success the vfio_container/vfio_group globals are cross-linked.
 * NOTE(review): truncated chunk -- return statements and several closing
 * braces are missing from view; also the "No container available" message
 * under `if (vfio_container.used)` reads inverted, confirm against full source.
 */
118 vfio_connect_container(void)
122 if (vfio_container.used) {
123 DPAA2_BUS_DEBUG("No container available");
127 /* Try connecting to vfio container if already created */
128 if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER,
129 &vfio_container.fd)) {
131 "Container pre-exists with FD[0x%x] for this group",
133 vfio_group.container = &vfio_container;
137 /* Opens main vfio file descriptor which represents the "container" */
138 fd = rte_vfio_get_container_fd();
140 DPAA2_BUS_ERR("Failed to open VFIO container");
144 /* Check whether support for SMMU type IOMMU present or not */
145 if (ioctl(fd, VFIO_CHECK_EXTENSION, fslmc_iommu_type)) {
146 /* Connect group to container */
147 ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
149 DPAA2_BUS_ERR("Failed to setup group container");
154 ret = ioctl(fd, VFIO_SET_IOMMU, fslmc_iommu_type);
156 DPAA2_BUS_ERR("Failed to setup VFIO iommu");
161 DPAA2_BUS_ERR("No supported IOMMU available");
/* Record the successful association in the module-level globals. */
166 vfio_container.used = 1;
167 vfio_container.fd = fd;
168 vfio_container.group = &vfio_group;
169 vfio_group.container = &vfio_container;
/*
 * Work-around mapping of the MSI interrupt region into the SMMU.
 *
 * mmap()s one 4K page of the container device at fixed offset 0x6030000
 * (presumably the GIC ITS translation register page -- TODO confirm against
 * hardware docs), records the MSI doorbell address at +64 bytes into the
 * global msi_intr_vaddr, then DMA-maps the page via VFIO_IOMMU_MAP_DMA.
 * See the related W.A. note near rte_fslmc_vfio_dmamap().
 * NOTE(review): truncated chunk -- map.size/map.iova initialization and the
 * return paths are not visible.
 */
174 static int vfio_map_irq_region(struct fslmc_vfio_group *group)
177 unsigned long *vaddr = NULL;
178 struct vfio_iommu_type1_dma_map map = {
179 .argsz = sizeof(map),
180 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
186 vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
187 PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
188 if (vaddr == MAP_FAILED) {
189 DPAA2_BUS_INFO("Unable to map region (errno = %d)", errno);
193 msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
194 map.vaddr = (unsigned long)vaddr;
195 ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
199 DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
203 static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
204 static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
/*
 * Memory hotplug event callback registered with the EAL
 * (rte_mem_event_callback_register in rte_fslmc_vfio_dmamap()).
 *
 * Walks the [addr, addr+len) range segment by segment: for each memseg it
 * DMA-maps (on RTE_MEM_EVENT_ALLOC) or unmaps (otherwise) the segment's
 * virtual/iova range via fslmc_map_dma()/fslmc_unmap_dma(). Segments whose
 * iova is RTE_BAD_IOVA are skipped.
 * NOTE(review): truncated chunk -- the cur_len advancement and map_len
 * computation are not visible; verify loop termination in the full source.
 */
207 fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
208 void *arg __rte_unused)
210 struct rte_memseg_list *msl;
211 struct rte_memseg *ms;
212 size_t cur_len = 0, map_len = 0;
214 rte_iova_t iova_addr;
217 msl = rte_mem_virt2memseg_list(addr);
219 while (cur_len < len) {
220 const void *va = RTE_PTR_ADD(addr, cur_len);
/* Resolve the memseg backing this VA to obtain its iova and base address. */
222 ms = rte_mem_virt2memseg(va, msl);
223 iova_addr = ms->iova;
224 virt_addr = ms->addr_64;
227 DPAA2_BUS_DEBUG("Request for %s, va=%p, "
228 "virt_addr=0x%" PRIx64 ", "
229 "iova=0x%" PRIx64 ", map_len=%zu",
230 type == RTE_MEM_EVENT_ALLOC ?
232 va, virt_addr, iova_addr, map_len);
234 /* iova_addr may be set to RTE_BAD_IOVA */
235 if (iova_addr == RTE_BAD_IOVA) {
236 DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
241 if (type == RTE_MEM_EVENT_ALLOC)
242 ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
244 ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
247 DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
248 "Map=%d, addr=%p, len=%zu, err:(%d)",
249 type, va, map_len, ret);
256 if (type == RTE_MEM_EVENT_ALLOC)
257 DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
260 DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
/*
 * DMA-map a single virtual range into the VFIO container's IOMMU.
 *
 * In NOIOMMU mode this is a no-op (hardware sees physical addresses
 * directly). Otherwise issues VFIO_IOMMU_MAP_DMA on the group's container
 * fd. The iova programmed is the physical iova when
 * RTE_LIBRTE_DPAA2_USE_PHYS_IOVA is set, else the virtual address itself
 * (VA == IOVA mode).
 * NOTE(review): truncated chunk -- dma_map.size assignment, the `group`
 * initialization, and return paths are not visible here.
 */
265 fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
267 struct fslmc_vfio_group *group;
268 struct vfio_iommu_type1_dma_map dma_map = {
269 .argsz = sizeof(struct vfio_iommu_type1_dma_map),
270 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
274 if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
275 DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
280 dma_map.vaddr = vaddr;
282 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
283 dma_map.iova = iovaddr;
285 dma_map.iova = dma_map.vaddr;
288 /* SET DMA MAP for IOMMU */
291 if (!group->container) {
292 DPAA2_BUS_ERR("Container is not connected ");
296 DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
297 (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
298 ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
300 DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
/*
 * Remove a DMA mapping previously installed by fslmc_map_dma().
 *
 * No-op in NOIOMMU mode; otherwise issues VFIO_IOMMU_UNMAP_DMA on the
 * group's container fd for [vaddr, vaddr+len).
 * NOTE(review): dma_unmap.iova is set from @vaddr, which matches the VA==IOVA
 * mapping mode; if RTE_LIBRTE_DPAA2_USE_PHYS_IOVA maps with the physical
 * iova (see fslmc_map_dma()), unmapping by vaddr looks inconsistent -- TODO
 * confirm against the full source.
 */
309 fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
311 struct fslmc_vfio_group *group;
312 struct vfio_iommu_type1_dma_unmap dma_unmap = {
313 .argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
318 if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
319 DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
323 dma_unmap.size = len;
324 dma_unmap.iova = vaddr;
326 /* SET DMA MAP for IOMMU */
329 if (!group->container) {
330 DPAA2_BUS_ERR("Container is not connected ");
334 DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
335 (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
336 ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
338 DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
/*
 * rte_memseg_walk() callback: DMA-map one existing memory segment.
 * Segments with an invalid iova (RTE_BAD_IOVA) are skipped rather than
 * treated as errors. Used from rte_fslmc_vfio_dmamap() to map all memory
 * present before the hotplug callback is installed.
 */
347 fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
348 const struct rte_memseg *ms, void *arg)
353 /* if IOVA address is invalid, skip */
354 if (ms->iova == RTE_BAD_IOVA)
357 ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
359 DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
/*
 * Public API: DMA-map an arbitrary caller-supplied (vaddr, iova, size)
 * triple into the fslmc VFIO container. Unlike fslmc_map_dma(), the iova is
 * always taken from the caller. No-op in NOIOMMU mode.
 * NOTE(review): truncated chunk -- dma_map.iova/dma_map.size assignments and
 * return paths are missing from view; also the error path uses printf()
 * instead of the DPAA2_BUS_* loggers used everywhere else in this file.
 */
368 rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size)
371 struct fslmc_vfio_group *group;
372 struct vfio_iommu_type1_dma_map dma_map = {
373 .argsz = sizeof(struct vfio_iommu_type1_dma_map),
374 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
377 if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
378 DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
382 /* SET DMA MAP for IOMMU */
384 if (!group->container) {
385 DPAA2_BUS_ERR("Container is not connected");
390 dma_map.vaddr = vaddr;
393 DPAA2_BUS_DEBUG("VFIOdmamap 0x%"PRIx64":0x%"PRIx64",size 0x%"PRIx64"\n",
394 (uint64_t)dma_map.vaddr, (uint64_t)dma_map.iova,
395 (uint64_t)dma_map.size);
396 ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
399 printf("Unable to map DMA address (errno = %d)\n",
/*
 * Map all currently-allocated EAL memory segments for DMA and install the
 * memory hotplug callback so future alloc/free events are mapped/unmapped
 * too (fslmc_memevent_cb). The whole sequence runs under the EAL memory
 * read lock so no hotplug event can race between the walk and the callback
 * registration. Also applies the IRQ-region mapping work-around
 * (vfio_map_irq_region) -- see the TODO below.
 */
407 int rte_fslmc_vfio_dmamap(void)
411 /* Lock before parsing and registering callback to memory subsystem */
412 rte_mcfg_mem_read_lock();
414 if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
415 rte_mcfg_mem_read_unlock();
419 ret = rte_mem_event_callback_register("fslmc_memevent_clb",
420 fslmc_memevent_cb, NULL);
/* ENOTSUP (e.g. legacy memory mode) is tolerated, not treated as fatal. */
421 if (ret && rte_errno == ENOTSUP)
422 DPAA2_BUS_DEBUG("Memory event callbacks not supported");
424 DPAA2_BUS_DEBUG("Unable to install memory handler");
426 DPAA2_BUS_DEBUG("Installed memory callback handler");
428 DPAA2_BUS_DEBUG("Total %d segments found.", i);
430 /* TODO - This is a W.A. as VFIO currently does not add the mapping of
431 * the interrupt region to SMMU. This should be removed once the
432 * support is added in the Kernel.
434 vfio_map_irq_region(&vfio_group);
436 /* Existing segments have been mapped and memory callback for hotplug
437 * has been installed.
439 rte_mcfg_mem_read_unlock();
/*
 * Open a VFIO device fd for one fsl-mc object.
 *
 * Steps: resolve the device's IOMMU group number, get the group fd
 * (skipping devices not bound to VFIO), open a container fd, verify group
 * viability, bind the group to the container and set the IOMMU type if not
 * already done, then fetch the device fd (written through @vfio_dev_fd) and
 * its VFIO_DEVICE_GET_INFO into @device_info.
 * NOTE(review): each error path below closes vfio_group_fd and/or
 * vfio_container_fd and clears the group -- but some branches visible here
 * (e.g. the SET_IOMMU failure) do not call rte_vfio_clear_group(); the chunk
 * is truncated, so confirm cleanup symmetry against the full source.
 */
445 fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
446 int *vfio_dev_fd, struct vfio_device_info *device_info)
448 struct vfio_group_status group_status = {
449 .argsz = sizeof(group_status)
451 int vfio_group_fd, vfio_container_fd, iommu_group_no, ret;
453 /* get group number */
454 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_no);
458 /* get the actual group fd */
459 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_no);
460 if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
464 * if vfio_group_fd == -ENOENT, that means the device
465 * isn't managed by VFIO
467 if (vfio_group_fd == -ENOENT) {
468 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
473 /* Opens main vfio file descriptor which represents the "container" */
474 vfio_container_fd = rte_vfio_get_container_fd();
475 if (vfio_container_fd < 0) {
476 DPAA2_BUS_ERR("Failed to open VFIO container");
480 /* check if the group is viable */
481 ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
483 DPAA2_BUS_ERR(" %s cannot get group status, "
484 "error %i (%s)\n", dev_addr,
485 errno, strerror(errno));
486 close(vfio_group_fd);
487 rte_vfio_clear_group(vfio_group_fd);
489 } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
490 DPAA2_BUS_ERR(" %s VFIO group is not viable!\n", dev_addr);
491 close(vfio_group_fd);
492 rte_vfio_clear_group(vfio_group_fd);
495 /* At this point, we know that this group is viable (meaning,
496 * all devices are either bound to VFIO or not bound to anything)
499 /* check if group does not have a container yet */
500 if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
502 /* add group to a container */
503 ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
506 DPAA2_BUS_ERR(" %s cannot add VFIO group to container, "
507 "error %i (%s)\n", dev_addr,
508 errno, strerror(errno));
509 close(vfio_group_fd);
510 close(vfio_container_fd);
511 rte_vfio_clear_group(vfio_group_fd);
516 * set an IOMMU type for container
519 if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
521 ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
524 DPAA2_BUS_ERR("Failed to setup VFIO iommu");
525 close(vfio_group_fd);
526 close(vfio_container_fd);
530 DPAA2_BUS_ERR("No supported IOMMU available");
531 close(vfio_group_fd);
532 close(vfio_container_fd);
537 /* get a file descriptor for the device */
538 *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
539 if (*vfio_dev_fd < 0) {
540 /* if we cannot get a device fd, this implies a problem with
541 * the VFIO group or the container not having IOMMU configured.
544 DPAA2_BUS_WARN("Getting a vfio_dev_fd for %s failed", dev_addr);
545 close(vfio_group_fd);
546 close(vfio_container_fd);
547 rte_vfio_clear_group(vfio_group_fd);
551 /* test and setup the device */
552 ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
554 DPAA2_BUS_ERR(" %s cannot get device info, error %i (%s)",
555 dev_addr, errno, strerror(errno));
557 close(vfio_group_fd);
558 close(vfio_container_fd);
559 rte_vfio_clear_group(vfio_group_fd);
/*
 * Map the MC portal object @mcp_obj into this process's address space.
 *
 * Opens the object through fslmc_vfio_setup_device(), queries its VFIO
 * region info, and mmap()s the region read/write. Returns the mapped
 * address, or (intptr_t)MAP_FAILED on error (the initializer default).
 * NOTE(review): truncated chunk -- the mc_fd declaration, the return value
 * check of fslmc_vfio_setup_device(), and the final return are not visible.
 */
566 static intptr_t vfio_map_mcp_obj(const char *mcp_obj)
568 intptr_t v_addr = (intptr_t)MAP_FAILED;
570 struct vfio_group_status status = { .argsz = sizeof(status) };
572 struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
573 struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
575 fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, mcp_obj,
578 /* getting device region info*/
579 ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
581 DPAA2_BUS_ERR("Error in VFIO getting REGION_INFO");
585 v_addr = (size_t)mmap(NULL, reg_info.size,
586 PROT_WRITE | PROT_READ, MAP_SHARED,
587 mc_fd, reg_info.offset);
595 #define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
/*
 * Enable a VFIO interrupt: binds the eventfd stored in
 * intr_handle->fd to IRQ @index of the device via VFIO_DEVICE_SET_IRQS
 * (EVENTFD data + TRIGGER action). The irq_set buffer is sized to hold the
 * header plus one int fd (IRQ_SET_BUF_LEN).
 * NOTE(review): truncated -- irq_set->count/start assignments and the
 * return statements are not visible in this chunk.
 */
597 int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index)
600 char irq_set_buf[IRQ_SET_BUF_LEN];
601 struct vfio_irq_set *irq_set;
604 len = sizeof(irq_set_buf);
606 irq_set = (struct vfio_irq_set *)irq_set_buf;
607 irq_set->argsz = len;
610 VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
611 irq_set->index = index;
/* The eventfd is carried in the variable-length data[] tail of irq_set. */
613 fd_ptr = (int *)&irq_set->data;
614 *fd_ptr = intr_handle->fd;
616 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
618 DPAA2_BUS_ERR("Error:dpaa2 SET IRQs fd=%d, err = %d(%s)",
619 intr_handle->fd, errno, strerror(errno));
/*
 * Disable a VFIO interrupt for IRQ @index: issues VFIO_DEVICE_SET_IRQS with
 * DATA_NONE + TRIGGER, which detaches any trigger previously installed by
 * rte_dpaa2_intr_enable(). Here argsz is only sizeof(struct vfio_irq_set)
 * since no eventfd payload follows the header.
 */
626 int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
628 struct vfio_irq_set *irq_set;
629 char irq_set_buf[IRQ_SET_BUF_LEN];
632 len = sizeof(struct vfio_irq_set);
634 irq_set = (struct vfio_irq_set *)irq_set_buf;
635 irq_set->argsz = len;
636 irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
637 irq_set->index = index;
641 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
644 "Error disabling dpaa2 interrupts for fd %d",
650 /* set up interrupt support (but not enable interrupts) */
/*
 * Prepare interrupt support for a device without enabling interrupts.
 *
 * Scans the device's IRQ indices (0..num_irqs-1), skips vectors that do not
 * support eventfd signalling, and for the first usable vector creates a
 * non-blocking close-on-exec eventfd, recording fd / type / vfio_dev_fd in
 * @intr_handle. Interrupt type is reported as RTE_INTR_HANDLE_VFIO_MSI.
 * NOTE(review): truncated -- the success return inside the loop and the
 * final "not found" return value are not visible in this chunk.
 */
652 rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
658 /* start from MSI-X interrupt type */
659 for (i = 0; i < num_irqs; i++) {
660 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
665 ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
667 DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)",
668 i, errno, strerror(errno));
672 /* if this vector cannot be used with eventfd,
673 * fail if we explicitly
674 * specified interrupt type, otherwise continue
676 if ((irq_info.flags & VFIO_IRQ_INFO_EVENTFD) == 0)
679 /* set up an eventfd for interrupts */
680 fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
682 DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)",
683 errno, strerror(errno));
687 intr_handle->fd = fd;
688 intr_handle->type = RTE_INTR_HANDLE_VFIO_MSI;
689 intr_handle->vfio_dev_fd = vfio_dev_fd;
694 /* if we're here, we haven't found a suitable interrupt vector */
/*
 * Process one I/O-type DPAA2 device (ETH, CRYPTO, ...):
 * opens the device through fslmc_vfio_setup_device(), configures interrupts
 * for types that need them, then dispatches to the matching registered
 * object driver's create() hook from dpaa2_obj_list (see
 * rte_fslmc_object_register()).
 * NOTE(review): truncated -- the switch cases between the intr setup and
 * the TAILQ_FOREACH dispatch, and the return value, are not visible here.
 */
 * fslmc_process_iodevices for processing only IO (ETH, CRYPTO, and possibly
703 fslmc_process_iodevices(struct rte_dpaa2_device *dev)
706 struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
707 struct rte_dpaa2_object *object = NULL;
709 fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, dev->device.name,
710 &dev_fd, &device_info);
712 switch (dev->dev_type) {
714 rte_dpaa2_vfio_setup_intr(&dev->intr_handle, dev_fd,
715 device_info.num_irqs);
/* Hand the opened device to whichever object driver claims its dev_type. */
723 TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
724 if (dev->dev_type == object->dev_type)
725 object->create(dev_fd, &device_info,
735 DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO",
/*
 * Initialize the MC portal for @dev.
 *
 * Allocates the global rte_mcp_ptr_list table, maps the portal object via
 * vfio_map_mcp_obj(), and (primary process only) validates the detected MC
 * firmware version against MC_VER_MAJOR/MC_VER_MINOR before publishing the
 * mapped address at rte_mcp_ptr_list[MC_PORTAL_INDEX]. Secondary processes
 * skip the version check and publish the address directly.
 * On any failure the table is freed and reset to NULL (cleanup tail below).
 */
741 fslmc_process_mcp(struct rte_dpaa2_device *dev)
745 struct fsl_mc_io dpmng = {0};
746 struct mc_version mc_ver_info = {0};
748 rte_mcp_ptr_list = malloc(sizeof(void *) * (MC_PORTAL_INDEX + 1));
749 if (!rte_mcp_ptr_list) {
750 DPAA2_BUS_ERR("Unable to allocate MC portal memory");
755 v_addr = vfio_map_mcp_obj(dev->device.name);
756 if (v_addr == (intptr_t)MAP_FAILED) {
757 DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno);
762 /* check the MC version compatibility */
763 dpmng.regs = (void *)v_addr;
765 /* In case of secondary processes, MC version check is no longer
768 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
769 rte_mcp_ptr_list[MC_PORTAL_INDEX] = (void *)v_addr;
773 if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) {
774 DPAA2_BUS_ERR("Unable to obtain MC version");
/* Major must match exactly; minor may be newer than the minimum. */
779 if ((mc_ver_info.major != MC_VER_MAJOR) ||
780 (mc_ver_info.minor < MC_VER_MINOR)) {
781 DPAA2_BUS_ERR("DPAA2 MC version not compatible!"
782 " Expected %d.%d.x, Detected %d.%d.%d",
783 MC_VER_MAJOR, MC_VER_MINOR,
784 mc_ver_info.major, mc_ver_info.minor,
785 mc_ver_info.revision);
789 rte_mcp_ptr_list[MC_PORTAL_INDEX] = (void *)v_addr;
/* Error cleanup: release the portal table so callers see a clean state.
 * NOTE(review): `if (rte_mcp_ptr_list)` guard is redundant before free(). */
794 if (rte_mcp_ptr_list) {
795 free(rte_mcp_ptr_list);
796 rte_mcp_ptr_list = NULL;
/*
 * Walk the fslmc bus device list and bring up every discovered device.
 *
 * Pass 1 counts DPMCP and DPIO devices and notes whether either type is
 * blocklisted via devargs. Pass 2 initializes exactly one MC portal
 * (fslmc_process_mcp) -- mandatory; failure here aborts. In secondary
 * processes, all but the last non-blocklisted DPMCP are removed so primary
 * and secondary use distinct portals. Pass 3 processes the remaining
 * devices: I/O devices go through fslmc_process_iodevices(); control
 * objects (dpbp/dpcon/dpci) are skipped in secondary processes, which rely
 * on the primary's initialization; DPIOs are split between primary and
 * secondary by position (current_device vs dpio_count); unknown types are
 * dropped from the list.
 * NOTE(review): heavily truncated chunk -- counter increments, `continue`
 * statements, and several case labels are not visible; the summary above is
 * inferred from the visible branches and should be verified.
 */
803 fslmc_vfio_process_group(void)
806 int found_mportal = 0;
807 struct rte_dpaa2_device *dev, *dev_temp;
808 bool is_dpmcp_in_blocklist = false, is_dpio_in_blocklist = false;
809 int dpmcp_count = 0, dpio_count = 0, current_device;
/* Pass 1: census of DPMCP/DPIO devices and their blocklist status. */
811 TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
812 if (dev->dev_type == DPAA2_MPORTAL) {
814 if (dev->device.devargs &&
815 dev->device.devargs->policy == RTE_DEV_BLACKLISTED)
816 is_dpmcp_in_blocklist = true;
818 if (dev->dev_type == DPAA2_IO) {
820 if (dev->device.devargs &&
821 dev->device.devargs->policy == RTE_DEV_BLACKLISTED)
822 is_dpio_in_blocklist = true;
826 /* Search the MCP as that should be initialized first. */
828 TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
829 if (dev->dev_type == DPAA2_MPORTAL) {
831 if (dev->device.devargs &&
832 dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
833 DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
835 TAILQ_REMOVE(&rte_fslmc_bus.device_list,
840 if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
841 !is_dpmcp_in_blocklist) {
842 if (dpmcp_count == 1 ||
843 current_device != dpmcp_count) {
844 TAILQ_REMOVE(&rte_fslmc_bus.device_list,
850 if (!found_mportal) {
851 ret = fslmc_process_mcp(dev);
853 DPAA2_BUS_ERR("Unable to map MC Portal");
/* MC portal devices are consumed here and removed from the bus list. */
859 TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
862 /* Ideally there is only a single dpmcp, but in case
863 * multiple exists, looping on remaining devices.
868 /* Cannot continue if there is not even a single mportal */
869 if (!found_mportal) {
870 DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
/* Pass 3: initialize every remaining device according to its type. */
875 TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
876 if (dev->dev_type == DPAA2_IO)
878 if (dev->device.devargs &&
879 dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
880 DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
882 TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
885 if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
886 dev->dev_type != DPAA2_ETH &&
887 dev->dev_type != DPAA2_CRYPTO &&
888 dev->dev_type != DPAA2_QDMA &&
889 dev->dev_type != DPAA2_IO) {
890 TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
893 switch (dev->dev_type) {
897 ret = fslmc_process_iodevices(dev);
899 DPAA2_BUS_DEBUG("Dev (%s) init failed",
909 /* IN case of secondary processes, all control objects
910 * like dpbp, dpcon, dpci are not initialized/required
911 * - all of these are assumed to be initialized and made
912 * available by primary.
914 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
917 /* Call the object creation routine and remove the
918 * device entry from device list
920 ret = fslmc_process_iodevices(dev);
922 DPAA2_BUS_DEBUG("Dev (%s) init failed",
/* DPIO partitioning: with >1 usable DPIO, secondary takes the last one
 * and primary leaves it alone, so the two never share a portal. */
929 if (!is_dpio_in_blocklist && dpio_count > 1) {
930 if (rte_eal_process_type() == RTE_PROC_SECONDARY
931 && current_device != dpio_count) {
932 TAILQ_REMOVE(&rte_fslmc_bus.device_list,
936 if (rte_eal_process_type() == RTE_PROC_PRIMARY
937 && current_device == dpio_count) {
938 TAILQ_REMOVE(&rte_fslmc_bus.device_list,
944 ret = fslmc_process_iodevices(dev);
946 DPAA2_BUS_DEBUG("Dev (%s) init failed",
954 /* Unknown - ignore */
955 DPAA2_BUS_DEBUG("Found unknown device (%s)",
957 TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
/*
 * One-time setup of the fslmc VFIO group for the DPRC container.
 *
 * Idempotent: returns early if container_device_fd is already set, or if the
 * resolved group id matches the cached vfio_group.groupid. Otherwise it
 * opens the group fd, verifies viability (VFIO_GROUP_GET_STATUS), connects
 * the group to a container when needed (vfio_connect_container()), and
 * finally obtains the container device fd via VFIO_GROUP_GET_DEVICE_FD on
 * the container name, caching it in the global container_device_fd.
 * NOTE(review): truncated chunk -- the assignment of vfio_group.fd from the
 * rte_vfio_get_group_fd() result and several return statements are not
 * visible; also "groupid already exists" is logged at ERR level although the
 * early-return looks like a benign already-initialized case -- confirm.
 */
967 fslmc_vfio_setup_group(void)
971 struct vfio_group_status status = { .argsz = sizeof(status) };
973 /* if already done once */
974 if (container_device_fd)
977 ret = fslmc_get_container_group(&groupid);
981 /* In case this group was already opened, continue without any
984 if (vfio_group.groupid == groupid) {
985 DPAA2_BUS_ERR("groupid already exists %d", groupid);
989 /* Get the actual group fd */
990 ret = rte_vfio_get_group_fd(groupid);
995 /* Check group viability */
996 ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status);
998 DPAA2_BUS_ERR("VFIO error getting group status");
999 close(vfio_group.fd);
1000 rte_vfio_clear_group(vfio_group.fd);
1004 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
1005 DPAA2_BUS_ERR("VFIO group not viable");
1006 close(vfio_group.fd);
1007 rte_vfio_clear_group(vfio_group.fd);
1010 /* Since Group is VIABLE, Store the groupid */
1011 vfio_group.groupid = groupid;
1013 /* check if group does not have a container yet */
1014 if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
1015 /* Now connect this IOMMU group to given container */
1016 ret = vfio_connect_container();
1019 "Error connecting container with groupid %d",
1021 close(vfio_group.fd);
1022 rte_vfio_clear_group(vfio_group.fd);
1027 /* Get Device information */
1028 ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, fslmc_container);
1030 DPAA2_BUS_ERR("Error getting device %s fd from group %d",
1031 fslmc_container, vfio_group.groupid);
1032 close(vfio_group.fd);
1033 rte_vfio_clear_group(vfio_group.fd);
1036 container_device_fd = ret;
1037 DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]",
1038 container_device_fd);