4 * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
5 * Copyright (c) 2016 NXP. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Freescale Semiconductor, Inc nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <sys/types.h>
41 #include <sys/ioctl.h>
43 #include <sys/types.h>
48 #include <sys/eventfd.h>
51 #include <rte_ethdev.h>
52 #include <rte_malloc.h>
53 #include <rte_memcpy.h>
54 #include <rte_string_fns.h>
55 #include <rte_cycles.h>
56 #include <rte_kvargs.h>
58 #include <rte_ethdev.h>
61 #include "rte_fslmc.h"
62 #include "fslmc_vfio.h"
64 #include "portal/dpaa2_hw_pvt.h"
65 #include "portal/dpaa2_hw_dpio.h"
/* Maximum number of VFIO containers this driver manages (single DPRC). */
67 #define VFIO_MAX_CONTAINERS 1
/* Driver-local log helper: prefixes every message with the caller's name. */
69 #define FSLMC_VFIO_LOG(level, fmt, args...) \
70 RTE_LOG(level, EAL, "%s(): " fmt "\n", __func__, ##args)
72 /** Pathname of FSL-MC devices directory. */
73 #define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
75 /* Number of VFIO containers & groups with in */
76 static struct fslmc_vfio_group vfio_groups[VFIO_MAX_GRP];
77 static struct fslmc_vfio_container vfio_containers[VFIO_MAX_CONTAINERS];
/* fd of the DPRC container device; also used as a "setup already done"
 * flag by fslmc_vfio_setup_group().
 */
78 static int container_device_fd;
/* Virtual address of the MSI interrupt page, set in vfio_map_irq_region(). */
79 static uint32_t *msi_intr_vaddr;
/* Table of mapped MC portal virtual addresses; entry 0 is filled by
 * fslmc_vfio_process_group() from the dpmcp mapping.
 */
80 void *(*rte_mcp_ptr_list);
/* Object id parsed from the dpmcp.<id> sysfs name. */
81 static uint32_t mcp_id;
/* NOTE(review): no reader or writer of this flag is visible in this chunk. */
82 static int is_dma_done;
/*
 * Attach @vfio_group to a VFIO container: reuse an existing container when
 * VFIO_GROUP_SET_CONTAINER succeeds against one of the known container fds,
 * otherwise open a new container fd, bind the group to it and select the
 * Type1 (SMMU) IOMMU backend.
 * NOTE(review): lines appear elided from this chunk (embedded original
 * line numbers skip); comments describe only the visible logic.
 */
84 static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
86 struct fslmc_vfio_container *container;
89 /* Try connecting to vfio container if already created */
90 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
91 container = &vfio_containers[i];
/* ioctl returning 0 means the group is now bound to this container */
92 if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER,
94 FSLMC_VFIO_LOG(INFO, "Container pre-exists with"
95 " FD[0x%x] for this group",
97 vfio_group->container = container;
102 /* Opens main vfio file descriptor which represents the "container" */
103 fd = vfio_get_container_fd();
105 FSLMC_VFIO_LOG(ERR, "Failed to open VFIO container");
109 /* Check whether support for SMMU type IOMMU present or not */
110 if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
111 /* Connect group to container */
112 ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
114 FSLMC_VFIO_LOG(ERR, "Failed to setup group container");
/* Select the Type1 IOMMU model for the freshly bound container */
119 ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
121 FSLMC_VFIO_LOG(ERR, "Failed to setup VFIO iommu");
126 FSLMC_VFIO_LOG(ERR, "No supported IOMMU available");
/* Claim the first unused slot in the container table */
132 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
133 if (vfio_containers[i].used)
135 FSLMC_VFIO_LOG(DEBUG, "Unused container at index %d", i);
136 container = &vfio_containers[i];
139 FSLMC_VFIO_LOG(ERR, "No free container found");
/* Cross-link the group and its container */
146 container->group_list[container->index] = vfio_group;
147 vfio_group->container = container;
/*
 * mmap() one 4 KiB page of the DPRC container device at offset 0x6030000
 * and register it with the container's IOMMU (read+write) so device MSI
 * writes can reach it.  Presumably this is the MSI doorbell/ITS page --
 * TODO(review): confirm what lives at offset 0x6030000.
 */
152 static int vfio_map_irq_region(struct fslmc_vfio_group *group)
155 unsigned long *vaddr = NULL;
156 struct vfio_iommu_type1_dma_map map = {
157 .argsz = sizeof(map),
158 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
164 vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
165 PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
166 if (vaddr == MAP_FAILED) {
167 FSLMC_VFIO_LOG(ERR, "Unable to map region (errno = %d)", errno);
/* Remember the interrupt address 64 bytes into the mapped page */
171 msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
172 map.vaddr = (unsigned long)vaddr;
/* Make the page visible to devices through the SMMU */
173 ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
177 FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA fails (errno = %d)", errno);
/*
 * Create a single VFIO Type1 DMA mapping (read+write) for a caller-supplied
 * memory region in the container owned by vfio_groups[0].
 * NOTE(review): only the vaddr parameter/assignment is visible here; the
 * iova and size parameters appear elided from this chunk -- confirm against
 * the full source.
 */
181 int vfio_dmamap_mem_region(uint64_t vaddr,
185 struct fslmc_vfio_group *group;
186 struct vfio_iommu_type1_dma_map dma_map = {
187 .argsz = sizeof(dma_map),
188 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
191 dma_map.vaddr = vaddr;
195 /* SET DMA MAP for IOMMU */
196 group = &vfio_groups[0];
197 if (ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map)) {
198 FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA (errno = %d)", errno);
/*
 * Walk the EAL physical memory layout and DMA-map every populated memseg
 * into the IOMMU of vfio_groups[0]'s container, then work around the
 * missing kernel mapping of the interrupt region (see W.A. note below).
 */
204 int rte_fslmc_vfio_dmamap(void)
207 struct fslmc_vfio_group *group;
208 struct vfio_iommu_type1_dma_map dma_map = {
209 .argsz = sizeof(struct vfio_iommu_type1_dma_map),
210 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
214 const struct rte_memseg *memseg;
220 for (i = 0; i < RTE_MAX_MEMSEG; i++) {
/* NOTE(review): rte_eal_get_physmem_layout() is loop-invariant; it
 * could be hoisted above the loop.
 */
221 memseg = rte_eal_get_physmem_layout();
222 if (memseg == NULL) {
223 FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
/* Skip empty/unused memseg slots */
227 if (memseg[i].addr == NULL && memseg[i].len == 0)
230 dma_map.size = memseg[i].len;
231 dma_map.vaddr = memseg[i].addr_64;
232 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
/* IOVA == physical address when physical addressing is configured */
233 dma_map.iova = memseg[i].phys_addr;
/* otherwise identity-map IOVA to the virtual address */
235 dma_map.iova = dma_map.vaddr;
238 /* SET DMA MAP for IOMMU */
239 group = &vfio_groups[0];
241 if (!group->container) {
242 FSLMC_VFIO_LOG(ERR, "Container is not connected ");
246 FSLMC_VFIO_LOG(DEBUG, "-->Initial SHM Virtual ADDR %llX",
248 FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX\n", dma_map.size);
249 ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
252 FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API"
253 "(errno = %d)", errno);
256 FSLMC_VFIO_LOG(DEBUG, "-----> dma_map.vaddr = 0x%llX",
260 /* TODO - This is a W.A. as VFIO currently does not add the mapping of
261 * the interrupt region to SMMU. This should be removed once the
262 * support is added in the Kernel.
264 vfio_map_irq_region(group);
269 static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
271 int64_t v_addr = (int64_t)MAP_FAILED;
274 struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
275 struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
277 /* getting the mcp object's fd*/
278 mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
280 FSLMC_VFIO_LOG(ERR, "error in VFIO get device %s fd from group"
281 " %d", mcp_obj, group->fd);
285 /* getting device info*/
286 ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
288 FSLMC_VFIO_LOG(ERR, "error in VFIO getting DEVICE_INFO");
292 /* getting device region info*/
293 ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info);
295 FSLMC_VFIO_LOG(ERR, "error in VFIO getting REGION_INFO");
299 FSLMC_VFIO_LOG(DEBUG, "region offset = %llx , region size = %llx",
300 reg_info.offset, reg_info.size);
302 v_addr = (uint64_t)mmap(NULL, reg_info.size,
303 PROT_WRITE | PROT_READ, MAP_SHARED,
304 mc_fd, reg_info.offset);
313 dpaa2_compare_dpaa2_dev(const struct rte_dpaa2_device *dev,
314 const struct rte_dpaa2_device *dev2)
316 /*not the same family device */
317 if (dev->dev_type != DPAA2_MC_DPNI_DEVID ||
318 dev->dev_type != DPAA2_MC_DPSECI_DEVID)
321 if (dev->object_id == dev2->object_id)
/*
 * Insert @dev into the fslmc bus device list, keeping the list sorted via
 * dpaa2_compare_dpaa2_dev(); falls back to tail insertion for an empty
 * list (and, per the visible trailing insert, when no insertion point is
 * found during traversal).
 */
328 fslmc_bus_add_device(struct rte_dpaa2_device *dev)
330 struct rte_fslmc_device_list *dev_l;
332 dev_l = &rte_fslmc_bus.device_list;
334 /* device is valid, add in list (sorted) */
335 if (TAILQ_EMPTY(dev_l)) {
336 TAILQ_INSERT_TAIL(dev_l, dev, next);
338 struct rte_dpaa2_device *dev2;
/* Scan for the first element @dev should precede */
341 TAILQ_FOREACH(dev2, dev_l, next) {
342 ret = dpaa2_compare_dpaa2_dev(dev, dev2);
346 TAILQ_INSERT_BEFORE(dev2, dev, next);
350 TAILQ_INSERT_TAIL(dev_l, dev, next);
354 /* Following function shall fetch total available list of MC devices
355 * from VFIO container & populate private list of devices and other
/*
 * Scan /sys/kernel/iommu_groups/<id>/devices twice: first to count objects
 * and locate the dpmcp (MC portal) object, then to open and classify each
 * object (dpni/dpseci go onto the bus list; dpio/dpbp get device-specific
 * constructors).  Re-entry is short-circuited via a function-local static.
 * NOTE(review): lines are elided from this chunk (embedded original line
 * numbers skip); comments describe only the visible logic.
 */
358 int fslmc_vfio_process_group(void)
360 struct fslmc_vfio_device *vdev;
361 struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
362 char *temp_obj, *object_type, *mcp_obj, *dev_name;
363 int32_t object_id, i, dev_fd, ret;
369 int dpio_count = 0, dpbp_count = 0;
370 struct fslmc_vfio_group *group = &vfio_groups[0];
/* guards against scanning the group more than once per process */
371 static int process_once;
373 /* if already done once */
375 FSLMC_VFIO_LOG(DEBUG, "Already scanned once - re-scan "
381 sprintf(path, "/sys/kernel/iommu_groups/%d/devices", group->groupid);
385 FSLMC_VFIO_LOG(ERR, "Unable to open directory %s", path);
389 /*Counting the number of devices in a group and getting the mcp ID*/
392 while ((dir = readdir(d)) != NULL) {
393 if (dir->d_type == DT_LNK) {
395 if (!strncmp("dpmcp", dir->d_name, 5)) {
/* sizeof(dir->d_name) is the size of the fixed d_name array, so the
 * copy below fits; the name is duplicated because strtok() mutates
 * dir->d_name in place.
 */
398 mcp_obj = malloc(sizeof(dir->d_name));
400 FSLMC_VFIO_LOG(ERR, "mcp obj:Unable to"
405 strcpy(mcp_obj, dir->d_name);
406 temp_obj = strtok(dir->d_name, ".");
/* second token of "dpmcp.<id>" is the numeric object id */
407 temp_obj = strtok(NULL, ".");
408 sscanf(temp_obj, "%d", &mcp_id);
415 FSLMC_VFIO_LOG(ERR, "DPAA2 MCP Object not Found");
418 RTE_LOG(INFO, EAL, "fslmc: DPRC contains = %d devices\n", ndev_count);
420 /* Allocate the memory depends upon number of objects in a group*/
421 group->vfio_device = (struct fslmc_vfio_device *)malloc(ndev_count *
422 sizeof(struct fslmc_vfio_device));
423 if (!(group->vfio_device)) {
424 FSLMC_VFIO_LOG(ERR, "vfio device: Unable to allocate memory\n");
429 /* Allocate memory for MC Portal list */
430 rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
431 if (!rte_mcp_ptr_list) {
432 FSLMC_VFIO_LOG(ERR, "portal list: Unable to allocate memory!");
/* Map the MC portal found above and publish it as portal 0 */
437 v_addr = vfio_map_mcp_obj(group, mcp_obj);
439 if (v_addr == (int64_t)MAP_FAILED) {
440 FSLMC_VFIO_LOG(ERR, "Error mapping region (errno = %d)", errno);
444 FSLMC_VFIO_LOG(DEBUG, "DPAA2 MC has VIR_ADD = %ld", v_addr);
446 rte_mcp_ptr_list[0] = (void *)v_addr;
450 FSLMC_VFIO_LOG(ERR, "Unable to open %s Directory", path);
455 FSLMC_VFIO_LOG(DEBUG, "DPAA2 - Parsing devices:");
456 /* Parsing each object and initiating them*/
457 while ((dir = readdir(d)) != NULL) {
458 if (dir->d_type != DT_LNK)
/* dprc and dpmcp were already handled; skip them here */
460 if (!strncmp("dprc", dir->d_name, 4) ||
461 !strncmp("dpmcp", dir->d_name, 5))
463 dev_name = malloc(sizeof(dir->d_name));
465 FSLMC_VFIO_LOG(ERR, "name: Unable to allocate memory");
468 strcpy(dev_name, dir->d_name);
469 object_type = strtok(dir->d_name, ".");
470 temp_obj = strtok(NULL, ".");
471 sscanf(temp_obj, "%d", &object_id);
472 FSLMC_VFIO_LOG(DEBUG, " - %s ", dev_name);
474 /* getting the device fd*/
475 dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name);
477 FSLMC_VFIO_LOG(ERR, "VFIO_GROUP_GET_DEVICE_FD error"
478 " Device fd: %s, Group: %d",
479 dev_name, group->fd);
485 vdev = &group->vfio_device[group->object_index++];
489 /* Get Device information */
490 if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &device_info)) {
491 FSLMC_VFIO_LOG(ERR, "DPAA2 VFIO_DEVICE_GET_INFO fail");
/* dpni/dpseci objects are exposed on the fslmc bus for driver probing */
494 if (!strcmp(object_type, "dpni") ||
495 !strcmp(object_type, "dpseci")) {
496 struct rte_dpaa2_device *dev;
498 dev = malloc(sizeof(struct rte_dpaa2_device));
502 memset(dev, 0, sizeof(*dev));
503 /* store hw_id of dpni/dpseci device */
504 dev->object_id = object_id;
505 dev->dev_type = (strcmp(object_type, "dpseci")) ?
506 DPAA2_MC_DPNI_DEVID : DPAA2_MC_DPSECI_DEVID;
508 FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added [%s-%d]\n",
509 object_type, object_id);
511 fslmc_bus_add_device(dev);
513 if (!strcmp(object_type, "dpio")) {
514 ret = dpaa2_create_dpio_device(vdev,
520 if (!strcmp(object_type, "dpbp")) {
521 ret = dpaa2_create_dpbp_device(object_id);
/* Bind a QBMan software portal to the calling thread */
528 ret = dpaa2_affine_qbman_swp();
530 FSLMC_VFIO_LOG(DEBUG, "Error in affining qbman swp %d", ret);
532 FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added dpbp_count = %d dpio_count=%d\n",
533 dpbp_count, dpio_count);
/* Failure cleanup: release the portal list and per-group device array */
539 if (rte_mcp_ptr_list) {
540 free(rte_mcp_ptr_list);
541 rte_mcp_ptr_list = NULL;
544 free(group->vfio_device);
545 group->vfio_device = NULL;
/*
 * One-time VFIO setup for the DPRC named in the DPRC environment variable:
 * resolve its IOMMU group, verify the group is viable, connect it to a
 * container when needed, and cache the DPRC device fd in
 * container_device_fd (which doubles as the "already done" flag).
 * NOTE(review): lines are elided from this chunk; comments describe only
 * the visible logic.
 */
549 int fslmc_vfio_setup_group(void)
551 struct fslmc_vfio_group *group = NULL;
555 struct vfio_group_status status = { .argsz = sizeof(status) };
557 /* if already done once */
558 if (container_device_fd)
/* DPRC name (e.g. "dprc.2") comes from the environment */
561 container = getenv("DPRC")
563 if (container == NULL) {
564 FSLMC_VFIO_LOG(ERR, "VFIO container not set in env DPRC");
568 /* get group number */
569 ret = vfio_get_group_no(SYSFS_FSL_MC_DEVICES, container, &groupid);
571 RTE_LOG(WARNING, EAL, "%s not managed by VFIO, skipping\n",
576 /* if negative, something failed */
580 FSLMC_VFIO_LOG(DEBUG, "VFIO iommu group id = %d", groupid);
582 /* Check if group already exists */
583 for (i = 0; i < VFIO_MAX_GRP; i++) {
584 group = &vfio_groups[i];
585 if (group->groupid == groupid) {
586 FSLMC_VFIO_LOG(ERR, "groupid already exists %d",
592 /* get the actual group fd */
593 ret = vfio_get_group_fd(groupid);
599 * at this point, we know that this group is viable (meaning,
600 * all devices are either bound to VFIO or not bound to anything)
603 ret = ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status);
605 FSLMC_VFIO_LOG(ERR, " VFIO error getting group status");
610 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
611 FSLMC_VFIO_LOG(ERR, "VFIO group not viable");
615 /* Since Group is VIABLE, Store the groupid */
616 group->groupid = groupid;
618 /* check if group does not have a container yet */
619 if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
620 /* Now connect this IOMMU group to given container */
621 ret = vfio_connect_container(group);
623 FSLMC_VFIO_LOG(ERR, "VFIO error connecting container"
624 " with groupid %d", groupid);
630 /* Get Device information */
631 ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, container);
633 FSLMC_VFIO_LOG(ERR, "VFIO error getting device %s fd from"
634 " group %d", container, group->groupid);
/* Cache the DPRC device fd; also marks setup as complete */
637 container_device_fd = ret;
638 FSLMC_VFIO_LOG(DEBUG, "VFIO Container FD is [0x%X]",
639 container_device_fd);