* BSD LICENSE
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
-#include <sys/types.h>
#include <sys/mman.h>
#include <sys/vfs.h>
+#include <sys/eventfd.h>
#include <libgen.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
#include <rte_bus.h>
#include "rte_fslmc.h"
#include "fslmc_vfio.h"
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
#define VFIO_MAX_CONTAINERS 1
#define FSLMC_VFIO_LOG(level, fmt, args...) \
static struct fslmc_vfio_group vfio_groups[VFIO_MAX_GRP];
static struct fslmc_vfio_container vfio_containers[VFIO_MAX_CONTAINERS];
static int container_device_fd;
+static uint32_t *msi_intr_vaddr;
void *(*rte_mcp_ptr_list);
static uint32_t mcp_id;
+static int is_dma_done;
+static struct rte_fslmc_object_list fslmc_obj_list =
+ TAILQ_HEAD_INITIALIZER(fslmc_obj_list);
+
+/* Register a DPAA2 object with the fslmc bus */
+void
+rte_fslmc_object_register(struct rte_dpaa2_object *object)
+{
+ RTE_VERIFY(object);
+
+ TAILQ_INSERT_TAIL(&fslmc_obj_list, object, next);
+}
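+
+/*
+ * A minimal usage sketch (hypothetical "dpio" object driver). The
+ * .name/.create fields match how rte_dpaa2_object is consumed below;
+ * the constructor wiring is an assumption:
+ *
+ *	static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
+ *		.name = "dpio",
+ *		.create = dpaa2_create_dpio_device,
+ *	};
+ *
+ *	static void __attribute__((constructor, used))
+ *	dpaa2objinitfn_dpio(void)
+ *	{
+ *		rte_fslmc_object_register(&rte_dpaa2_dpio_obj);
+ *	}
+ */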
static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
{
container = &vfio_containers[i];
if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER,
&container->fd)) {
- FSLMC_VFIO_LOG(INFO, "Container pre-exists with"
- " FD[0x%x] for this group",
- container->fd);
+ FSLMC_VFIO_LOG(INFO,
+ "Container pre-exists with FD[0x%x] for this group",
+ container->fd);
vfio_group->container = container;
return 0;
}
for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
if (vfio_containers[i].used)
continue;
- FSLMC_VFIO_LOG(DEBUG, "Unused container at index %d", i);
container = &vfio_containers[i];
}
if (!container) {
return -ENOMEM;
}
-int vfio_dmamap_mem_region(uint64_t vaddr,
- uint64_t iova,
- uint64_t size)
+static int vfio_map_irq_region(struct fslmc_vfio_group *group)
+{
+ int ret;
+ unsigned long *vaddr = NULL;
+ struct vfio_iommu_type1_dma_map map = {
+ .argsz = sizeof(map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ .vaddr = 0x6030000,
+ .iova = 0x6030000,
+ .size = 0x1000,
+ };
+
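+ /* 0x6030000 is assumed to be the GIC ITS translation frame on this
+ * SoC; GITS_TRANSLATER sits at offset 0x40 inside the 4K frame,
+ * hence the "+ 64" applied to the mapped address below.
+ */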
+ vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
+ PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
+ if (vaddr == MAP_FAILED) {
+ FSLMC_VFIO_LOG(ERR, "Unable to map region (errno = %d)", errno);
+ return -errno;
+ }
+
+ msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
+ map.vaddr = (unsigned long)vaddr;
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
+ if (ret == 0)
+ return 0;
+
+ FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA fails (errno = %d)", errno);
+ return -errno;
+}
+
+int rte_fslmc_vfio_dmamap(void)
{
+ int ret;
struct fslmc_vfio_group *group;
struct vfio_iommu_type1_dma_map dma_map = {
- .argsz = sizeof(dma_map),
+ .argsz = sizeof(struct vfio_iommu_type1_dma_map),
.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
};
- dma_map.vaddr = vaddr;
- dma_map.size = size;
- dma_map.iova = iova;
+ int i;
+ const struct rte_memseg *memseg;
+
+ if (is_dma_done)
+ return 0;
+
+ memseg = rte_eal_get_physmem_layout();
+ if (memseg == NULL) {
+ FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ if (memseg[i].addr == NULL && memseg[i].len == 0) {
+ FSLMC_VFIO_LOG(DEBUG, "Total %d segments found.", i);
+ break;
+ }
+
+ dma_map.size = memseg[i].len;
+ dma_map.vaddr = memseg[i].addr_64;
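+ /* With RTE_LIBRTE_DPAA2_USE_PHYS_IOVA the IOMMU is programmed with
+ * physical addresses as IOVA; otherwise virtual addresses are reused
+ * as IOVA (VA == IOVA).
+ */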
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ dma_map.iova = memseg[i].phys_addr;
+#else
+ dma_map.iova = dma_map.vaddr;
+#endif
+
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_groups[0];
+
+ if (!group->container) {
+ FSLMC_VFIO_LOG(ERR, "Container is not connected ");
+ return -1;
+ }
+
+ FSLMC_VFIO_LOG(DEBUG, "-->Initial SHM Virtual ADDR %llX",
+ dma_map.vaddr);
+ FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX", dma_map.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
+ &dma_map);
+ if (ret) {
+ FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API(errno = %d)",
+ errno);
+ return ret;
+ }
+ }
- /* SET DMA MAP for IOMMU */
- group = &vfio_groups[0];
- if (ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map)) {
- FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA (errno = %d)", errno);
+ /* Verify that at least one segment was found */
+ if (i <= 0) {
+ FSLMC_VFIO_LOG(ERR, "No segments found for VFIO mapping");
return -1;
}
+
+ /* TODO - This is a workaround: VFIO currently does not add the
+ * mapping of the interrupt region to the SMMU. Remove this once
+ * support is added in the kernel.
+ */
+ vfio_map_irq_region(group);
+
+ is_dma_done = 1;
+
return 0;
}
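+
+/*
+ * Note: rte_fslmc_vfio_dmamap() is intended to run once, after the VFIO
+ * container and group are connected; subsequent calls return early via
+ * the is_dma_done flag.
+ */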
/* get the MCP object's fd */
mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
if (mc_fd < 0) {
- FSLMC_VFIO_LOG(ERR, "error in VFIO get device %s fd from group"
- " %d", mcp_obj, group->fd);
+ FSLMC_VFIO_LOG(ERR, "error in VFIO get dev %s fd from group %d",
+ mcp_obj, group->fd);
return v_addr;
}
}
FSLMC_VFIO_LOG(DEBUG, "region offset = %llx , region size = %llx",
- reg_info.offset, reg_info.size);
+ reg_info.offset, reg_info.size);
v_addr = (uint64_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
return v_addr;
}
+static inline int
+dpaa2_compare_dpaa2_dev(const struct rte_dpaa2_device *dev,
+ const struct rte_dpaa2_device *dev2)
+{
+ /* not a comparable (dpni/dpseci) family device */
+ if (dev->dev_type != DPAA2_MC_DPNI_DEVID &&
+ dev->dev_type != DPAA2_MC_DPSECI_DEVID)
+ return -1;
+
+ if (dev->object_id == dev2->object_id)
+ return 0;
+ else
+ return 1;
+}
+
+static void
+fslmc_bus_add_device(struct rte_dpaa2_device *dev)
+{
+ struct rte_fslmc_device_list *dev_l;
+
+ dev_l = &rte_fslmc_bus.device_list;
+
+ /* device is valid, add in list (sorted) */
+ if (TAILQ_EMPTY(dev_l)) {
+ TAILQ_INSERT_TAIL(dev_l, dev, next);
+ } else {
+ struct rte_dpaa2_device *dev2;
+ int ret;
+
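+ /* Insert before the first entry with a differing object id; equal
+ * or non-comparable entries are skipped, otherwise append at the
+ * tail.
+ */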
+ TAILQ_FOREACH(dev2, dev_l, next) {
+ ret = dpaa2_compare_dpaa2_dev(dev, dev2);
+ if (ret <= 0)
+ continue;
+
+ TAILQ_INSERT_BEFORE(dev2, dev, next);
+ return;
+ }
+
+ TAILQ_INSERT_TAIL(dev_l, dev, next);
+ }
+}
+
+#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
+
+int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle,
+ uint32_t index)
+{
+ struct vfio_irq_set *irq_set;
+ char irq_set_buf[IRQ_SET_BUF_LEN];
+ int *fd_ptr, fd, ret;
+
+ /* Prepare vfio_irq_set structure and SET the IRQ in VFIO */
+ /* Give the eventfd to VFIO */
+ fd = eventfd(0, 0);
+ if (fd < 0) {
+ FSLMC_VFIO_LOG(ERR, "Cannot create eventfd (errno = %d)", errno);
+ return -1;
+ }
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = index;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ *fd_ptr = fd;
+
+ ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret < 0) {
+ FSLMC_VFIO_LOG(ERR, "Unable to set IRQ in VFIO, ret: %d", ret);
+ close(fd);
+ return -1;
+ }
+
+ /* Set the FD and update the flags */
+ intr_handle->fd = fd;
+ return 0;
+}
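+
+/*
+ * A minimal usage sketch, assuming intr_handle->vfio_dev_fd was filled
+ * in during the VFIO scan (IRQ index 0 is a hypothetical choice):
+ *
+ *	if (rte_dpaa2_intr_enable(&intr_handle, 0) < 0)
+ *		FSLMC_VFIO_LOG(ERR, "Cannot enable interrupt");
+ */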
+
/* The following function fetches the complete list of MC devices
 * from the VFIO container and populates the private device list and
 * other data structures
{
struct fslmc_vfio_device *vdev;
struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
- char *temp_obj, *object_type __rte_unused, *mcp_obj, *dev_name;
- int32_t object_id, i, dev_fd;
+ char *temp_obj, *object_type, *mcp_obj, *dev_name;
+ int32_t object_id, i, dev_fd, ret;
DIR *d;
struct dirent *dir;
char path[PATH_MAX];
/* if already done once */
if (process_once) {
- FSLMC_VFIO_LOG(DEBUG, "Already scanned once - re-scan "
- "not supported");
+ FSLMC_VFIO_LOG(DEBUG,
+ "Already scanned once - re-scan not supported");
return 0;
}
process_once = 0;
free(mcp_obj);
mcp_obj = malloc(sizeof(dir->d_name));
if (!mcp_obj) {
- FSLMC_VFIO_LOG(ERR, "mcp obj:Unable to"
- " allocate memory");
+ FSLMC_VFIO_LOG(ERR,
+ "mcp obj:alloc failed");
closedir(d);
return -ENOMEM;
}
goto FAILURE;
}
- FSLMC_VFIO_LOG(DEBUG, "DPAA2 MC has VIR_ADD = %ld", v_addr);
-
rte_mcp_ptr_list[0] = (void *)v_addr;
d = opendir(path);
}
i = 0;
- FSLMC_VFIO_LOG(DEBUG, "DPAA2 - Parsing devices:");
/* Parsing each object and initiating them*/
while ((dir = readdir(d)) != NULL) {
if (dir->d_type != DT_LNK)
object_type = strtok(dir->d_name, ".");
temp_obj = strtok(NULL, ".");
sscanf(temp_obj, "%d", &object_id);
- FSLMC_VFIO_LOG(DEBUG, " - %s ", dev_name);
/* getting the device fd*/
dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name);
if (dev_fd < 0) {
- FSLMC_VFIO_LOG(ERR, "VFIO_GROUP_GET_DEVICE_FD error"
- " Device fd: %s, Group: %d",
- dev_name, group->fd);
+ FSLMC_VFIO_LOG(ERR,
+ "GET_DEVICE_FD error fd: %s, Group: %d",
+ dev_name, group->fd);
free(dev_name);
goto FAILURE;
}
FSLMC_VFIO_LOG(ERR, "DPAA2 VFIO_DEVICE_GET_INFO fail");
goto FAILURE;
}
+ if (!strcmp(object_type, "dpni") ||
+ !strcmp(object_type, "dpseci")) {
+ struct rte_dpaa2_device *dev;
+
+ dev = malloc(sizeof(struct rte_dpaa2_device));
+ if (dev == NULL)
+ goto FAILURE;
+
+ memset(dev, 0, sizeof(*dev));
+ /* store hw_id of dpni/dpseci device */
+ dev->object_id = object_id;
+ dev->dev_type = (strcmp(object_type, "dpseci")) ?
+ DPAA2_MC_DPNI_DEVID : DPAA2_MC_DPSECI_DEVID;
+
+ snprintf(dev->name, sizeof(dev->name), "%s.%d",
+ object_type, object_id);
+ dev->device.name = dev->name;
+
+ fslmc_bus_add_device(dev);
+ FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added %s", dev->name);
+ } else {
+ /* Parse all other objects */
+ struct rte_dpaa2_object *object;
+
+ TAILQ_FOREACH(object, &fslmc_obj_list, next) {
+ if (!strcmp(object_type, object->name))
+ object->create(vdev, &device_info,
+ object_id);
+ }
+ }
}
closedir(d);
+ ret = dpaa2_affine_qbman_swp();
+ if (ret)
+ FSLMC_VFIO_LOG(DEBUG, "Error in affining qbman swp %d", ret);
+
return 0;
FAILURE: