dma/idxd: create dmadev instances on PCI probe
author		Kevin Laatz <kevin.laatz@intel.com>
		Wed, 20 Oct 2021 16:30:02 +0000 (16:30 +0000)
committer	Thomas Monjalon <thomas@monjalon.net>
		Fri, 22 Oct 2021 20:40:59 +0000 (22:40 +0200)
When a suitable device is found during the PCI probe, create a dmadev
instance for each HW queue. The HW definitions required for this are
also included.

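For example, the number of dmadev instances created at probe time can be
capped with the "max_queues" devarg (the PCI address here is illustrative,
and any EAL-enabled binary will do):

    dpdk-test -a 0000:6a:01.0,max_queues=4
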
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
drivers/dma/idxd/idxd_hw_defs.h
drivers/dma/idxd/idxd_internal.h
drivers/dma/idxd/idxd_pci.c

index a92d462..86f7f35 100644
@@ -24,4 +24,67 @@ struct idxd_completion {
        uint32_t invalid_flags;
 } __rte_aligned(32);
 
+/*** Definitions for Intel(R) Data Streaming Accelerator ***/
+
+#define IDXD_CMD_SHIFT 20
+enum rte_idxd_cmds {
+       idxd_enable_dev = 1,
+       idxd_disable_dev,
+       idxd_drain_all,
+       idxd_abort_all,
+       idxd_reset_device,
+       idxd_enable_wq,
+       idxd_disable_wq,
+       idxd_drain_wq,
+       idxd_abort_wq,
+       idxd_reset_wq,
+};
+
+/* General bar0 registers */
+struct rte_idxd_bar0 {
+       uint32_t __rte_cache_aligned version;    /* offset 0x00 */
+       uint64_t __rte_aligned(0x10) gencap;     /* offset 0x10 */
+       uint64_t __rte_aligned(0x10) wqcap;      /* offset 0x20 */
+       uint64_t __rte_aligned(0x10) grpcap;     /* offset 0x30 */
+       uint64_t __rte_aligned(0x08) engcap;     /* offset 0x38 */
+       uint64_t __rte_aligned(0x10) opcap;      /* offset 0x40 */
+       uint64_t __rte_aligned(0x20) offsets[2]; /* offset 0x60 */
+       uint32_t __rte_aligned(0x20) gencfg;     /* offset 0x80 */
+       uint32_t __rte_aligned(0x08) genctrl;    /* offset 0x88 */
+       uint32_t __rte_aligned(0x10) gensts;     /* offset 0x90 */
+       uint32_t __rte_aligned(0x08) intcause;   /* offset 0x98 */
+       uint32_t __rte_aligned(0x10) cmd;        /* offset 0xA0 */
+       uint32_t __rte_aligned(0x08) cmdstatus;  /* offset 0xA8 */
+       uint64_t __rte_aligned(0x20) swerror[4]; /* offset 0xC0 */
+};
+
+/* workqueue config is provided as an array of uint32_t. */
+enum rte_idxd_wqcfg {
+       wq_size_idx,       /* size is in first 32-bit value */
+       wq_threshold_idx,  /* WQ threshold is in second 32-bit value */
+       wq_mode_idx,       /* WQ mode and other flags */
+       wq_sizes_idx,      /* WQ transfer and batch sizes */
+       wq_occ_int_idx,    /* WQ occupancy interrupt handle */
+       wq_occ_limit_idx,  /* WQ occupancy limit */
+       wq_state_idx,      /* WQ state and occupancy state */
+};
+
+#define WQ_MODE_SHARED    0
+#define WQ_MODE_DEDICATED 1
+#define WQ_PRIORITY_SHIFT 4
+#define WQ_BATCH_SZ_SHIFT 5
+#define WQ_STATE_SHIFT 30
+#define WQ_STATE_MASK 0x3
+
+struct rte_idxd_grpcfg {
+       uint64_t grpwqcfg[4]  __rte_cache_aligned; /* 64-byte register set */
+       uint64_t grpengcfg;  /* offset 32 */
+       uint32_t grpflags;   /* offset 40 */
+};
+
+#define GENSTS_DEV_STATE_MASK 0x03
+#define CMDSTATUS_ACTIVE_SHIFT 31
+#define CMDSTATUS_ACTIVE_MASK (1U << CMDSTATUS_ACTIVE_SHIFT)
+#define CMDSTATUS_ERR_MASK 0xFF
+
 #endif
index 8f1cdf6..8473bf9 100644
@@ -6,6 +6,7 @@
 #define _IDXD_INTERNAL_H_
 
 #include <rte_dmadev_pmd.h>
+#include <rte_spinlock.h>
 
 #include "idxd_hw_defs.h"
 
@@ -28,6 +29,16 @@ extern int idxd_pmd_logtype;
 #define IDXD_PMD_ERR(fmt, args...)    IDXD_PMD_LOG(ERR, fmt, ## args)
 #define IDXD_PMD_WARN(fmt, args...)   IDXD_PMD_LOG(WARNING, fmt, ## args)
 
+struct idxd_pci_common {
+       rte_spinlock_t lk;      /* protects accesses to the cmd register */
+
+       uint8_t wq_cfg_sz;      /* WQCFG size field from WQCAP */
+       volatile struct rte_idxd_bar0 *regs;       /* general bar0 registers */
+       volatile uint32_t *wq_regs_base;           /* start of WQ config table */
+       volatile struct rte_idxd_grpcfg *grp_regs; /* start of group config table */
+       volatile void *portals;                    /* bar2 work submission portals */
+};
+
 struct idxd_dmadev {
        /* counters to track the batches */
        unsigned short max_batches;
@@ -59,6 +70,8 @@ struct idxd_dmadev {
                struct {
                        unsigned int dsa_id;
                } bus;
+
+               struct idxd_pci_common *pci;
        } u;
 };
 
index 79e4aad..6d26574 100644
@@ -3,6 +3,9 @@
  */
 
 #include <rte_bus_pci.h>
+#include <rte_devargs.h>
+#include <rte_dmadev_pmd.h>
+#include <rte_malloc.h>
 
 #include "idxd_internal.h"
 
@@ -16,28 +19,290 @@ const struct rte_pci_id pci_id_idxd_map[] = {
        { .vendor_id = 0, /* sentinel */ },
 };
 
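+/*
+ * Send a command to the device via the BAR0 cmd register, then busy-poll
+ * cmdstatus until HW clears the active bit. The command code goes in the
+ * upper bits (IDXD_CMD_SHIFT); the WQ disable/drain/abort/reset commands
+ * take a bitmask of WQ ids as operand rather than a plain index.
+ */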
+static inline int
+idxd_pci_dev_command(struct idxd_dmadev *idxd, enum rte_idxd_cmds command)
+{
+       uint32_t err_code;
+       uint16_t qid = idxd->qid;
+       int i = 0;
+
+       if (command >= idxd_disable_wq && command <= idxd_reset_wq)
+               qid = (1 << qid);
+       rte_spinlock_lock(&idxd->u.pci->lk);
+       idxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;
+
+       do {
+               rte_pause();
+               err_code = idxd->u.pci->regs->cmdstatus;
+               if (++i >= 1000) {
+                       IDXD_PMD_ERR("Timeout waiting for command response from HW");
+                       rte_spinlock_unlock(&idxd->u.pci->lk);
+                       return err_code;
+               }
+       } while (err_code & CMDSTATUS_ACTIVE_MASK);
+       rte_spinlock_unlock(&idxd->u.pci->lk);
+
+       err_code &= CMDSTATUS_ERR_MASK;
+       return -(int)err_code;
+}
+
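+/*
+ * BAR0 holds an array of per-WQ config records; each record is
+ * 2^(5 + wq_cfg_sz) bytes, with wq_cfg_sz taken from WQCAP, so the
+ * config for WQ "n" starts at wq_regs_base + (n << (5 + wq_cfg_sz)).
+ */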
+static uint32_t *
+idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
+{
+       return RTE_PTR_ADD(pci->wq_regs_base,
+                       (uintptr_t)wq_idx << (5 + pci->wq_cfg_sz));
+}
+
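+/* return 1 if the WQ state field reads "enabled" (value 0x1), else 0 */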
+static int
+idxd_is_wq_enabled(struct idxd_dmadev *idxd)
+{
+       uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[wq_state_idx];
+       return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
+}
+
+static int
+idxd_pci_dev_close(struct rte_dma_dev *dev)
+{
+       struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+       int err_code;
+
+       /* disable the device */
+       err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
+       if (err_code) {
+               IDXD_PMD_ERR("Error disabling device: code %#x", err_code);
+               return err_code;
+       }
+       IDXD_PMD_DEBUG("IDXD Device disabled OK");
+
+       /* free device memory */
+       IDXD_PMD_DEBUG("Freeing device driver memory");
+       rte_free(idxd->batch_idx_ring);
+
+       return 0;
+}
+
+static const struct rte_dma_dev_ops idxd_pci_ops = {
+       .dev_close = idxd_pci_dev_close,
+};
+
+/* each portal uses 4 x 4k pages */
+#define IDXD_PORTAL_SIZE (4096 * 4)
+
+static int
+init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
+               unsigned int max_queues)
+{
+       struct idxd_pci_common *pci;
+       uint8_t nb_groups, nb_engines, nb_wqs;
+       uint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */
+       uint16_t wq_size, total_wq_size;
+       uint8_t lg2_max_batch, lg2_max_copy_size;
+       unsigned int i;
+       int err_code;
+
+       pci = malloc(sizeof(*pci));
+       if (pci == NULL) {
+               IDXD_PMD_ERR("%s: Can't allocate memory", __func__);
+               err_code = -1;
+               goto err;
+       }
+       rte_spinlock_init(&pci->lk);
+
+       /* assign the bar registers, and then configure device */
+       pci->regs = dev->mem_resource[0].addr;
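+       /* table offsets in the offsets register are in 0x100-byte units */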
+       grp_offset = (uint16_t)pci->regs->offsets[0];
+       pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
+       wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
+       pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
+       pci->portals = dev->mem_resource[2].addr;
+       pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;
+
+       /* sanity check device status */
+       if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
+               /* device is enabled or requires a function-level reset (FLR) */
+               IDXD_PMD_ERR("Device status is not disabled, cannot init");
+               err_code = -1;
+               goto err;
+       }
+       if (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {
+               /* command in progress */
+               IDXD_PMD_ERR("Device has a command in progress, cannot init");
+               err_code = -1;
+               goto err;
+       }
+
+       /* read basic info about the hardware for use when configuring */
+       nb_groups = (uint8_t)pci->regs->grpcap;
+       nb_engines = (uint8_t)pci->regs->engcap;
+       nb_wqs = (uint8_t)(pci->regs->wqcap >> 16);
+       total_wq_size = (uint16_t)pci->regs->wqcap;
+       lg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;
+       lg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;
+
+       IDXD_PMD_DEBUG("nb_groups = %u, nb_engines = %u, nb_wqs = %u",
+                       nb_groups, nb_engines, nb_wqs);
+
+       /* zero out any old config */
+       for (i = 0; i < nb_groups; i++) {
+               pci->grp_regs[i].grpengcfg = 0;
+               pci->grp_regs[i].grpwqcfg[0] = 0;
+       }
+       for (i = 0; i < nb_wqs; i++)
+               idxd_get_wq_cfg(pci, i)[0] = 0;
+
+       /* limit queues if necessary */
+       if (max_queues != 0 && nb_wqs > max_queues) {
+               nb_wqs = max_queues;
+               if (nb_engines > max_queues)
+                       nb_engines = max_queues;
+               if (nb_groups > max_queues)
+                       nb_groups = max_queues;
+               IDXD_PMD_DEBUG("Limiting queues to %u", nb_wqs);
+       }
+
+       /* put each engine into a separate group to avoid reordering */
+       if (nb_groups > nb_engines)
+               nb_groups = nb_engines;
+       if (nb_groups < nb_engines)
+               nb_engines = nb_groups;
+
+       /* assign engines to groups, round-robin style */
+       for (i = 0; i < nb_engines; i++) {
+               IDXD_PMD_DEBUG("Assigning engine %u to group %u",
+                               i, i % nb_groups);
+               pci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);
+       }
+
+       /* now do the same for queues and give work slots to each queue */
+       wq_size = total_wq_size / nb_wqs;
+       IDXD_PMD_DEBUG("Work queue size = %u, max batch = 2^%u, max copy = 2^%u",
+                       wq_size, lg2_max_batch, lg2_max_copy_size);
+       for (i = 0; i < nb_wqs; i++) {
+               /* add engine "i" to a group */
+               IDXD_PMD_DEBUG("Assigning work queue %u to group %u",
+                               i, i % nb_groups);
+               pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
+               /* now configure it, in terms of size, max batch, mode */
+               idxd_get_wq_cfg(pci, i)[wq_size_idx] = wq_size;
+               idxd_get_wq_cfg(pci, i)[wq_mode_idx] = (1 << WQ_PRIORITY_SHIFT) |
+                               WQ_MODE_DEDICATED;
+               idxd_get_wq_cfg(pci, i)[wq_sizes_idx] = lg2_max_copy_size |
+                               (lg2_max_batch << WQ_BATCH_SZ_SHIFT);
+       }
+
+       /* dump the group configuration to output */
+       for (i = 0; i < nb_groups; i++) {
+               IDXD_PMD_DEBUG("## Group %d", i);
+               IDXD_PMD_DEBUG("    GRPWQCFG: %"PRIx64, pci->grp_regs[i].grpwqcfg[0]);
+               IDXD_PMD_DEBUG("    GRPENGCFG: %"PRIx64, pci->grp_regs[i].grpengcfg);
+               IDXD_PMD_DEBUG("    GRPFLAGS: %"PRIx32, pci->grp_regs[i].grpflags);
+       }
+
+       idxd->u.pci = pci;
+       idxd->max_batches = wq_size;
+
+       /* enable the device itself */
+       err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
+       if (err_code) {
+               IDXD_PMD_ERR("Error enabling device: code %#x", err_code);
+               goto err;
+       }
+       IDXD_PMD_DEBUG("IDXD Device enabled OK");
+
+       return nb_wqs;
+
+err:
+       free(pci);
+       return err_code;
+}
+
 static int
 idxd_dmadev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
 {
-       int ret = 0;
+       struct idxd_dmadev idxd = {0};
+       uint8_t nb_wqs;
+       int qid, ret = 0;
        char name[PCI_PRI_STR_SIZE];
+       unsigned int max_queues = 0;
 
        rte_pci_device_name(&dev->addr, name, sizeof(name));
        IDXD_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
        dev->device.driver = &drv->driver;
 
+       if (dev->device.devargs && dev->device.devargs->args[0] != '\0') {
+               /* if the number of devargs grows beyond just 1, use rte_kvargs */
+               if (sscanf(dev->device.devargs->args,
+                               "max_queues=%u", &max_queues) != 1) {
+                       IDXD_PMD_ERR("Invalid device parameter: '%s'",
+                                       dev->device.devargs->args);
+                       return -1;
+               }
+       }
+
+       ret = init_pci_device(dev, &idxd, max_queues);
+       if (ret < 0) {
+               IDXD_PMD_ERR("Error initializing PCI hardware");
+               return ret;
+       }
+       if (idxd.u.pci->portals == NULL) {
+               IDXD_PMD_ERR("Error, invalid portal assigned during initialization");
+               free(idxd.u.pci);
+               return -EINVAL;
+       }
+       nb_wqs = (uint8_t)ret;
+
+       /* set up one device for each queue */
+       for (qid = 0; qid < nb_wqs; qid++) {
+               char qname[32];
+
+               /* add the queue number to each device name */
+               snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
+               idxd.qid = qid;
+               idxd.portal = RTE_PTR_ADD(idxd.u.pci->portals,
+                               qid * IDXD_PORTAL_SIZE);
+               if (idxd_is_wq_enabled(&idxd))
+                       IDXD_PMD_ERR("Error, WQ %u seems enabled", qid);
+               ret = idxd_dmadev_create(qname, &dev->device,
+                               &idxd, &idxd_pci_ops);
+               if (ret != 0) {
+                       IDXD_PMD_ERR("Failed to create dmadev %s", qname);
+                       if (qid == 0) /* if no devices using this, free pci */
+                               free(idxd.u.pci);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int
+idxd_dmadev_destroy(const char *name)
+{
+       int ret = 0;
+
+       /* rte_dma_close is called by pmd_release */
+       ret = rte_dma_pmd_release(name);
+       if (ret)
+               IDXD_PMD_DEBUG("Device cleanup failed");
+
        return ret;
 }
 
 static int
 idxd_dmadev_remove_pci(struct rte_pci_device *dev)
 {
+       int i = 0;
        char name[PCI_PRI_STR_SIZE];
 
        rte_pci_device_name(&dev->addr, name, sizeof(name));
 
-       IDXD_PMD_INFO("Closing %s on NUMA node %d",
-                       name, dev->device.numa_node);
+       IDXD_PMD_INFO("Closing %s on NUMA node %d", name, dev->device.numa_node);
+
+       RTE_DMA_FOREACH_DEV(i) {
+               struct rte_dma_info info;
+
+               rte_dma_info_get(i, &info);
+               if (strncmp(name, info.dev_name, strlen(name)) == 0)
+                       idxd_dmadev_destroy(info.dev_name);
+       }
 
        return 0;
 }
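
For reference, a minimal sketch of how an application could enumerate the
per-queue dmadevs this probe creates, using only the rte_dmadev calls seen
above (the helper name and PCI address are illustrative, not part of the
patch):

    #include <stdio.h>
    #include <string.h>
    #include <rte_dmadev.h>

    /* print each dmadev whose name starts with the given PCI BDF;
     * the probe names each queue's device "<BDF>-q<N>",
     * e.g. "0000:6a:01.0-q0" */
    static void
    list_idxd_queues(const char *pci_name)
    {
            int i;

            RTE_DMA_FOREACH_DEV(i) {
                    struct rte_dma_info info;

                    if (rte_dma_info_get(i, &info) < 0)
                            continue;
                    if (strncmp(pci_name, info.dev_name, strlen(pci_name)) == 0)
                            printf("dmadev %d: %s\n", i, info.dev_name);
            }
    }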