raw/ioat: add start and stop functions for idxd devices
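
Add dev_start and dev_stop functions to the idxd PCI rawdev ops so that
a device's work queues can be enabled and disabled at runtime. Since the
size of each work queue's configuration register block is
device-specific, replace the fixed-size wq_regs array with a base
pointer plus the config size read from the WQCAP register, accessed
through a new idxd_get_wq_cfg() helper. Also wire up the remaining
rawdev ops (configure, info_get, dump and the xstats handlers) and
guard against a NULL dev_private in idxd_rawdev_destroy().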
diff --git a/drivers/raw/ioat/idxd_pci.c b/drivers/raw/ioat/idxd_pci.c
index c3fec56..99ecfbb 100644
--- a/drivers/raw/ioat/idxd_pci.c
+++ b/drivers/raw/ioat/idxd_pci.c
@@ -44,16 +44,78 @@ idxd_pci_dev_command(struct idxd_rawdev *idxd, enum rte_idxd_cmds command)
        return err_code & CMDSTATUS_ERR_MASK;
 }
 
+static uint32_t *
+idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
+{
+       return RTE_PTR_ADD(pci->wq_regs_base, wq_idx << (5 + pci->wq_cfg_sz));
+}
+
 static int
 idxd_is_wq_enabled(struct idxd_rawdev *idxd)
 {
-       uint32_t state = idxd->u.pci->wq_regs[idxd->qid].wqcfg[WQ_STATE_IDX];
+       uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[WQ_STATE_IDX];
        return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
 }
 
+static void
+idxd_pci_dev_stop(struct rte_rawdev *dev)
+{
+       struct idxd_rawdev *idxd = dev->dev_private;
+       uint8_t err_code;
+
+       if (!idxd_is_wq_enabled(idxd)) {
+               IOAT_PMD_ERR("Work queue %d already disabled", idxd->qid);
+               return;
+       }
+
+       err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
+       if (err_code || idxd_is_wq_enabled(idxd)) {
+               IOAT_PMD_ERR("Failed disabling work queue %d, error code: %#x",
+                               idxd->qid, err_code);
+               return;
+       }
+       IOAT_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
+}
+
+static int
+idxd_pci_dev_start(struct rte_rawdev *dev)
+{
+       struct idxd_rawdev *idxd = dev->dev_private;
+       uint8_t err_code;
+
+       if (idxd_is_wq_enabled(idxd)) {
+               IOAT_PMD_WARN("WQ %d already enabled", idxd->qid);
+               return 0;
+       }
+
+       if (idxd->public.batch_ring == NULL) {
+               IOAT_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
+               return -EINVAL;
+       }
+
+       err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
+       if (err_code || !idxd_is_wq_enabled(idxd)) {
+               IOAT_PMD_ERR("Failed enabling work queue %d, error code: %#x",
+                               idxd->qid, err_code);
+               return err_code == 0 ? -1 : err_code;
+       }
+
+       IOAT_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);
+
+       return 0;
+}
+
 static const struct rte_rawdev_ops idxd_pci_ops = {
                .dev_close = idxd_rawdev_close,
-               .dev_selftest = idxd_rawdev_test,
+               .dev_selftest = ioat_rawdev_test,
+               .dump = idxd_dev_dump,
+               .dev_configure = idxd_dev_configure,
+               .dev_start = idxd_pci_dev_start,
+               .dev_stop = idxd_pci_dev_stop,
+               .dev_info_get = idxd_dev_info_get,
+               .xstats_get = ioat_xstats_get,
+               .xstats_get_names = ioat_xstats_get_names,
+               .xstats_reset = ioat_xstats_reset,
 };
 
 /* each portal uses 4 x 4k pages */
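
The idxd_get_wq_cfg() helper added above exists because idxd hardware
reports a variable per-WQ configuration block size in the WQCAP register
(bits 24-27, extracted into pci->wq_cfg_sz in the next hunk). A
self-contained sketch of the offset arithmetic, using an illustrative
wq_cfg_sz value rather than one read from hardware:

#include <stdint.h>
#include <stdio.h>

/*
 * Each WQ configuration block is 32 bytes times 2^wq_cfg_sz, so the
 * byte offset of WQ n is n << (5 + wq_cfg_sz), the same arithmetic
 * idxd_get_wq_cfg() applies to pci->wq_regs_base.
 */
static uint32_t
wq_cfg_offset(unsigned int wq_idx, unsigned int wq_cfg_sz)
{
	return wq_idx << (5 + wq_cfg_sz);
}

int
main(void)
{
	/* hypothetical device reporting wq_cfg_sz = 2: 128-byte blocks */
	for (unsigned int i = 0; i < 4; i++)
		printf("WQ %u config block at byte offset %u\n",
				i, wq_cfg_offset(i, 2));
	return 0;
}
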
@@ -81,8 +143,9 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd)
        grp_offset = (uint16_t)pci->regs->offsets[0];
        pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
        wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
-       pci->wq_regs = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
+       pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
        pci->portals = dev->mem_resource[2].addr;
+       pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;
 
        /* sanity check device status */
        if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
@@ -113,7 +176,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd)
                pci->grp_regs[i].grpwqcfg[0] = 0;
        }
        for (i = 0; i < nb_wqs; i++)
-               pci->wq_regs[i].wqcfg[0] = 0;
+               idxd_get_wq_cfg(pci, i)[0] = 0;
 
        /* put each engine into a separate group to avoid reordering */
        if (nb_groups > nb_engines)
@@ -138,10 +201,10 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd)
                                i, i % nb_groups);
                pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
                /* now configure it, in terms of size, max batch, mode */
-               pci->wq_regs[i].wqcfg[WQ_SIZE_IDX] = wq_size;
-               pci->wq_regs[i].wqcfg[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) |
+               idxd_get_wq_cfg(pci, i)[WQ_SIZE_IDX] = wq_size;
+               idxd_get_wq_cfg(pci, i)[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) |
                                WQ_MODE_DEDICATED;
-               pci->wq_regs[i].wqcfg[WQ_SIZES_IDX] = lg2_max_copy_size |
+               idxd_get_wq_cfg(pci, i)[WQ_SIZES_IDX] = lg2_max_copy_size |
                                (lg2_max_batch << WQ_BATCH_SZ_SHIFT);
        }
 
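
Each WQ is configured by writing individual 32-bit words of its WQCFG
block: WQ_SIZE_IDX holds the queue size, WQ_MODE_IDX the mode and
priority, and WQ_SIZES_IDX packs the log2 max transfer size together
with the log2 max batch size. A hedged sketch of that packing; the
index and shift values below are assumptions mirroring the driver's
private defines, which are not part of this diff:

#include <stdint.h>

/* Assumed values mirroring the driver's defines (not in this diff). */
#define WQ_SIZE_IDX        0
#define WQ_MODE_IDX        2
#define WQ_SIZES_IDX       3
#define WQ_PRIORITY_SHIFT  4
#define WQ_BATCH_SZ_SHIFT  5
#define WQ_MODE_DEDICATED  1

/* Fill the three configuration words for one dedicated-mode WQ. */
static void
fill_wqcfg(uint32_t *wqcfg, uint32_t wq_size,
		uint32_t lg2_max_copy_size, uint32_t lg2_max_batch)
{
	wqcfg[WQ_SIZE_IDX] = wq_size;
	wqcfg[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) | WQ_MODE_DEDICATED;
	wqcfg[WQ_SIZES_IDX] = lg2_max_copy_size |
			(lg2_max_batch << WQ_BATCH_SZ_SHIFT);
}
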
@@ -234,6 +297,10 @@ idxd_rawdev_destroy(const char *name)
        }
 
        idxd = rdev->dev_private;
+       if (!idxd) {
+               IOAT_PMD_ERR("Error getting dev_private");
+               return -EINVAL;
+       }
 
        /* disable the device */
        err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
@@ -244,13 +311,11 @@ idxd_rawdev_destroy(const char *name)
        IOAT_PMD_DEBUG("IDXD Device disabled OK");
 
        /* free device memory */
-       if (rdev->dev_private != NULL) {
-               IOAT_PMD_DEBUG("Freeing device driver memory");
-               rdev->dev_private = NULL;
-               rte_free(idxd->public.batch_ring);
-               rte_free(idxd->public.hdl_ring);
-               rte_memzone_free(idxd->mz);
-       }
+       IOAT_PMD_DEBUG("Freeing device driver memory");
+       rdev->dev_private = NULL;
+       rte_free(idxd->public.batch_ring);
+       rte_free(idxd->public.hdl_ring);
+       rte_memzone_free(idxd->mz);
 
        /* rte_rawdev_close is called by pmd_release */
        ret = rte_rawdev_pmd_release(rdev);
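
With these ops in place, an application drives the WQ through the
generic rawdev API. A minimal usage sketch, assuming the ioat rawdev
configuration convention (ring_size passed via struct
rte_ioat_rawdev_config) and the three-argument rte_rawdev_configure()
of contemporary DPDK releases:

#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>

/* Sketch only: configure, start and stop one idxd rawdev instance. */
static int
idxd_start_stop_example(uint16_t dev_id)
{
	struct rte_ioat_rawdev_config cfg = { .ring_size = 512 };
	struct rte_rawdev_info info = { .dev_private = &cfg };
	int ret;

	ret = rte_rawdev_configure(dev_id, &info, sizeof(cfg));
	if (ret != 0)
		return ret;

	/* invokes idxd_pci_dev_start(), issuing idxd_enable_wq */
	ret = rte_rawdev_start(dev_id);
	if (ret != 0)
		return ret;

	/* ... enqueue copies and fetch completions here ... */

	/* invokes idxd_pci_dev_stop(), issuing idxd_disable_wq */
	rte_rawdev_stop(dev_id);
	return 0;
}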