#include <rte_bus_pci.h>
#include <rte_memzone.h>
+#include <rte_devargs.h>
#include "ioat_private.h"
#include "ioat_spec.h"
return err_code & CMDSTATUS_ERR_MASK;
}
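+/*
+ * Return a pointer to the config registers of the given work queue.
+ * Each queue's config block is 2^(5 + wq_cfg_sz) bytes long, so the
+ * block for queue wq_idx starts at that multiple of wq_idx from
+ * wq_regs_base.
+ */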
+static uint32_t *
+idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
+{
+ return RTE_PTR_ADD(pci->wq_regs_base,
+ (uintptr_t)wq_idx << (5 + pci->wq_cfg_sz));
+}
+
static int
idxd_is_wq_enabled(struct idxd_rawdev *idxd)
{
- uint32_t state = idxd->u.pci->wq_regs[idxd->qid].wqcfg[WQ_STATE_IDX];
+ uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[WQ_STATE_IDX];
return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}
+
+static void
+idxd_pci_dev_stop(struct rte_rawdev *dev)
+{
+ struct idxd_rawdev *idxd = dev->dev_private;
+ uint8_t err_code;
+
+ if (!idxd_is_wq_enabled(idxd)) {
+ IOAT_PMD_ERR("Work queue %d already disabled", idxd->qid);
+ return;
+ }
+
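+ /* request the disable, then re-read the WQ state to confirm it took effect */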
+ err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
+ if (err_code || idxd_is_wq_enabled(idxd)) {
+ IOAT_PMD_ERR("Failed disabling work queue %d, error code: %#x",
+ idxd->qid, err_code);
+ return;
+ }
+ IOAT_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
+}
+
+static int
+idxd_pci_dev_start(struct rte_rawdev *dev)
+{
+ struct idxd_rawdev *idxd = dev->dev_private;
+ uint8_t err_code;
+
+ if (idxd_is_wq_enabled(idxd)) {
+ IOAT_PMD_WARN("WQ %d already enabled", idxd->qid);
+ return 0;
+ }
+
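+ /* the rings are set up by dev_configure, so a NULL batch ring means the queue was never configured */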
+ if (idxd->public.batch_ring == NULL) {
+ IOAT_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
+ return -EINVAL;
+ }
+
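+ /* issue the enable and verify the WQ state actually changed */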
+ err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
+ if (err_code || !idxd_is_wq_enabled(idxd)) {
+ IOAT_PMD_ERR("Failed enabling work queue %d, error code: %#x",
+ idxd->qid, err_code);
+ return err_code == 0 ? -1 : err_code;
+ }
+
+ IOAT_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);
+
+ return 0;
+}
+
static const struct rte_rawdev_ops idxd_pci_ops = {
.dev_close = idxd_rawdev_close,
- .dev_selftest = idxd_rawdev_test,
+ .dev_selftest = ioat_rawdev_test,
.dump = idxd_dev_dump,
+ .dev_configure = idxd_dev_configure,
+ .dev_start = idxd_pci_dev_start,
+ .dev_stop = idxd_pci_dev_stop,
+ .dev_info_get = idxd_dev_info_get,
+ .xstats_get = ioat_xstats_get,
+ .xstats_get_names = ioat_xstats_get_names,
+ .xstats_reset = ioat_xstats_reset,
};
/* each portal uses 4 x 4k pages */
#define IDXD_PORTAL_SIZE (4096 * 4)
static int
-init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd)
+init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd,
+ unsigned int max_queues)
{
struct idxd_pci_common *pci;
uint8_t nb_groups, nb_engines, nb_wqs;
grp_offset = (uint16_t)pci->regs->offsets[0];
pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
- pci->wq_regs = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
+ pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
pci->portals = dev->mem_resource[2].addr;
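+ /* cache the WQ config-block size from WQCAP (bits 27:24); each WQ's
+  * config registers occupy 32 << wq_cfg_sz bytes (see idxd_get_wq_cfg)
+  */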
+ pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;
/* sanity check device status */
if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
pci->grp_regs[i].grpwqcfg[0] = 0;
}
for (i = 0; i < nb_wqs; i++)
- pci->wq_regs[i].wqcfg[0] = 0;
+ idxd_get_wq_cfg(pci, i)[0] = 0;
+
+ /* limit queues if necessary */
+ if (max_queues != 0 && nb_wqs > max_queues) {
+ nb_wqs = max_queues;
+ if (nb_engines > max_queues)
+ nb_engines = max_queues;
+ if (nb_groups > max_queues)
+ nb_groups = max_queues;
+ IOAT_PMD_DEBUG("Limiting queues to %u", nb_wqs);
+ }
/* put each engine into a separate group to avoid reordering */
if (nb_groups > nb_engines)
i, i % nb_groups);
pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
/* now configure it, in terms of size, max batch, mode */
- pci->wq_regs[i].wqcfg[WQ_SIZE_IDX] = wq_size;
- pci->wq_regs[i].wqcfg[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) |
+ idxd_get_wq_cfg(pci, i)[WQ_SIZE_IDX] = wq_size;
+ idxd_get_wq_cfg(pci, i)[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) |
WQ_MODE_DEDICATED;
- pci->wq_regs[i].wqcfg[WQ_SIZES_IDX] = lg2_max_copy_size |
+ idxd_get_wq_cfg(pci, i)[WQ_SIZES_IDX] = lg2_max_copy_size |
(lg2_max_batch << WQ_BATCH_SZ_SHIFT);
}
uint8_t nb_wqs;
int qid, ret = 0;
char name[PCI_PRI_STR_SIZE];
+ unsigned int max_queues = 0;
rte_pci_device_name(&dev->addr, name, sizeof(name));
IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
dev->device.driver = &drv->driver;
- ret = init_pci_device(dev, &idxd);
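+ /* parse optional devargs of the form max_queues=N,
+  * e.g. given as <pci BDF>,max_queues=4 on the EAL command line
+  */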
+ if (dev->device.devargs && dev->device.devargs->args[0] != '\0') {
+ /* if the number of devargs grows beyond just 1, use rte_kvargs */
+ if (sscanf(dev->device.devargs->args,
+ "max_queues=%u", &max_queues) != 1) {
+ IOAT_PMD_ERR("Invalid device parameter: '%s'",
+ dev->device.devargs->args);
+ return -1;
+ }
+ }
+
+ ret = init_pci_device(dev, &idxd, max_queues);
if (ret < 0) {
IOAT_PMD_ERR("Error initializing PCI hardware");
return ret;
}
idxd = rdev->dev_private;
+ if (!idxd) {
+ IOAT_PMD_ERR("Error getting dev_private");
+ return -EINVAL;
+ }
/* disable the device */
err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
IOAT_PMD_DEBUG("IDXD Device disabled OK");
/* free device memory */
- if (rdev->dev_private != NULL) {
- IOAT_PMD_DEBUG("Freeing device driver memory");
- rdev->dev_private = NULL;
- rte_free(idxd->public.batch_ring);
- rte_free(idxd->public.hdl_ring);
- rte_memzone_free(idxd->mz);
- }
+ IOAT_PMD_DEBUG("Freeing device driver memory");
+ rdev->dev_private = NULL;
+ rte_free(idxd->public.batch_ring);
+ rte_free(idxd->public.hdl_ring);
+ rte_memzone_free(idxd->mz);
/* rte_rawdev_close is called by pmd_release */
ret = rte_rawdev_pmd_release(rdev);
RTE_PMD_REGISTER_PCI_TABLE(IDXD_PMD_RAWDEV_NAME_PCI, pci_id_idxd_map);
RTE_PMD_REGISTER_KMOD_DEP(IDXD_PMD_RAWDEV_NAME_PCI,
"* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(rawdev_idxd_pci, "max_queues=0");