#include <rte_bus_pci.h>
#include <rte_dmadev_pmd.h>
+#include <rte_malloc.h>
#include "ioat_internal.h"
#define IOAT_PMD_NAME dmadev_ioat
#define IOAT_PMD_NAME_STR RTE_STR(IOAT_PMD_NAME)
+/* Create a DMA device. */
+static int
+ioat_dmadev_create(const char *name, struct rte_pci_device *dev)
+{
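+ /* Device operations table, left empty for now. */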
+ static const struct rte_dma_dev_ops ioat_dmadev_ops = { };
+
+ struct rte_dma_dev *dmadev = NULL;
+ struct ioat_dmadev *ioat = NULL;
+ int retry = 0;
+
+ if (!name) {
+ IOAT_PMD_ERR("Invalid name of the device!");
+ return -EINVAL;
+ }
+
+ /* Allocate device structure. */
+ dmadev = rte_dma_pmd_allocate(name, dev->device.numa_node, sizeof(struct ioat_dmadev));
+ if (dmadev == NULL) {
+ IOAT_PMD_ERR("Unable to allocate dma device");
+ return -ENOMEM;
+ }
+
+ dmadev->device = &dev->device;
+
+ dmadev->fp_obj->dev_private = dmadev->data->dev_private;
+
+ dmadev->dev_ops = &ioat_dmadev_ops;
+
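+ /* Set up the driver-private state; the register pointer comes from PCI BAR 0. */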
+ ioat = dmadev->data->dev_private;
+ ioat->dmadev = dmadev;
+ ioat->regs = dev->mem_resource[0].addr;
+ ioat->doorbell = &ioat->regs->dmacount;
+ ioat->qcfg.nb_desc = 0;
+ ioat->desc_ring = NULL;
+ ioat->version = ioat->regs->cbver;
+
+ /* Do device initialization - reset and set error behaviour. */
+ if (ioat->regs->chancnt != 1)
+ IOAT_PMD_WARN("%s: Channel count == %d\n", __func__,
+ ioat->regs->chancnt);
+
+ /* Locked by someone else. */
+ if (ioat->regs->chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
+ IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
+ ioat->regs->chanctrl = 0;
+ }
+
+ /* Clear any previous errors by writing the set error bits back to the register. */
+ if (ioat->regs->chanerr != 0) {
+ uint32_t val = ioat->regs->chanerr;
+ ioat->regs->chanerr = val;
+ }
+
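+ /* Suspend and reset the channel, then poll (up to 200 x 1ms) for the reset bit to clear. */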
+ ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND;
+ rte_delay_ms(1);
+ ioat->regs->chancmd = IOAT_CHANCMD_RESET;
+ rte_delay_ms(1);
+ while (ioat->regs->chancmd & IOAT_CHANCMD_RESET) {
+ ioat->regs->chainaddr = 0;
+ rte_delay_ms(1);
+ if (++retry >= 200) {
+ IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=%#"PRIx8
+ ", CHANSTS=%#"PRIx64", CHANERR=%#"PRIx32"\n",
+ __func__,
+ ioat->regs->chancmd,
+ ioat->regs->chansts,
+ ioat->regs->chanerr);
+ rte_dma_pmd_release(name);
+ return -EIO;
+ }
+ }
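+
+ /* Set error behaviour: abort on any error and report it via the completion status. */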
+ ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
+ IOAT_CHANCTRL_ERR_COMPLETION_EN;
+
+ dmadev->fp_obj->dev_private = ioat;
+
+ dmadev->state = RTE_DMA_DEV_READY;
+
+ return 0;
+}
+
+/* Destroy a DMA device. */
+static int
+ioat_dmadev_destroy(const char *name)
+{
+ int ret;
+
+ if (!name) {
+ IOAT_PMD_ERR("Invalid device name");
+ return -EINVAL;
+ }
+
+ ret = rte_dma_pmd_release(name);
+ if (ret)
+ IOAT_PMD_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
/* Probe DMA device. */
static int
ioat_dmadev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
dev->device.driver = &drv->driver;
- return 0;
+ return ioat_dmadev_create(name, dev);
}
/* Remove DMA device. */
IOAT_PMD_INFO("Closing %s on NUMA node %d",
name, dev->device.numa_node);
- return 0;
+ return ioat_dmadev_destroy(name);
}
static const struct rte_pci_id pci_id_ioat_map[] = {
#include <stdint.h>
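+/* Offset of the channel error register in PCI configuration space. */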
+#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
+
#define IOAT_VER_3_0 0x30
#define IOAT_VER_3_3 0x33
#define IOAT_DEVICE_ID_BDXF 0x6f2F
#define IOAT_DEVICE_ID_ICX 0x0b00
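+
+/* Bit positions of fields within the hardware descriptor control word. */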
+#define IOAT_COMP_UPDATE_SHIFT 3
+#define IOAT_CMD_OP_SHIFT 24
+
+/* DMA Channel Registers */
+#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000
+#define IOAT_CHANCTRL_COMPL_DCA_EN 0x0200
+#define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100
+#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020
+#define IOAT_CHANCTRL_ERR_INT_EN 0x0010
+#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008
+#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
+#define IOAT_CHANCTRL_INT_REARM 0x0001
+
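+/* Channel MMIO register layout (mapped from PCI BAR 0). */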
+struct ioat_registers {
+ uint8_t chancnt; /* 0x00 */
+ uint8_t xfercap; /* 0x01 */
+ uint8_t genctrl; /* 0x02 */
+ uint8_t intrctrl; /* 0x03 */
+ uint32_t attnstatus; /* 0x04 */
+ uint8_t cbver; /* 0x08 */
+ uint8_t reserved4[0x3]; /* 0x09 */
+ uint16_t intrdelay; /* 0x0C */
+ uint16_t cs_status; /* 0x0E */
+ uint32_t dmacapability; /* 0x10 */
+ uint8_t reserved5[0x6C]; /* 0x14 */
+ uint16_t chanctrl; /* 0x80 */
+ uint8_t reserved6[0x2]; /* 0x82 */
+ uint8_t chancmd; /* 0x84 */
+ uint8_t reserved3[1]; /* 0x85 */
+ uint16_t dmacount; /* 0x86 */
+ uint64_t chansts; /* 0x88 */
+ uint64_t chainaddr; /* 0x90 */
+ uint64_t chancmp; /* 0x98 */
+ uint8_t reserved2[0x8]; /* 0xA0 */
+ uint32_t chanerr; /* 0xA8 */
+ uint32_t chanerrmask; /* 0xAC */
+} __rte_packed;
+
+#define IOAT_CHANCMD_RESET 0x20
+#define IOAT_CHANCMD_SUSPEND 0x04
+
+#define IOAT_CHANCMP_ALIGN 8 /* CHANCMP address must be 64-bit aligned */
+
#ifdef __cplusplus
}
#endif
#include "ioat_hw_defs.h"
+struct ioat_dmadev {
+ struct rte_dma_dev *dmadev;
+ struct rte_dma_vchan_conf qcfg;
+ struct rte_dma_stats stats;
+
+ volatile uint16_t *doorbell __rte_cache_aligned; /* Pointer to the DMACOUNT doorbell register. */
+ phys_addr_t status_addr; /* Physical address of the completion status write-back location. */
+ phys_addr_t ring_addr; /* Physical address of the descriptor ring. */
+
+ struct ioat_dma_hw_desc *desc_ring; /* Virtual address of the descriptor ring. */
+
+ unsigned short next_read; /* Next descriptor index to be checked for completion. */
+ unsigned short next_write; /* Next descriptor index to be written. */
+ unsigned short last_write; /* Used to compute submitted count. */
+ unsigned short offset; /* Used after a device recovery, when the hardware counts restart from 0. */
+ unsigned int failure; /* Used to store chanerr for error handling. */
+
+ /* To report completions, the device will write status back here. */
+ volatile uint64_t status __rte_cache_aligned;
+
+ /* Pointer to the register bar. */
+ volatile struct ioat_registers *regs;
+
+ /* Store the IOAT version. */
+ uint8_t version;
+};
+
extern int ioat_pmd_logtype;
#define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \