1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2021 Intel Corporation
5 #include <rte_bus_pci.h>
6 #include <rte_dmadev_pmd.h>
7 #include <rte_malloc.h>
9 #include "ioat_internal.h"
/* Forward declaration; the driver object is defined at the bottom of the file. */
11 static struct rte_pci_driver ioat_pmd_drv;
/* Register this PMD's log type, defaulting to INFO level. */
13 RTE_LOG_REGISTER_DEFAULT(ioat_pmd_logtype, INFO);
/* Size of one hardware descriptor; used when computing ring-entry addresses. */
15 #define DESC_SZ sizeof(struct ioat_dma_hw_desc)
/* PMD name, both as a token (for registration macros) and as a string. */
17 #define IOAT_PMD_NAME dmadev_ioat
18 #define IOAT_PMD_NAME_STR RTE_STR(IOAT_PMD_NAME)
20 /* Configure a device. */
/*
 * dev_ops.dev_configure callback.
 * Rejects a caller whose rte_dma_conf struct size differs from ours (guards
 * against ABI mismatch) and enforces that exactly one vchan is requested,
 * since this hardware exposes a single channel per device.
 */
22 ioat_dev_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
/* ABI guard: config struct size must match what this PMD was built against. */
25 	if (sizeof(struct rte_dma_conf) != conf_sz)
/* Only a single virtual channel is supported. */
28 	if (dev_conf->nb_vchans != 1)
34 /* Setup a virtual channel for IOAT, only 1 vchan is supported. */
/*
 * dev_ops.vchan_setup callback.
 * Allocates and wires up the hardware descriptor ring for the (single)
 * virtual channel. Safe to call again on reconfigure: any previous ring is
 * freed first.
 */
36 ioat_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
37 		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
39 	struct ioat_dmadev *ioat = dev->fp_obj->dev_private;
40 	uint16_t max_desc = qconf->nb_desc;
/* ABI guard: vchan config struct size must match. */
43 	if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
/*
 * Ring size must be a power of two (the rest of the PMD masks indices with
 * nb_desc - 1); round a non-power-of-two request up and record the actual
 * size used.
 */
48 	if (!rte_is_power_of_2(max_desc)) {
49 		max_desc = rte_align32pow2(max_desc);
50 		IOAT_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
51 		ioat->qcfg.nb_desc = max_desc;
54 	/* In case we are reconfiguring a device, free any existing memory. */
55 	rte_free(ioat->desc_ring);
/* Zeroed allocation: descriptors start out fully cleared. */
57 	ioat->desc_ring = rte_zmalloc(NULL, sizeof(*ioat->desc_ring) * max_desc, 0);
58 	if (ioat->desc_ring == NULL)
/* IOVA of the ring base — this is what the hardware is programmed with. */
61 	ioat->ring_addr = rte_mem_virt2iova(ioat->desc_ring);
/* IOVA of the `status` field inside our private struct, so the device can
 * DMA its completion/status writeback directly into it. */
63 	ioat->status_addr = rte_mem_virt2iova(ioat) + offsetof(struct ioat_dmadev, status);
65 	/* Ensure all counters are reset, if reconfiguring/restarting device. */
72 	/* Configure descriptor ring - each one points to next. */
/* Chain entries into a circular list: entry i's `next` holds the IOVA of
 * entry (i + 1) mod nb_desc, so the last descriptor wraps to the first. */
73 	for (i = 0; i < ioat->qcfg.nb_desc; i++) {
74 		ioat->desc_ring[i].next = ioat->ring_addr +
75 				(((i + 1) % ioat->qcfg.nb_desc) * DESC_SZ);
81 /* Get device information of a device. */
/*
 * dev_ops.dev_info_get callback.
 * Reports device capabilities: mem-to-mem transfers with copy and fill ops;
 * devices at hardware version 3.4 or newer additionally report error
 * handling support.
 */
83 ioat_dev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
85 	struct ioat_dmadev *ioat = dev->fp_obj->dev_private;
/* Caller's info struct must be at least as large as ours (ABI guard). */
86 	if (size < sizeof(*info))
88 	info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
89 			RTE_DMA_CAPA_OPS_COPY |
90 			RTE_DMA_CAPA_OPS_FILL;
91 	if (ioat->version >= IOAT_VER_3_4)
92 		info->dev_capa |= RTE_DMA_CAPA_HANDLES_ERRORS;
/* Upper bound on the descriptor ring size exposed to applications. */
95 	info->max_desc = 4096;
99 /* Close a configured device. */
/*
 * dev_ops.dev_close callback.
 * Validates the device handle and releases the descriptor ring allocated by
 * ioat_vchan_setup(). rte_free(NULL) is a no-op, so closing an unconfigured
 * device is harmless.
 */
101 ioat_dev_close(struct rte_dma_dev *dev)
103 	struct ioat_dmadev *ioat;
106 		IOAT_PMD_ERR("Invalid device");
110 	ioat = dev->fp_obj->dev_private;
112 		IOAT_PMD_ERR("Error getting dev_private");
116 	rte_free(ioat->desc_ring);
121 /* Dump DMA device info. */
/*
 * Internal worker for the dev_dump op: prints hardware register state,
 * decoded channel errors, and the PMD's private ring bookkeeping to `f`.
 * Read-only with respect to device state.
 */
123 __dev_dump(void *dev_private, FILE *f)
125 	struct ioat_dmadev *ioat = dev_private;
/* Channel status register masked down to just the status field. */
126 	uint64_t chansts_masked = ioat->regs->chansts & IOAT_CHANSTS_STATUS;
127 	uint32_t chanerr = ioat->regs->chanerr;
/* Ring index mask — valid because vchan_setup forces nb_desc to a power of two. */
128 	uint64_t mask = (ioat->qcfg.nb_desc - 1);
/* Version byte: major in the high nibble, minor in the low nibble. */
129 	char ver = ioat->version;
130 	fprintf(f, "========= IOAT =========\n");
131 	fprintf(f, "  IOAT version: %d.%d\n", ver >> 4, ver & 0xF);
132 	fprintf(f, "  Channel status: %s [0x%"PRIx64"]\n",
133 			chansts_readable[chansts_masked], chansts_masked);
134 	fprintf(f, "  ChainADDR: 0x%"PRIu64"\n", ioat->regs->chainaddr);
136 		fprintf(f, "  No Channel Errors\n");
/* Decode the individual error bits we know about, then flag any leftovers. */
138 		fprintf(f, "  ChanERR: 0x%"PRIu32"\n", chanerr);
139 		if (chanerr & IOAT_CHANERR_INVALID_SRC_ADDR_MASK)
140 			fprintf(f, "    Invalid Source Address\n");
141 		if (chanerr & IOAT_CHANERR_INVALID_DST_ADDR_MASK)
142 			fprintf(f, "    Invalid Destination Address\n");
143 		if (chanerr & IOAT_CHANERR_INVALID_LENGTH_MASK)
144 			fprintf(f, "    Invalid Descriptor Length\n");
145 		if (chanerr & IOAT_CHANERR_DESCRIPTOR_READ_ERROR_MASK)
146 			fprintf(f, "    Descriptor Read Error\n");
147 		if ((chanerr & ~(IOAT_CHANERR_INVALID_SRC_ADDR_MASK |
148 				IOAT_CHANERR_INVALID_DST_ADDR_MASK |
149 				IOAT_CHANERR_INVALID_LENGTH_MASK |
150 				IOAT_CHANERR_DESCRIPTOR_READ_ERROR_MASK)) != 0)
151 			fprintf(f, "    Unknown Error(s)\n");
153 	fprintf(f, "== Private Data ==\n");
154 	fprintf(f, "  Config: { ring_size: %u }\n", ioat->qcfg.nb_desc);
155 	fprintf(f, "  Status: 0x%"PRIx64"\n", ioat->status);
156 	fprintf(f, "  Status IOVA: 0x%"PRIx64"\n", ioat->status_addr);
157 	fprintf(f, "  Status ADDR: %p\n", &ioat->status);
158 	fprintf(f, "  Ring IOVA: 0x%"PRIx64"\n", ioat->ring_addr);
/* Ring base recovered from desc[0].next minus one descriptor size
 * (NOTE(review): hard-codes 64 — assumes DESC_SZ == 64; confirm). */
159 	fprintf(f, "  Ring ADDR: 0x%"PRIx64"\n", ioat->desc_ring[0].next-64);
160 	fprintf(f, "  Next write: %"PRIu16"\n", ioat->next_write);
161 	fprintf(f, "  Next read: %"PRIu16"\n", ioat->next_read);
/* Most recently written descriptor: next_write - 1, wrapped into the ring. */
162 	struct ioat_dma_hw_desc *desc_ring = &ioat->desc_ring[(ioat->next_write - 1) & mask];
163 	fprintf(f, "  Last Descriptor Written {\n");
164 	fprintf(f, "    Size: %"PRIu32"\n", desc_ring->size);
165 	fprintf(f, "    Control: 0x%"PRIx32"\n", desc_ring->u.control_raw);
166 	fprintf(f, "    Src: 0x%"PRIx64"\n", desc_ring->src_addr);
167 	fprintf(f, "    Dest: 0x%"PRIx64"\n", desc_ring->dest_addr);
168 	fprintf(f, "    Next: 0x%"PRIx64"\n", desc_ring->next);
/* Next descriptor to be read back, i.e. the entry at next_read. */
170 	fprintf(f, "  Next Descriptor {\n");
171 	fprintf(f, "    Size: %"PRIu32"\n", ioat->desc_ring[ioat->next_read & mask].size);
172 	fprintf(f, "    Src: 0x%"PRIx64"\n", ioat->desc_ring[ioat->next_read & mask].src_addr);
173 	fprintf(f, "    Dest: 0x%"PRIx64"\n", ioat->desc_ring[ioat->next_read & mask].dest_addr);
174 	fprintf(f, "    Next: 0x%"PRIx64"\n", ioat->desc_ring[ioat->next_read & mask].next);
180 /* Public wrapper for dump. */
/*
 * dev_ops.dev_dump callback: thin adapter that unwraps the private data
 * pointer and delegates to __dev_dump().
 */
182 ioat_dev_dump(const struct rte_dma_dev *dev, FILE *f)
184 	return __dev_dump(dev->fp_obj->dev_private, f);
187 /* Create a DMA device. */
/*
 * Allocates and initializes an rte_dma_dev for one IOAT PCI function:
 * registers the ops table, maps the channel registers from BAR 0, clears any
 * stale errors, resets the channel, and marks the device READY. Returns via
 * the dmadev PMD allocation/release helpers on error paths.
 */
189 ioat_dmadev_create(const char *name, struct rte_pci_device *dev)
/* Ops table shared by all IOAT devices; static so its address stays valid. */
191 	static const struct rte_dma_dev_ops ioat_dmadev_ops = {
192 		.dev_close = ioat_dev_close,
193 		.dev_configure = ioat_dev_configure,
194 		.dev_dump = ioat_dev_dump,
195 		.dev_info_get = ioat_dev_info_get,
196 		.vchan_setup = ioat_vchan_setup,
199 	struct rte_dma_dev *dmadev = NULL;
200 	struct ioat_dmadev *ioat = NULL;
204 		IOAT_PMD_ERR("Invalid name of the device!");
208 	/* Allocate device structure. */
/* Private data lives on the device's NUMA node for locality. */
209 	dmadev = rte_dma_pmd_allocate(name, dev->device.numa_node, sizeof(struct ioat_dmadev));
210 	if (dmadev == NULL) {
211 		IOAT_PMD_ERR("Unable to allocate dma device");
215 	dmadev->device = &dev->device;
/* Expose the private data through the fast-path object as well. */
217 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
219 	dmadev->dev_ops = &ioat_dmadev_ops;
/* Wire up the private struct: register window from BAR 0, doorbell is the
 * dmacount register, ring not yet allocated (done in vchan_setup). */
221 	ioat = dmadev->data->dev_private;
222 	ioat->dmadev = dmadev;
223 	ioat->regs = dev->mem_resource[0].addr;
224 	ioat->doorbell = &ioat->regs->dmacount;
225 	ioat->qcfg.nb_desc = 0;
226 	ioat->desc_ring = NULL;
227 	ioat->version = ioat->regs->cbver;
229 	/* Do device initialization - reset and set error behaviour. */
/* This PMD drives one channel; warn (but continue) if HW reports more. */
230 	if (ioat->regs->chancnt != 1)
231 		IOAT_PMD_WARN("%s: Channel count == %d\n", __func__,
232 				ioat->regs->chancnt);
234 	/* Locked by someone else. */
/* Clear the in-use flag left by a previous owner so we can take the channel. */
235 	if (ioat->regs->chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
236 		IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
237 		ioat->regs->chanctrl = 0;
240 	/* clear any previous errors */
/* Writing the read value back clears the set bits (write-1-to-clear —
 * NOTE(review): presumed from usage; confirm against the IOAT datasheet). */
241 	if (ioat->regs->chanerr != 0) {
242 		uint32_t val = ioat->regs->chanerr;
243 		ioat->regs->chanerr = val;
/* Suspend, then reset the channel and poll until the reset bit self-clears. */
246 	ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND;
248 	ioat->regs->chancmd = IOAT_CHANCMD_RESET;
250 	while (ioat->regs->chancmd & IOAT_CHANCMD_RESET) {
251 		ioat->regs->chainaddr = 0;
/* Bounded wait: give up after 200 polls and release the half-created device. */
253 		if (++retry >= 200) {
254 			IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=%#"PRIx8
255 					", CHANSTS=%#"PRIx64", CHANERR=%#"PRIx32"\n",
259 					ioat->regs->chanerr);
260 			rte_dma_pmd_release(name);
/* Abort on any error and enable completion writeback on errors. */
264 	ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
265 			IOAT_CHANCTRL_ERR_COMPLETION_EN;
267 	dmadev->fp_obj->dev_private = ioat;
269 	dmadev->state = RTE_DMA_DEV_READY;
275 /* Destroy a DMA device. */
/*
 * Releases the named dmadev via the PMD helper (which invokes dev_close).
 * A release failure is only logged at debug level; teardown is best-effort.
 */
277 ioat_dmadev_destroy(const char *name)
282 		IOAT_PMD_ERR("Invalid device name");
286 	ret = rte_dma_pmd_release(name);
288 		IOAT_PMD_DEBUG("Device cleanup failed");
293 /* Probe DMA device. */
/*
 * PCI probe callback: derives the canonical device name from the PCI
 * address, binds this driver to the device, and creates the dmadev.
 */
295 ioat_dmadev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
299 	rte_pci_device_name(&dev->addr, name, sizeof(name));
300 	IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
302 	dev->device.driver = &drv->driver;
303 	return ioat_dmadev_create(name, dev);
306 /* Remove DMA device. */
/*
 * PCI remove callback: recomputes the device name from the PCI address and
 * destroys the matching dmadev.
 */
308 ioat_dmadev_remove(struct rte_pci_device *dev)
312 	rte_pci_device_name(&dev->addr, name, sizeof(name));
314 	IOAT_PMD_INFO("Closing %s on NUMA node %d",
315 			name, dev->device.numa_node);
317 	return ioat_dmadev_destroy(name);
/* PCI IDs of supported IOAT engines (Skylake, Broadwell variants, Ice Lake);
 * the zero vendor_id entry terminates the table. */
320 static const struct rte_pci_id pci_id_ioat_map[] = {
321 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
322 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
323 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
324 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
325 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
326 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
327 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
328 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
329 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
330 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
331 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
332 	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_ICX) },
333 	{ .vendor_id = 0, /* sentinel */ },
/* Driver definition: requires BAR mapping; supports link-state interrupts. */
336 static struct rte_pci_driver ioat_pmd_drv = {
337 	.id_table = pci_id_ioat_map,
338 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
339 	.probe = ioat_dmadev_probe,
340 	.remove = ioat_dmadev_remove,
/* Register the driver, its PCI ID table, and the kernel modules it can bind
 * through (igb_uio, uio_pci_generic, or vfio-pci). */
343 RTE_PMD_REGISTER_PCI(IOAT_PMD_NAME, ioat_pmd_drv);
344 RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_NAME, pci_id_ioat_map);
345 RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_NAME, "* igb_uio | uio_pci_generic | vfio-pci");