/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */
#include <rte_bus_pci.h>
#include <rte_cycles.h>
#include <rte_dmadev_pmd.h>
#include <rte_malloc.h>
#include <rte_prefetch.h>

#include "ioat_internal.h"
static struct rte_pci_driver ioat_pmd_drv;

RTE_LOG_REGISTER_DEFAULT(ioat_pmd_logtype, INFO);

#define DESC_SZ sizeof(struct ioat_dma_hw_desc)

#define IOAT_PMD_NAME dmadev_ioat
#define IOAT_PMD_NAME_STR RTE_STR(IOAT_PMD_NAME)
/* IOAT operations. */
enum rte_ioat_ops {
	ioat_op_copy = 0,	/* Standard DMA Operation */
	ioat_op_fill		/* Block Fill */
};
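/* These opcode values are written into the hardware descriptor's control word
 * (shifted by IOAT_CMD_OP_SHIFT) when a descriptor is built in __write_desc().
 */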
/* Configure a device. */
static int
ioat_dev_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
		uint32_t conf_sz)
{
	if (sizeof(struct rte_dma_conf) != conf_sz)
		return -EINVAL;
	if (dev_conf->nb_vchans != 1)
		return -EINVAL;
	return 0;
}
/* Setup a virtual channel for IOAT, only 1 vchan is supported. */
static int
ioat_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
{
	struct ioat_dmadev *ioat = dev->fp_obj->dev_private;
	uint16_t max_desc = qconf->nb_desc;
	int i;

	if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
		return -EINVAL;

	ioat->qcfg = *qconf;
	if (!rte_is_power_of_2(max_desc)) {
		max_desc = rte_align32pow2(max_desc);
		IOAT_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
		ioat->qcfg.nb_desc = max_desc;
	}

	/* In case we are reconfiguring a device, free any existing memory. */
	rte_free(ioat->desc_ring);
	ioat->desc_ring = rte_zmalloc(NULL, sizeof(*ioat->desc_ring) * max_desc, 0);
	if (ioat->desc_ring == NULL)
		return -ENOMEM;

	ioat->ring_addr = rte_mem_virt2iova(ioat->desc_ring);
	ioat->status_addr = rte_mem_virt2iova(ioat) + offsetof(struct ioat_dmadev, status);

	/* Ensure all counters are reset, if reconfiguring/restarting device. */
	ioat->next_read = 0;
	ioat->next_write = 0;
	ioat->last_write = 0;
	ioat->offset = 0;
	ioat->failure = 0;
	ioat->stats = (struct rte_dma_stats){0};

	/* Configure descriptor ring - each one points to next. */
	for (i = 0; i < ioat->qcfg.nb_desc; i++) {
		ioat->desc_ring[i].next = ioat->ring_addr +
				(((i + 1) % ioat->qcfg.nb_desc) * DESC_SZ);
	}

	return 0;
}
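/* Note: the ring built above is a physically chained circular list. Each
 * descriptor's 'next' field holds the IOVA of the following descriptor and
 * the last entry points back to the first, so the hardware can walk the ring
 * without further software intervention.
 */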
/* Recover IOAT device. */
static int
__ioat_recover(struct ioat_dmadev *ioat)
{
	uint32_t chanerr, retry = 0;
	uint16_t mask = ioat->qcfg.nb_desc - 1;

	/* Clear any channel errors. Reading and writing to chanerr does this. */
	chanerr = ioat->regs->chanerr;
	ioat->regs->chanerr = chanerr;

	/* Reset the channel. */
	ioat->regs->chancmd = IOAT_CHANCMD_RESET;

	/* Write new chain address to trigger state change. */
	ioat->regs->chainaddr = ioat->desc_ring[(ioat->next_read - 1) & mask].next;

	/* Ensure channel control and status addr are correct. */
	ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
			IOAT_CHANCTRL_ERR_COMPLETION_EN;
	ioat->regs->chancmp = ioat->status_addr;

	/* Allow HW time to move to the ARMED state. */
	do {
		rte_delay_ms(1);
		retry++;
	} while (ioat->regs->chansts != IOAT_CHANSTS_ARMED && retry < 200);

	/* Exit as failure if device is still HALTED. */
	if (ioat->regs->chansts != IOAT_CHANSTS_ARMED)
		return -1;

	/* Store next read as the offset, as recovery moves the HW and SW rings out of sync. */
	ioat->offset = ioat->next_read;

	/* Prime status register with previous address. */
	ioat->status = ioat->desc_ring[(ioat->next_read - 2) & mask].next;

	return 0;
}
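/* Note: after a recovery the hardware restarts its descriptor count from the
 * newly written chain address, so __submit() rings the doorbell with
 * (next_write - offset) rather than the raw software write index, keeping the
 * two sides in step.
 */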
/* Start a configured device. */
static int
ioat_dev_start(struct rte_dma_dev *dev)
{
	struct ioat_dmadev *ioat = dev->fp_obj->dev_private;

	if (ioat->qcfg.nb_desc == 0 || ioat->desc_ring == NULL)
		return -EBUSY;

	/* Inform hardware of where the descriptor ring is. */
	ioat->regs->chainaddr = ioat->ring_addr;
	/* Inform hardware of where to write the status/completions. */
	ioat->regs->chancmp = ioat->status_addr;

	/* Prime the status register to be set to the last element. */
	ioat->status = ioat->ring_addr + ((ioat->qcfg.nb_desc - 1) * DESC_SZ);

	IOAT_PMD_DEBUG("IOAT.status: %s [0x%"PRIx64"]",
			chansts_readable[ioat->status & IOAT_CHANSTS_STATUS],
			ioat->status);

	if ((ioat->regs->chansts & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED) {
		IOAT_PMD_WARN("Device HALTED on start, attempting to recover");
		if (__ioat_recover(ioat) != 0) {
			IOAT_PMD_ERR("Device couldn't be recovered");
			return -1;
		}
	}

	return 0;
}
/* Stop a configured device. */
static int
ioat_dev_stop(struct rte_dma_dev *dev)
{
	struct ioat_dmadev *ioat = dev->fp_obj->dev_private;
	uint32_t retry = 0;

	ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND;

	do {
		rte_delay_ms(1);
		retry++;
	} while ((ioat->regs->chansts & IOAT_CHANSTS_STATUS) != IOAT_CHANSTS_SUSPENDED
			&& retry < 200);

	return ((ioat->regs->chansts & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED) ? 0 : -1;
}
/* Get device information of a device. */
static int
ioat_dev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
{
	struct ioat_dmadev *ioat = dev->fp_obj->dev_private;

	if (size < sizeof(*info))
		return -EINVAL;

	info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
			RTE_DMA_CAPA_OPS_COPY |
			RTE_DMA_CAPA_OPS_FILL;
	if (ioat->version >= IOAT_VER_3_4)
		info->dev_capa |= RTE_DMA_CAPA_HANDLES_ERRORS;
	info->max_vchans = 1;
	info->max_desc = 4096;

	return 0;
}
/* Close a configured device. */
static int
ioat_dev_close(struct rte_dma_dev *dev)
{
	struct ioat_dmadev *ioat;

	if (dev == NULL) {
		IOAT_PMD_ERR("Invalid device");
		return -EINVAL;
	}
	ioat = dev->fp_obj->dev_private;
	if (ioat == NULL) {
		IOAT_PMD_ERR("Error getting dev_private");
		return -EINVAL;
	}

	rte_free(ioat->desc_ring);
	return 0;
}
/* Trigger hardware to begin performing enqueued operations. */
static inline void
__submit(struct ioat_dmadev *ioat)
{
	*ioat->doorbell = ioat->next_write - ioat->offset;

	ioat->stats.submitted += (uint16_t)(ioat->next_write - ioat->last_write);

	ioat->last_write = ioat->next_write;
}
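/* Note: the doorbell pointer refers to the channel's DMACOUNT register (set
 * up in ioat_dmadev_create()); writing the cumulative count of valid
 * descriptors tells the hardware how far into the ring it may fetch.
 */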
/* External submit function wrapper. */
static int
ioat_submit(void *dev_private, uint16_t qid __rte_unused)
{
	struct ioat_dmadev *ioat = dev_private;

	__submit(ioat);
	return 0;
}
/* Write descriptor for enqueue. */
static inline int
__write_desc(void *dev_private, uint32_t op, uint64_t src, phys_addr_t dst,
		unsigned int length, uint64_t flags)
{
	struct ioat_dmadev *ioat = dev_private;
	uint16_t ret;
	const unsigned short mask = ioat->qcfg.nb_desc - 1;
	const unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	const unsigned short space = mask + read - write;
	struct ioat_dma_hw_desc *desc;

	if (space == 0)
		return -ENOSPC;

	ioat->next_write = write + 1;
	write &= mask;

	desc = &ioat->desc_ring[write];
	desc->size = length;
	desc->u.control_raw = (uint32_t)((op << IOAT_CMD_OP_SHIFT) |
			(1 << IOAT_COMP_UPDATE_SHIFT));

	/* In IOAT the fence ensures that all operations including the current one
	 * are completed before moving on; dmadev assumes that the fence ensures
	 * all operations before the current one are completed before starting
	 * the current one, so in IOAT we set the fence on the previous descriptor.
	 */
	if (flags & RTE_DMA_OP_FLAG_FENCE)
		ioat->desc_ring[(write - 1) & mask].u.control.fence = 1;

	desc->src_addr = src;
	desc->dest_addr = dst;

	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);

	ret = (uint16_t)(ioat->next_write - 1);

	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
		__submit(ioat);

	return ret;
}
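/* Note: next_read and next_write are free-running 16-bit counters; they are
 * only masked with (nb_desc - 1) when indexing the ring, and the unmasked
 * value (next_write - 1) is what is handed back to the application and later
 * reported again by the completion calls.
 */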
/* Enqueue a fill operation onto the ioat device. */
static int
ioat_enqueue_fill(void *dev_private, uint16_t qid __rte_unused, uint64_t pattern,
		rte_iova_t dst, unsigned int length, uint64_t flags)
{
	return __write_desc(dev_private, ioat_op_fill, pattern, dst, length, flags);
}
/* Enqueue a copy operation onto the ioat device. */
static int
ioat_enqueue_copy(void *dev_private, uint16_t qid __rte_unused, rte_iova_t src,
		rte_iova_t dst, unsigned int length, uint64_t flags)
{
	return __write_desc(dev_private, ioat_op_copy, src, dst, length, flags);
}
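/* Note: both enqueue paths share __write_desc(); the only differences are the
 * opcode and that, for a fill, the 64-bit "source" argument carries the fill
 * pattern rather than an IOVA.
 */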
/* Dump DMA device info. */
static int
__dev_dump(void *dev_private, FILE *f)
{
	struct ioat_dmadev *ioat = dev_private;
	uint64_t chansts_masked = ioat->regs->chansts & IOAT_CHANSTS_STATUS;
	uint32_t chanerr = ioat->regs->chanerr;
	uint64_t mask = (ioat->qcfg.nb_desc - 1);
	char ver = ioat->version;

	fprintf(f, "========= IOAT =========\n");
	fprintf(f, "  IOAT version: %d.%d\n", ver >> 4, ver & 0xF);
	fprintf(f, "  Channel status: %s [0x%"PRIx64"]\n",
			chansts_readable[chansts_masked], chansts_masked);
	fprintf(f, "  ChainADDR: 0x%"PRIx64"\n", ioat->regs->chainaddr);
	if (chanerr == 0) {
		fprintf(f, "  No Channel Errors\n");
	} else {
		fprintf(f, "  ChanERR: 0x%"PRIx32"\n", chanerr);
		if (chanerr & IOAT_CHANERR_INVALID_SRC_ADDR_MASK)
			fprintf(f, "    Invalid Source Address\n");
		if (chanerr & IOAT_CHANERR_INVALID_DST_ADDR_MASK)
			fprintf(f, "    Invalid Destination Address\n");
		if (chanerr & IOAT_CHANERR_INVALID_LENGTH_MASK)
			fprintf(f, "    Invalid Descriptor Length\n");
		if (chanerr & IOAT_CHANERR_DESCRIPTOR_READ_ERROR_MASK)
			fprintf(f, "    Descriptor Read Error\n");
		if ((chanerr & ~(IOAT_CHANERR_INVALID_SRC_ADDR_MASK |
				IOAT_CHANERR_INVALID_DST_ADDR_MASK |
				IOAT_CHANERR_INVALID_LENGTH_MASK |
				IOAT_CHANERR_DESCRIPTOR_READ_ERROR_MASK)) != 0)
			fprintf(f, "    Unknown Error(s)\n");
	}

	fprintf(f, "== Private Data ==\n");
	fprintf(f, "  Config: { ring_size: %u }\n", ioat->qcfg.nb_desc);
	fprintf(f, "  Status: 0x%"PRIx64"\n", ioat->status);
	fprintf(f, "  Status IOVA: 0x%"PRIx64"\n", ioat->status_addr);
	fprintf(f, "  Status ADDR: %p\n", &ioat->status);
	fprintf(f, "  Ring IOVA: 0x%"PRIx64"\n", ioat->ring_addr);
	fprintf(f, "  Ring ADDR: 0x%"PRIx64"\n", ioat->desc_ring[0].next - DESC_SZ);
	fprintf(f, "  Next write: %"PRIu16"\n", ioat->next_write);
	fprintf(f, "  Next read: %"PRIu16"\n", ioat->next_read);

	struct ioat_dma_hw_desc *desc_ring = &ioat->desc_ring[(ioat->next_write - 1) & mask];
	fprintf(f, "  Last Descriptor Written {\n");
	fprintf(f, "    Size: %"PRIu32"\n", desc_ring->size);
	fprintf(f, "    Control: 0x%"PRIx32"\n", desc_ring->u.control_raw);
	fprintf(f, "    Src: 0x%"PRIx64"\n", desc_ring->src_addr);
	fprintf(f, "    Dest: 0x%"PRIx64"\n", desc_ring->dest_addr);
	fprintf(f, "    Next: 0x%"PRIx64"\n", desc_ring->next);
	fprintf(f, "  }\n");

	fprintf(f, "  Next Descriptor {\n");
	fprintf(f, "    Size: %"PRIu32"\n", ioat->desc_ring[ioat->next_read & mask].size);
	fprintf(f, "    Src: 0x%"PRIx64"\n", ioat->desc_ring[ioat->next_read & mask].src_addr);
	fprintf(f, "    Dest: 0x%"PRIx64"\n", ioat->desc_ring[ioat->next_read & mask].dest_addr);
	fprintf(f, "    Next: 0x%"PRIx64"\n", ioat->desc_ring[ioat->next_read & mask].next);
	fprintf(f, "  }\n");

	fprintf(f, "  Key Stats { submitted: %"PRIu64", comp: %"PRIu64", failed: %"PRIu64" }\n",
			ioat->stats.submitted,
			ioat->stats.completed,
			ioat->stats.errors);

	return 0;
}
/* Public wrapper for dump. */
static int
ioat_dev_dump(const struct rte_dma_dev *dev, FILE *f)
{
	return __dev_dump(dev->fp_obj->dev_private, f);
}
/* Returns the index of the last completed operation. */
static inline uint16_t
__get_last_completed(const struct ioat_dmadev *ioat, int *state)
{
	/* Status register contains the address of the completed operation. */
	uint64_t status = ioat->status;

	/* The lower 3 bits indicate the "transfer status": active, idle, halted.
	 * We can ignore bit 0.
	 */
	*state = status & IOAT_CHANSTS_STATUS;

	/* If we are just after recovering from an error, the address held in
	 * status will be 0; in that case return offset - 1 as the last completed.
	 * Otherwise subtract the ring base address from the status value, which
	 * gives a byte offset into the ring; right-shifting by 6 (dividing by the
	 * 64-byte descriptor size) turns that into the index of the completion as
	 * the hardware sees it. Callers translate this to the software view by
	 * masking with the ring size.
	 */
	if ((status & ~IOAT_CHANSTS_STATUS) == 0)
		return ioat->offset - 1;

	return (status - ioat->ring_addr) >> 6;
}
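/* Note: each hardware descriptor is 64 bytes (DESC_SZ), so the completion
 * address written back by the device is always 64-byte aligned; that is what
 * allows the low bits of the same register to double as the transfer-status
 * field.
 */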
/* Translates IOAT ChanERRs to DMA error codes. */
static inline enum rte_dma_status_code
__translate_status_ioat_to_dma(uint32_t chanerr)
{
	if (chanerr & IOAT_CHANERR_INVALID_SRC_ADDR_MASK)
		return RTE_DMA_STATUS_INVALID_SRC_ADDR;
	else if (chanerr & IOAT_CHANERR_INVALID_DST_ADDR_MASK)
		return RTE_DMA_STATUS_INVALID_DST_ADDR;
	else if (chanerr & IOAT_CHANERR_INVALID_LENGTH_MASK)
		return RTE_DMA_STATUS_INVALID_LENGTH;
	else if (chanerr & IOAT_CHANERR_DESCRIPTOR_READ_ERROR_MASK)
		return RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR;
	else
		return RTE_DMA_STATUS_ERROR_UNKNOWN;
}
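/* Note: only the first matching error bit is translated, so if CHANERR has
 * several bits set the remaining causes are not reported to the application.
 */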
/* Returns details of operations that have been completed. */
static uint16_t
ioat_completed(void *dev_private, uint16_t qid __rte_unused, const uint16_t max_ops,
		uint16_t *last_idx, bool *has_error)
{
	struct ioat_dmadev *ioat = dev_private;

	const unsigned short mask = (ioat->qcfg.nb_desc - 1);
	const unsigned short read = ioat->next_read;
	unsigned short last_completed, count;
	int state, fails = 0;

	/* Do not do any work if there is an uncleared error. */
	if (ioat->failure != 0) {
		*has_error = true;
		*last_idx = ioat->next_read - 2;
		return 0;
	}

	last_completed = __get_last_completed(ioat, &state);
	count = (last_completed + 1 - read) & mask;

	/* Cap count at max_ops or set as last run in batch. */
	if (count > max_ops)
		count = max_ops;

	if (count == max_ops || state != IOAT_CHANSTS_HALTED) {
		ioat->next_read = read + count;
		*last_idx = ioat->next_read - 1;
	} else {
		*has_error = true;
		fails++;
		ioat->failure = ioat->regs->chanerr;
		ioat->next_read = read + count + 1;
		if (__ioat_recover(ioat) != 0) {
			IOAT_PMD_ERR("Device HALTED and could not be recovered");
			__dev_dump(dev_private, stdout);
			return 0;
		}
		*last_idx = ioat->next_read - 2;
	}

	ioat->stats.completed += count;
	ioat->stats.errors += fails;

	return count;
}
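/* Note: when a halt is detected above, next_read is advanced past the failed
 * descriptor (read + count + 1), so the "-2" used for last_idx points at the
 * last successfully completed operation; the raw CHANERR value is kept in
 * ioat->failure so a later completed_status() call can report the exact cause.
 */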
/* Returns detailed status information about operations that have been completed. */
static uint16_t
ioat_completed_status(void *dev_private, uint16_t qid __rte_unused,
		uint16_t max_ops, uint16_t *last_idx, enum rte_dma_status_code *status)
{
	struct ioat_dmadev *ioat = dev_private;

	const unsigned short mask = (ioat->qcfg.nb_desc - 1);
	const unsigned short read = ioat->next_read;
	unsigned short count, last_completed;
	int state, fails = 0;
	uint16_t i;

	last_completed = __get_last_completed(ioat, &state);
	count = (last_completed + 1 - read) & mask;

	for (i = 0; i < RTE_MIN(count + 1, max_ops); i++)
		status[i] = RTE_DMA_STATUS_SUCCESSFUL;

	/* Cap count at max_ops or set as last run in batch. */
	if (count > max_ops)
		count = max_ops;

	if (count == max_ops || state != IOAT_CHANSTS_HALTED)
		ioat->next_read = read + count;
	else {
		fails++;
		status[count] = __translate_status_ioat_to_dma(ioat->regs->chanerr);
		count++;
		ioat->next_read = read + count;
		if (__ioat_recover(ioat) != 0) {
			IOAT_PMD_ERR("Device HALTED and could not be recovered");
			__dev_dump(dev_private, stdout);
			return 0;
		}
	}

	/* Report an error previously recorded by ioat_completed(), if any. */
	if (ioat->failure > 0) {
		status[0] = __translate_status_ioat_to_dma(ioat->failure);
		count = RTE_MIN(count + 1, max_ops);
		ioat->failure = 0;
	}

	*last_idx = ioat->next_read - 1;

	ioat->stats.completed += count;
	ioat->stats.errors += fails;

	return count;
}
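/* Note: this path reports failures inline through the status[] array rather
 * than via a has_error flag, which is why applications that need the exact
 * failure cause should drain completions with rte_dma_completed_status().
 */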
/* Get the remaining capacity of the ring. */
static uint16_t
ioat_burst_capacity(const void *dev_private, uint16_t vchan __rte_unused)
{
	const struct ioat_dmadev *ioat = dev_private;
	unsigned short size = ioat->qcfg.nb_desc - 1;
	unsigned short read = ioat->next_read;
	unsigned short write = ioat->next_write;
	unsigned short space = size - (write - read);

	return space;
}
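/* Note: capacity is reported against (nb_desc - 1) rather than nb_desc; one
 * slot is always kept unused so that a full ring can be distinguished from an
 * empty one, matching the space check in __write_desc().
 */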
/* Retrieve the generic stats of a DMA device. */
static int
ioat_stats_get(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
		struct rte_dma_stats *rte_stats, uint32_t size)
{
	struct rte_dma_stats *stats = (&((struct ioat_dmadev *)dev->fp_obj->dev_private)->stats);

	if (size < sizeof(*rte_stats))
		return -EINVAL;
	if (rte_stats == NULL)
		return -EINVAL;

	*rte_stats = *stats;
	return 0;
}
/* Reset the generic stat counters for the DMA device. */
static int
ioat_stats_reset(struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
{
	struct ioat_dmadev *ioat = dev->fp_obj->dev_private;

	ioat->stats = (struct rte_dma_stats){0};
	return 0;
}
/* Check if the IOAT device is idle. */
static int
ioat_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
		enum rte_dma_vchan_status *status)
{
	int state = 0;
	const struct ioat_dmadev *ioat = dev->fp_obj->dev_private;
	const uint16_t mask = ioat->qcfg.nb_desc - 1;
	const uint16_t last = __get_last_completed(ioat, &state);

	if (state == IOAT_CHANSTS_HALTED || state == IOAT_CHANSTS_SUSPENDED)
		*status = RTE_DMA_VCHAN_HALTED_ERROR;
	else if (last == ((ioat->next_write - 1) & mask))
		*status = RTE_DMA_VCHAN_IDLE;
	else
		*status = RTE_DMA_VCHAN_ACTIVE;

	return 0;
}
/* Create a DMA device. */
static int
ioat_dmadev_create(const char *name, struct rte_pci_device *dev)
{
	static const struct rte_dma_dev_ops ioat_dmadev_ops = {
		.dev_close = ioat_dev_close,
		.dev_configure = ioat_dev_configure,
		.dev_dump = ioat_dev_dump,
		.dev_info_get = ioat_dev_info_get,
		.dev_start = ioat_dev_start,
		.dev_stop = ioat_dev_stop,
		.stats_get = ioat_stats_get,
		.stats_reset = ioat_stats_reset,
		.vchan_status = ioat_vchan_status,
		.vchan_setup = ioat_vchan_setup,
	};

	struct rte_dma_dev *dmadev = NULL;
	struct ioat_dmadev *ioat = NULL;
	int retry = 0;

	if (name == NULL) {
		IOAT_PMD_ERR("Invalid name of the device!");
		return -EINVAL;
	}

	/* Allocate device structure. */
	dmadev = rte_dma_pmd_allocate(name, dev->device.numa_node, sizeof(struct ioat_dmadev));
	if (dmadev == NULL) {
		IOAT_PMD_ERR("Unable to allocate dma device");
		return -ENOMEM;
	}

	dmadev->device = &dev->device;

	dmadev->fp_obj->dev_private = dmadev->data->dev_private;

	dmadev->dev_ops = &ioat_dmadev_ops;

	dmadev->fp_obj->burst_capacity = ioat_burst_capacity;
	dmadev->fp_obj->completed = ioat_completed;
	dmadev->fp_obj->completed_status = ioat_completed_status;
	dmadev->fp_obj->copy = ioat_enqueue_copy;
	dmadev->fp_obj->fill = ioat_enqueue_fill;
	dmadev->fp_obj->submit = ioat_submit;

	ioat = dmadev->data->dev_private;
	ioat->dmadev = dmadev;
	ioat->regs = dev->mem_resource[0].addr;
	ioat->doorbell = &ioat->regs->dmacount;
	ioat->qcfg.nb_desc = 0;
	ioat->desc_ring = NULL;
	ioat->version = ioat->regs->cbver;

	/* Do device initialization - reset and set error behaviour. */
	if (ioat->regs->chancnt != 1)
		IOAT_PMD_WARN("%s: Channel count == %d", __func__,
				ioat->regs->chancnt);

	/* Locked by someone else. */
	if (ioat->regs->chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
		IOAT_PMD_WARN("%s: Channel appears locked", __func__);
		ioat->regs->chanctrl = 0;
	}

	/* Clear any previous errors. */
	if (ioat->regs->chanerr != 0) {
		uint32_t val = ioat->regs->chanerr;
		ioat->regs->chanerr = val;
	}

	ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND;
	rte_delay_ms(1);
	ioat->regs->chancmd = IOAT_CHANCMD_RESET;
	rte_delay_ms(1);
	while (ioat->regs->chancmd & IOAT_CHANCMD_RESET) {
		ioat->regs->chainaddr = 0;
		rte_delay_ms(1);
		if (++retry >= 200) {
			IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=%#"PRIx8
					", CHANSTS=%#"PRIx64", CHANERR=%#"PRIx32,
					__func__,
					ioat->regs->chancmd,
					ioat->regs->chansts,
					ioat->regs->chanerr);
			rte_dma_pmd_release(name);
			return -EIO;
		}
	}

	ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
			IOAT_CHANCTRL_ERR_COMPLETION_EN;

	dmadev->fp_obj->dev_private = ioat;

	dmadev->state = RTE_DMA_DEV_READY;

	return 0;
}
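/* Note: fp_obj->dev_private is assigned twice above; both assignments point
 * at the same per-device private area (dmadev->data->dev_private), the second
 * simply reuses the typed local 'ioat' pointer once initialization is done.
 */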
/* Destroy a DMA device. */
static int
ioat_dmadev_destroy(const char *name)
{
	int ret;

	if (name == NULL) {
		IOAT_PMD_ERR("Invalid device name");
		return -EINVAL;
	}

	ret = rte_dma_pmd_release(name);
	if (ret)
		IOAT_PMD_DEBUG("Device cleanup failed");

	return 0;
}
/* Probe DMA device. */
static int
ioat_dmadev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
	char name[32];

	rte_pci_device_name(&dev->addr, name, sizeof(name));
	IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);

	dev->device.driver = &drv->driver;
	return ioat_dmadev_create(name, dev);
}
/* Remove DMA device. */
static int
ioat_dmadev_remove(struct rte_pci_device *dev)
{
	char name[32];

	rte_pci_device_name(&dev->addr, name, sizeof(name));

	IOAT_PMD_INFO("Closing %s on NUMA node %d",
			name, dev->device.numa_node);

	return ioat_dmadev_destroy(name);
}
static const struct rte_pci_id pci_id_ioat_map[] = {
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_ICX) },
	{ .vendor_id = 0, /* sentinel */ },
};
static struct rte_pci_driver ioat_pmd_drv = {
	.id_table = pci_id_ioat_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = ioat_dmadev_probe,
	.remove = ioat_dmadev_remove,
};

RTE_PMD_REGISTER_PCI(IOAT_PMD_NAME, ioat_pmd_drv);
RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_NAME, pci_id_ioat_map);
RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_NAME, "* igb_uio | uio_pci_generic | vfio-pci");
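/*
 * Minimal application-side usage sketch (not part of the driver). It assumes
 * the device is bound to vfio-pci and enumerated as dmadev 0 with a single
 * vchan, and uses only the generic rte_dma_* API exported by the dmadev
 * library; src_iova/dst_iova/len are placeholders for the caller's buffers.
 *
 *	struct rte_dma_conf conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf qconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 512,
 *	};
 *	uint16_t last_idx;
 *	bool has_error;
 *
 *	rte_dma_configure(0, &conf);
 *	rte_dma_vchan_setup(0, 0, &qconf);
 *	rte_dma_start(0);
 *
 *	rte_dma_copy(0, 0, src_iova, dst_iova, len, RTE_DMA_OP_FLAG_SUBMIT);
 *	while (rte_dma_completed(0, 0, 1, &last_idx, &has_error) == 0)
 *		;
 */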