1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 Intel Corporation
5 #include <rte_malloc.h>
6 #include <rte_common.h>
9 #include "idxd_internal.h"
11 #define IDXD_PMD_NAME_STR "dmadev_idxd"
/* Dump human-readable state of an idxd dmadev to stream f for debugging.
 * Prints the portal pointer, ring configuration, the batch index ring with
 * read/write pointer markers, the in-progress batch, and the job-id counters.
 * NOTE(review): this view of the file has gaps (the `uint16_t i;` declaration,
 * several fprintf argument lines, closing braces and the return statement are
 * not visible); code lines below are kept exactly as-is.
 */
14 idxd_dump(const struct rte_dma_dev *dev, FILE *f)
16 struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
19 fprintf(f, "== IDXD Private Data ==\n");
20 fprintf(f, "  Portal: %p\n", idxd->portal);
21 fprintf(f, "  Config: { ring_size: %u }\n",
23 fprintf(f, "  Batch ring (sz = %u, max_batches = %u):\n\t",
/* Ring holds max_batches + 1 slots so read == write can distinguish
 * empty from full (see allocation comment in idxd_dmadev_create). */
24 idxd->max_batches + 1, idxd->max_batches);
25 for (i = 0; i <= idxd->max_batches; i++) {
26 fprintf(f, " %u ", idxd->batch_idx_ring[i]);
/* Annotate the slot(s) currently pointed at by the ring's read and
 * write cursors; both markers may land on the same slot. */
27 if (i == idxd->batch_idx_read && i == idxd->batch_idx_write)
28 fprintf(f, "[rd ptr, wr ptr] ");
29 else if (i == idxd->batch_idx_read)
30 fprintf(f, "[rd ptr] ");
31 else if (i == idxd->batch_idx_write)
32 fprintf(f, "[wr ptr] ");
33 if (i == idxd->max_batches)
37 fprintf(f, "  Curr batch: start = %u, size = %u\n", idxd->batch_start, idxd->batch_size);
38 fprintf(f, "  IDS: avail = %u, returned: %u\n", idxd->ids_avail, idxd->ids_returned);
/* Fill *info with the device capabilities of an idxd dmadev.
 * @size is the caller's sizeof(*info); rejected when smaller than ours
 * (ABI guard against a caller compiled with an older struct layout).
 * Reports mem-to-mem copy/fill with error handling; SVA capability is
 * added only when the underlying hardware indicated support.
 * NOTE(review): the rejection return value, the tail of the designated
 * initializer, and the final return are not visible in this view;
 * code lines are kept exactly as-is.
 */
43 idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
45 struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
47 if (size < sizeof(*info))
/* Reset the whole struct, then OR in optional capabilities below. */
50 *info = (struct rte_dma_info) {
51 .dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_HANDLES_ERRORS |
52 RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_FILL,
/* Shared Virtual Addressing: advertised only when probed hardware
 * reported support. */
57 if (idxd->sva_support)
58 info->dev_capa |= RTE_DMA_CAPA_SVA;
/* Validate device-level configuration for an idxd dmadev.
 * Rejects a mismatched conf struct size (ABI guard) and any request for
 * other than exactly one virtual channel — each idxd work queue is
 * exposed as a single-vchan dmadev.
 * NOTE(review): the error return values and the success return are not
 * visible in this view; code lines are kept exactly as-is.
 */
63 idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
66 if (sizeof(struct rte_dma_conf) != conf_sz)
69 if (dev_conf->nb_vchans != 1)
/* Configure the (single) virtual channel: size and allocate the descriptor
 * ring and reset all batch/job-id bookkeeping. Safe to call again on
 * reconfigure — any previous descriptor ring is freed first.
 * NOTE(review): the error returns and any clamping of max_desc between the
 * visible lines are not in this view; code lines are kept exactly as-is.
 */
75 idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
76 const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
78 struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
79 uint16_t max_desc = qconf->nb_desc;
/* ABI guard: reject a vchan conf struct of unexpected size. */
81 if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
/* Ring size must be a power of two so (idx & desc_ring_mask) works as
 * the wrap operation; round the request up if needed. */
86 if (!rte_is_power_of_2(max_desc))
87 max_desc = rte_align32pow2(max_desc);
88 IDXD_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
89 idxd->desc_ring_mask = max_desc - 1;
90 idxd->qcfg.nb_desc = max_desc;
92 /* in case we are reconfiguring a device, free any existing memory */
93 rte_free(idxd->desc_ring);
95 /* allocate the descriptor ring at 2x size as batches can't wrap */
96 idxd->desc_ring = rte_zmalloc(NULL, sizeof(*idxd->desc_ring) * max_desc * 2, 0);
97 if (idxd->desc_ring == NULL)
/* Physical/IOVA address of the ring is what gets programmed into the
 * hardware descriptors. */
99 idxd->desc_iova = rte_mem_virt2iova(idxd->desc_ring);
/* Reset all ring cursors and batch/job-id state for a fresh start. */
101 idxd->batch_idx_read = 0;
102 idxd->batch_idx_write = 0;
103 idxd->batch_start = 0;
104 idxd->batch_size = 0;
105 idxd->ids_returned = 0;
/* Completion ring has max_batches + 1 entries, matching the batch index
 * ring allocated in idxd_dmadev_create; clear any stale completions. */
108 memset(idxd->batch_comp_ring, 0, sizeof(*idxd->batch_comp_ring) *
109 (idxd->max_batches + 1));
/* Create and register a dmadev instance for one idxd work queue.
 * @name: device name to register under; @dev: backing rte_device (bus);
 * @base_idxd: pre-filled driver state copied into the new private struct;
 * @ops: bus-specific dmadev ops table.
 * Allocates the rte_dma_dev, copies the base state, allocates the combined
 * batch-index + batch-completion ring block, and marks the device READY.
 * NOTE(review): the name-validation condition, error-path labels/returns,
 * and the success return are not visible in this view; code lines are kept
 * exactly as-is.
 */
114 idxd_dmadev_create(const char *name, struct rte_device *dev,
115 const struct idxd_dmadev *base_idxd,
116 const struct rte_dma_dev_ops *ops)
118 struct idxd_dmadev *idxd = NULL;
119 struct rte_dma_dev *dmadev = NULL;
/* Compile-time checks that our struct layouts match the hardware
 * descriptor (64B) and completion record (32B) formats. */
122 RTE_BUILD_BUG_ON(sizeof(struct idxd_hw_desc) != 64);
123 RTE_BUILD_BUG_ON(offsetof(struct idxd_hw_desc, size) != 32);
124 RTE_BUILD_BUG_ON(sizeof(struct idxd_completion) != 32);
127 IDXD_PMD_ERR("Invalid name of the device!");
132 /* Allocate device structure */
133 dmadev = rte_dma_pmd_allocate(name, dev->numa_node, sizeof(struct idxd_dmadev));
134 if (dmadev == NULL) {
135 IDXD_PMD_ERR("Unable to allocate dma device");
139 dmadev->dev_ops = ops;
140 dmadev->device = dev;
142 idxd = dmadev->data->dev_private;
143 *idxd = *base_idxd; /* copy over the main fields already passed in */
144 idxd->dmadev = dmadev;
146 /* allocate batch index ring and completion ring.
147 * The +1 is because we can never fully use
148 * the ring, otherwise read == write means both full and empty.
 * Both rings live in one NUMA-local allocation, aligned to the
 * completion-record size; the index ring is laid out after the
 * completion entries (see line 158 below). */
150 idxd->batch_comp_ring = rte_zmalloc_socket(NULL, (sizeof(idxd->batch_idx_ring[0]) +
151 sizeof(idxd->batch_comp_ring[0])) * (idxd->max_batches + 1),
152 sizeof(idxd->batch_comp_ring[0]), dev->numa_node);
153 if (idxd->batch_comp_ring == NULL) {
154 IDXD_PMD_ERR("Unable to reserve memory for batch data\n");
/* Index ring starts right after the (max_batches + 1) completion slots. */
158 idxd->batch_idx_ring = (void *)&idxd->batch_comp_ring[idxd->max_batches+1];
159 idxd->batch_iova = rte_mem_virt2iova(idxd->batch_comp_ring);
161 dmadev->fp_obj->dev_private = idxd;
163 idxd->dmadev->state = RTE_DMA_DEV_READY;
/* Error path: undo the pmd allocation; presumably reached via a cleanup
 * label not visible in this view. */
169 rte_dma_pmd_release(name);
/* Driver-wide log type, registered with DPDK's logging framework at
 * default level WARNING; used by the IDXD_PMD_* logging macros. */
174 int idxd_pmd_logtype;
176 RTE_LOG_REGISTER_DEFAULT(idxd_pmd_logtype, WARNING);