drivers/dma/idxd/idxd_common.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_log.h>

#include "idxd_internal.h"

#define IDXD_PMD_NAME_STR "dmadev_idxd"

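/* Dump the driver-private device state (portal, descriptor ring config and
 * batch ring read/write positions) to the given file, for debugging.
 */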
int
idxd_dump(const struct rte_dma_dev *dev, FILE *f)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	unsigned int i;

	fprintf(f, "== IDXD Private Data ==\n");
	fprintf(f, "  Portal: %p\n", idxd->portal);
	fprintf(f, "  Config: { ring_size: %u }\n",
			idxd->qcfg.nb_desc);
	fprintf(f, "  Batch ring (sz = %u, max_batches = %u):\n\t",
			idxd->max_batches + 1, idxd->max_batches);
	for (i = 0; i <= idxd->max_batches; i++) {
		fprintf(f, " %u ", idxd->batch_idx_ring[i]);
		if (i == idxd->batch_idx_read && i == idxd->batch_idx_write)
			fprintf(f, "[rd ptr, wr ptr] ");
		else if (i == idxd->batch_idx_read)
			fprintf(f, "[rd ptr] ");
		else if (i == idxd->batch_idx_write)
			fprintf(f, "[wr ptr] ");
		if (i == idxd->max_batches)
			fprintf(f, "\n");
	}

	fprintf(f, "  Curr batch: start = %u, size = %u\n", idxd->batch_start, idxd->batch_size);
	fprintf(f, "  IDS: avail = %u, returned = %u\n", idxd->ids_avail, idxd->ids_returned);
	return 0;
}

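/* Report device capabilities and descriptor ring limits; SVA is only
 * advertised when the underlying hardware supports it.
 */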
int
idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;

	if (size < sizeof(*info))
		return -EINVAL;

	*info = (struct rte_dma_info) {
			.dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_HANDLES_ERRORS |
				RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_FILL,
			.max_vchans = 1,
			.max_desc = 4096,
			.min_desc = 64,
	};
	if (idxd->sva_support)
		info->dev_capa |= RTE_DMA_CAPA_SVA;
	return 0;
}

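/* Device-level configuration: the PMD exposes a single vchan, so only the
 * requested vchan count needs validating here.
 */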
int
idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
		uint32_t conf_sz)
{
	if (sizeof(struct rte_dma_conf) != conf_sz)
		return -EINVAL;

	if (dev_conf->nb_vchans != 1)
		return -EINVAL;
	return 0;
}

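/* Set up the single vchan: round the requested ring size up to a power of
 * two, (re)allocate the descriptor ring and reset the batch tracking state.
 */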
int
idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	uint16_t max_desc = qconf->nb_desc;

	if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
		return -EINVAL;

	idxd->qcfg = *qconf;

	if (!rte_is_power_of_2(max_desc))
		max_desc = rte_align32pow2(max_desc);
	IDXD_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
	idxd->desc_ring_mask = max_desc - 1;
	idxd->qcfg.nb_desc = max_desc;

	/* in case we are reconfiguring a device, free any existing memory */
	rte_free(idxd->desc_ring);

	/* allocate the descriptor ring at 2x size as batches can't wrap */
	idxd->desc_ring = rte_zmalloc(NULL, sizeof(*idxd->desc_ring) * max_desc * 2, 0);
	if (idxd->desc_ring == NULL)
		return -ENOMEM;
	idxd->desc_iova = rte_mem_virt2iova(idxd->desc_ring);

	idxd->batch_idx_read = 0;
	idxd->batch_idx_write = 0;
	idxd->batch_start = 0;
	idxd->batch_size = 0;
	idxd->ids_returned = 0;
	idxd->ids_avail = 0;

	memset(idxd->batch_comp_ring, 0, sizeof(*idxd->batch_comp_ring) *
			(idxd->max_batches + 1));
	return 0;
}

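/* Common create helper used at probe time: allocates the rte_dma_dev,
 * copies the caller-provided idxd fields, and allocates the combined
 * batch index ring and batch completion ring.
 */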
int
idxd_dmadev_create(const char *name, struct rte_device *dev,
		   const struct idxd_dmadev *base_idxd,
		   const struct rte_dma_dev_ops *ops)
{
	struct idxd_dmadev *idxd = NULL;
	struct rte_dma_dev *dmadev = NULL;
	int ret = 0;

	RTE_BUILD_BUG_ON(sizeof(struct idxd_hw_desc) != 64);
	RTE_BUILD_BUG_ON(offsetof(struct idxd_hw_desc, size) != 32);
	RTE_BUILD_BUG_ON(sizeof(struct idxd_completion) != 32);

	if (!name) {
		IDXD_PMD_ERR("Invalid name of the device!");
		ret = -EINVAL;
		goto cleanup;
	}

	/* Allocate device structure */
	dmadev = rte_dma_pmd_allocate(name, dev->numa_node, sizeof(struct idxd_dmadev));
	if (dmadev == NULL) {
		IDXD_PMD_ERR("Unable to allocate dma device");
		ret = -ENOMEM;
		goto cleanup;
	}
	dmadev->dev_ops = ops;
	dmadev->device = dev;

	idxd = dmadev->data->dev_private;
	*idxd = *base_idxd; /* copy over the main fields already passed in */
	idxd->dmadev = dmadev;

	/* Allocate batch index ring and completion ring.
	 * The +1 is because we can never fully use the ring, otherwise
	 * read == write would mean both full and empty.
	 */
	idxd->batch_comp_ring = rte_zmalloc_socket(NULL, (sizeof(idxd->batch_idx_ring[0]) +
			sizeof(idxd->batch_comp_ring[0])) * (idxd->max_batches + 1),
			sizeof(idxd->batch_comp_ring[0]), dev->numa_node);
	if (idxd->batch_comp_ring == NULL) {
		IDXD_PMD_ERR("Unable to reserve memory for batch data");
		ret = -ENOMEM;
		goto cleanup;
	}
	idxd->batch_idx_ring = (void *)&idxd->batch_comp_ring[idxd->max_batches + 1];
	idxd->batch_iova = rte_mem_virt2iova(idxd->batch_comp_ring);

	dmadev->fp_obj->dev_private = idxd;

	idxd->dmadev->state = RTE_DMA_DEV_READY;

	return 0;

cleanup:
	if (dmadev)
		rte_dma_pmd_release(name);

	return ret;
}

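/* Driver log type; registered with a default log level of WARNING. */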
int idxd_pmd_logtype;

RTE_LOG_REGISTER_DEFAULT(idxd_pmd_logtype, WARNING);