/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include <rte_bus_pci.h>
#include <rte_devargs.h>
#include <rte_dmadev_pmd.h>
#include <rte_malloc.h>

#include "idxd_internal.h"

#define IDXD_VENDOR_ID          0x8086
#define IDXD_DEVICE_ID_SPR      0x0B25

#define IDXD_PMD_DMADEV_NAME_PCI dmadev_idxd_pci

const struct rte_pci_id pci_id_idxd_map[] = {
        { RTE_PCI_DEVICE(IDXD_VENDOR_ID, IDXD_DEVICE_ID_SPR) },
        { .vendor_id = 0, /* sentinel */ },
};

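/*
 * Issue a command to the device by writing the CMD register, then
 * busy-poll CMDSTATUS until the hardware clears the active flag.
 * Commands in the idxd_disable_wq..idxd_reset_wq range operate on a
 * single WQ and take a bitmask of WQ ids as operand, hence the
 * (1 << qid) conversion. Returns 0 on success, non-zero on error or
 * timeout.
 */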
static inline int
idxd_pci_dev_command(struct idxd_dmadev *idxd, enum rte_idxd_cmds command)
{
        /* cmdstatus is a 32-bit register; a narrower type would truncate
         * CMDSTATUS_ACTIVE_MASK and make the poll loop below exit early
         */
        uint32_t err_code;
        uint16_t qid = idxd->qid;
        int i = 0;

        if (command >= idxd_disable_wq && command <= idxd_reset_wq)
                qid = (1 << qid);
        rte_spinlock_lock(&idxd->u.pci->lk);
        idxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;

        do {
                rte_pause();
                err_code = idxd->u.pci->regs->cmdstatus;
                if (++i >= 1000) {
                        IDXD_PMD_ERR("Timeout waiting for command response from HW");
                        rte_spinlock_unlock(&idxd->u.pci->lk);
                        return err_code;
                }
        } while (err_code & CMDSTATUS_ACTIVE_MASK);
        rte_spinlock_unlock(&idxd->u.pci->lk);

        err_code &= CMDSTATUS_ERR_MASK;
        return -(int)err_code;
}

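/*
 * Get a pointer to the config registers of the given work queue. WQCAP
 * reports the per-WQ config size as a power-of-two multiple of 32
 * bytes, hence the shift by (5 + wq_cfg_sz).
 */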
static uint32_t *
idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
{
        return RTE_PTR_ADD(pci->wq_regs_base,
                        (uintptr_t)wq_idx << (5 + pci->wq_cfg_sz));
}

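/* check whether the hardware reports this queue's state as enabled (0x1) */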
static int
idxd_is_wq_enabled(struct idxd_dmadev *idxd)
{
        uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[wq_state_idx];
        return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}

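/* stop callback: disable the work queue backing this dmadev and verify it stopped */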
static int
idxd_pci_dev_stop(struct rte_dma_dev *dev)
{
        struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
        int err_code;

        if (!idxd_is_wq_enabled(idxd)) {
                IDXD_PMD_ERR("Work queue %d already disabled", idxd->qid);
                return 0;
        }

        err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
        if (err_code || idxd_is_wq_enabled(idxd)) {
                IDXD_PMD_ERR("Failed disabling work queue %d, error code: %d",
                                idxd->qid, err_code);
                return err_code == 0 ? -1 : err_code;
        }
        IDXD_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);

        return 0;
}

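/* start callback: enable the work queue backing this dmadev, which must already be configured */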
static int
idxd_pci_dev_start(struct rte_dma_dev *dev)
{
        struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
        int err_code;

        if (idxd_is_wq_enabled(idxd)) {
                IDXD_PMD_WARN("WQ %d already enabled", idxd->qid);
                return 0;
        }

        if (idxd->desc_ring == NULL) {
                IDXD_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
                return -EINVAL;
        }

        err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
        if (err_code || !idxd_is_wq_enabled(idxd)) {
                IDXD_PMD_ERR("Failed enabling work queue %d, error code: %d",
                                idxd->qid, err_code);
                return err_code == 0 ? -1 : err_code;
        }
        IDXD_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);

        return 0;
}

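/* close callback: disable the whole device and free the rings allocated for this queue */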
static int
idxd_pci_dev_close(struct rte_dma_dev *dev)
{
        struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
        int err_code;

        /* disable the device */
        err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
        if (err_code) {
                IDXD_PMD_ERR("Error disabling device: code %d", err_code);
                return err_code;
        }
        IDXD_PMD_DEBUG("IDXD Device disabled OK");

        /* free device memory */
        IDXD_PMD_DEBUG("Freeing device driver memory");
        rte_free(idxd->batch_idx_ring);
        rte_free(idxd->desc_ring);

        return 0;
}

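/*
 * Device operations for PCI-probed idxd devices: start, stop and close
 * are PCI-specific and defined above; the remaining callbacks are the
 * generic implementations from the common idxd code.
 */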
static const struct rte_dma_dev_ops idxd_pci_ops = {
        .dev_close = idxd_pci_dev_close,
        .dev_dump = idxd_dump,
        .dev_configure = idxd_configure,
        .vchan_setup = idxd_vchan_setup,
        .dev_info_get = idxd_info_get,
        .stats_get = idxd_stats_get,
        .stats_reset = idxd_stats_reset,
        .dev_start = idxd_pci_dev_start,
        .dev_stop = idxd_pci_dev_stop,
        .vchan_status = idxd_vchan_status,
};

/* each portal uses 4 x 4k pages */
#define IDXD_PORTAL_SIZE (4096 * 4)

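/*
 * One-time device-level setup: map the register, WQ config and portal
 * regions from the PCI BARs, sanity-check the device state, then split
 * the available WQ space, engines and groups across the work queues
 * and enable the device. Returns the number of usable work queues, or
 * a negative value on error.
 */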
static int
init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
                unsigned int max_queues)
{
        struct idxd_pci_common *pci;
        uint8_t nb_groups, nb_engines, nb_wqs;
        uint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */
        uint16_t wq_size, total_wq_size;
        uint8_t lg2_max_batch, lg2_max_copy_size;
        unsigned int i;
        int err_code; /* signed, so negative error codes propagate to the caller */

        pci = malloc(sizeof(*pci));
        if (pci == NULL) {
                IDXD_PMD_ERR("%s: Can't allocate memory", __func__);
                err_code = -1;
                goto err;
        }
        rte_spinlock_init(&pci->lk);

        /* assign the bar registers, and then configure device */
        pci->regs = dev->mem_resource[0].addr;
        grp_offset = (uint16_t)pci->regs->offsets[0];
        pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
        wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
        pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
        pci->portals = dev->mem_resource[2].addr;
        pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;

        /* sanity check device status */
        if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
                /* need function-level-reset (FLR) or is enabled */
                IDXD_PMD_ERR("Device status is not disabled, cannot init");
                err_code = -1;
                goto err;
        }
        if (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {
                /* command in progress */
                IDXD_PMD_ERR("Device has a command in progress, cannot init");
                err_code = -1;
                goto err;
        }

        /* read basic info about the hardware for use when configuring */
        nb_groups = (uint8_t)pci->regs->grpcap;
        nb_engines = (uint8_t)pci->regs->engcap;
        nb_wqs = (uint8_t)(pci->regs->wqcap >> 16);
        total_wq_size = (uint16_t)pci->regs->wqcap;
        lg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;
        lg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;

        /* defensive check: a device reporting no work queues is unusable
         * and would cause a divide-by-zero when splitting the WQ space
         */
        if (nb_wqs == 0) {
                IDXD_PMD_ERR("Error, device has no work queues");
                err_code = -1;
                goto err;
        }

        IDXD_PMD_DEBUG("nb_groups = %u, nb_engines = %u, nb_wqs = %u",
                        nb_groups, nb_engines, nb_wqs);

        /* zero out any old config */
        for (i = 0; i < nb_groups; i++) {
                pci->grp_regs[i].grpengcfg = 0;
                pci->grp_regs[i].grpwqcfg[0] = 0;
        }
        for (i = 0; i < nb_wqs; i++)
                idxd_get_wq_cfg(pci, i)[0] = 0;

        /* limit queues if necessary */
        if (max_queues != 0 && nb_wqs > max_queues) {
                nb_wqs = max_queues;
                if (nb_engines > max_queues)
                        nb_engines = max_queues;
                if (nb_groups > max_queues)
                        nb_groups = max_queues;
                IDXD_PMD_DEBUG("Limiting queues to %u", nb_wqs);
        }

        /* put each engine into a separate group to avoid reordering */
        if (nb_groups > nb_engines)
                nb_groups = nb_engines;
        if (nb_groups < nb_engines)
                nb_engines = nb_groups;

        /* assign engines to groups, round-robin style */
        for (i = 0; i < nb_engines; i++) {
                IDXD_PMD_DEBUG("Assigning engine %u to group %u",
                                i, i % nb_groups);
                pci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);
        }

        /* now do the same for queues and give work slots to each queue */
        wq_size = total_wq_size / nb_wqs;
        IDXD_PMD_DEBUG("Work queue size = %u, max batch = 2^%u, max copy = 2^%u",
                        wq_size, lg2_max_batch, lg2_max_copy_size);
        for (i = 0; i < nb_wqs; i++) {
                /* add work queue "i" to a group */
                IDXD_PMD_DEBUG("Assigning work queue %u to group %u",
                                i, i % nb_groups);
                pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
                /* now configure it, in terms of size, max batch, mode */
                idxd_get_wq_cfg(pci, i)[wq_size_idx] = wq_size;
                idxd_get_wq_cfg(pci, i)[wq_mode_idx] = (1 << WQ_PRIORITY_SHIFT) |
                                WQ_MODE_DEDICATED;
                idxd_get_wq_cfg(pci, i)[wq_sizes_idx] = lg2_max_copy_size |
                                (lg2_max_batch << WQ_BATCH_SZ_SHIFT);
        }

        /* dump the group configuration to output */
        for (i = 0; i < nb_groups; i++) {
                IDXD_PMD_DEBUG("## Group %d", i);
                IDXD_PMD_DEBUG("    GRPWQCFG: %"PRIx64, pci->grp_regs[i].grpwqcfg[0]);
                IDXD_PMD_DEBUG("    GRPENGCFG: %"PRIx64, pci->grp_regs[i].grpengcfg);
                IDXD_PMD_DEBUG("    GRPFLAGS: %"PRIx32, pci->grp_regs[i].grpflags);
        }

        idxd->u.pci = pci;
        idxd->max_batches = wq_size;
        idxd->max_batch_size = 1 << lg2_max_batch;

        /* enable the device itself */
        err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
        if (err_code) {
                IDXD_PMD_ERR("Error enabling device: code %d", err_code);
                goto err;
        }
        IDXD_PMD_DEBUG("IDXD Device enabled OK");

        return nb_wqs;

err:
        free(pci);
        return err_code;
}

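/*
 * Probe an idxd PCI device: initialize the hardware, then create one
 * dmadev per work queue. The number of queues used can be capped with
 * the "max_queues=N" devarg, e.g. -a <pci-bdf>,max_queues=4 on the EAL
 * command line.
 */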
static int
idxd_dmadev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
        struct idxd_dmadev idxd = {0};
        uint8_t nb_wqs;
        int qid, ret = 0;
        char name[PCI_PRI_STR_SIZE];
        unsigned int max_queues = 0;

        rte_pci_device_name(&dev->addr, name, sizeof(name));
        IDXD_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
        dev->device.driver = &drv->driver;

        if (dev->device.devargs && dev->device.devargs->args[0] != '\0') {
                /* if the number of devargs grows beyond just 1, use rte_kvargs */
                if (sscanf(dev->device.devargs->args,
                                "max_queues=%u", &max_queues) != 1) {
                        IDXD_PMD_ERR("Invalid device parameter: '%s'",
                                        dev->device.devargs->args);
                        return -1;
                }
        }

        ret = init_pci_device(dev, &idxd, max_queues);
        if (ret < 0) {
                IDXD_PMD_ERR("Error initializing PCI hardware");
                return ret;
        }
        if (idxd.u.pci->portals == NULL) {
                IDXD_PMD_ERR("Error, invalid portal assigned during initialization");
                free(idxd.u.pci);
                return -EINVAL;
        }
        nb_wqs = (uint8_t)ret;

        /* set up one device for each queue */
        for (qid = 0; qid < nb_wqs; qid++) {
                char qname[32];

                /* add the queue number to each device name */
                snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
                idxd.qid = qid;
                idxd.portal = RTE_PTR_ADD(idxd.u.pci->portals,
                                qid * IDXD_PORTAL_SIZE);
                if (idxd_is_wq_enabled(&idxd))
                        IDXD_PMD_ERR("Error, WQ %u seems enabled", qid);
                ret = idxd_dmadev_create(qname, &dev->device,
                                &idxd, &idxd_pci_ops);
                if (ret != 0) {
                        IDXD_PMD_ERR("Failed to create dmadev %s", name);
                        if (qid == 0) /* if no devices using this, free pci */
                                free(idxd.u.pci);
                        return ret;
                }
        }

        return 0;
}

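/* tear down a single dmadev created in the probe function */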
static int
idxd_dmadev_destroy(const char *name)
{
        int ret = 0;

        /* rte_dma_close is called by pmd_release */
        ret = rte_dma_pmd_release(name);
        if (ret)
                IDXD_PMD_DEBUG("Device cleanup failed");

        return ret;
}

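/*
 * Remove callback: a single PCI device may back multiple dmadevs (one
 * per work queue), so destroy every dmadev whose name starts with this
 * device's PCI address.
 */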
static int
idxd_dmadev_remove_pci(struct rte_pci_device *dev)
{
        int i = 0;
        char name[PCI_PRI_STR_SIZE];

        rte_pci_device_name(&dev->addr, name, sizeof(name));

        IDXD_PMD_INFO("Closing %s on NUMA node %d", name, dev->device.numa_node);

        RTE_DMA_FOREACH_DEV(i) {
                struct rte_dma_info info;

                rte_dma_info_get(i, &info);
                if (strncmp(name, info.dev_name, strlen(name)) == 0)
                        idxd_dmadev_destroy(info.dev_name);
        }

        return 0;
}

struct rte_pci_driver idxd_pmd_drv_pci = {
        .id_table = pci_id_idxd_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = idxd_dmadev_probe_pci,
        .remove = idxd_dmadev_remove_pci,
};

RTE_PMD_REGISTER_PCI(IDXD_PMD_DMADEV_NAME_PCI, idxd_pmd_drv_pci);
RTE_PMD_REGISTER_PCI_TABLE(IDXD_PMD_DMADEV_NAME_PCI, pci_id_idxd_map);
RTE_PMD_REGISTER_KMOD_DEP(IDXD_PMD_DMADEV_NAME_PCI, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(dmadev_idxd_pci, "max_queues=0");