drivers/dma/idxd/idxd_pci.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include <rte_bus_pci.h>
#include <rte_devargs.h>
#include <rte_dmadev_pmd.h>
#include <rte_malloc.h>

#include "idxd_internal.h"

#define IDXD_VENDOR_ID          0x8086
#define IDXD_DEVICE_ID_SPR      0x0B25

#define IDXD_PMD_DMADEV_NAME_PCI dmadev_idxd_pci

const struct rte_pci_id pci_id_idxd_map[] = {
        { RTE_PCI_DEVICE(IDXD_VENDOR_ID, IDXD_DEVICE_ID_SPR) },
        { .vendor_id = 0, /* sentinel */ },
};

static inline int
idxd_pci_dev_command(struct idxd_dmadev *idxd, enum rte_idxd_cmds command)
{
        uint32_t err_code;
        uint16_t qid = idxd->qid;
        int i = 0;
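        /* work-queue commands take a bitmask of queues rather than a queue
         * index, so convert qid before writing the command register
         */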
        if (command >= idxd_disable_wq && command <= idxd_reset_wq)
                qid = (1 << qid);
        rte_spinlock_lock(&idxd->u.pci->lk);
        idxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;

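        /* poll the command status register until the active bit clears,
         * giving up after 1000 iterations
         */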
        do {
                rte_pause();
                err_code = idxd->u.pci->regs->cmdstatus;
                if (++i >= 1000) {
                        IDXD_PMD_ERR("Timeout waiting for command response from HW");
                        rte_spinlock_unlock(&idxd->u.pci->lk);
                        err_code &= CMDSTATUS_ERR_MASK;
                        return err_code;
                }
        } while (err_code & CMDSTATUS_ACTIVE_MASK);
        rte_spinlock_unlock(&idxd->u.pci->lk);

        err_code &= CMDSTATUS_ERR_MASK;
        return err_code;
}

static uint32_t *
idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
{
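        /* each work queue's config registers occupy 2^(5 + wq_cfg_sz) bytes,
         * so scale the queue index by that stride
         */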
        return RTE_PTR_ADD(pci->wq_regs_base,
                        (uintptr_t)wq_idx << (5 + pci->wq_cfg_sz));
}

static int
idxd_is_wq_enabled(struct idxd_dmadev *idxd)
{
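        /* read the WQ state field from the live config; a value of 1 means enabled */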
        uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[wq_state_idx];
        return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}

static int
idxd_pci_dev_stop(struct rte_dma_dev *dev)
{
        struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
        uint8_t err_code;

        if (!idxd_is_wq_enabled(idxd)) {
                IDXD_PMD_ERR("Work queue %d already disabled", idxd->qid);
                return 0;
        }

        err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
        if (err_code || idxd_is_wq_enabled(idxd)) {
                IDXD_PMD_ERR("Failed disabling work queue %d, error code: %#x",
                                idxd->qid, err_code);
                return err_code == 0 ? -1 : -err_code;
        }
        IDXD_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);

        return 0;
}

static int
idxd_pci_dev_start(struct rte_dma_dev *dev)
{
        struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
        uint8_t err_code;

        if (idxd_is_wq_enabled(idxd)) {
                IDXD_PMD_WARN("WQ %d already enabled", idxd->qid);
                return 0;
        }

        if (idxd->desc_ring == NULL) {
                IDXD_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
                return -EINVAL;
        }

        err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
        if (err_code || !idxd_is_wq_enabled(idxd)) {
                IDXD_PMD_ERR("Failed enabling work queue %d, error code: %#x",
                                idxd->qid, err_code);
                return err_code == 0 ? -1 : -err_code;
        }
        IDXD_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);

        return 0;
}

static int
idxd_pci_dev_close(struct rte_dma_dev *dev)
{
        struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
        uint8_t err_code;

        /* disable the device */
        err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
        if (err_code) {
                IDXD_PMD_ERR("Error disabling device: code %#x", err_code);
                return err_code;
        }
        IDXD_PMD_DEBUG("IDXD Device disabled OK");

        /* free device memory */
        IDXD_PMD_DEBUG("Freeing device driver memory");
        rte_free(idxd->batch_idx_ring);
        rte_free(idxd->desc_ring);

        return 0;
}

static const struct rte_dma_dev_ops idxd_pci_ops = {
        .dev_close = idxd_pci_dev_close,
        .dev_dump = idxd_dump,
        .dev_configure = idxd_configure,
        .vchan_setup = idxd_vchan_setup,
        .dev_info_get = idxd_info_get,
        .stats_get = idxd_stats_get,
        .stats_reset = idxd_stats_reset,
        .dev_start = idxd_pci_dev_start,
        .dev_stop = idxd_pci_dev_stop,
        .vchan_status = idxd_vchan_status,
};

/* each portal uses 4 x 4k pages */
#define IDXD_PORTAL_SIZE (4096 * 4)

static int
init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
                unsigned int max_queues)
{
        struct idxd_pci_common *pci;
        uint8_t nb_groups, nb_engines, nb_wqs;
        uint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */
        uint16_t wq_size, total_wq_size;
        uint8_t lg2_max_batch, lg2_max_copy_size;
        unsigned int i, err_code;

        pci = malloc(sizeof(*pci));
        if (pci == NULL) {
                IDXD_PMD_ERR("%s: Can't allocate memory", __func__);
                err_code = -1;
                goto err;
        }
        rte_spinlock_init(&pci->lk);

        /* assign the bar registers, and then configure device */
        pci->regs = dev->mem_resource[0].addr;
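        /* table locations in the offsets register are expressed in 256-byte units */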
        grp_offset = (uint16_t)pci->regs->offsets[0];
        pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
        wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
        pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
        pci->portals = dev->mem_resource[2].addr;
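        /* per-WQ config record size field from the WQ capability register,
         * used by idxd_get_wq_cfg() to locate each queue's config
         */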
        pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;

        /* sanity check device status */
        if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
                /* need function-level-reset (FLR) or is enabled */
                IDXD_PMD_ERR("Device status is not disabled, cannot init");
                err_code = -1;
                goto err;
        }
        if (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {
                /* command in progress */
                IDXD_PMD_ERR("Device has a command in progress, cannot init");
                err_code = -1;
                goto err;
        }

        /* read basic info about the hardware for use when configuring */
        nb_groups = (uint8_t)pci->regs->grpcap;
        nb_engines = (uint8_t)pci->regs->engcap;
        nb_wqs = (uint8_t)(pci->regs->wqcap >> 16);
        total_wq_size = (uint16_t)pci->regs->wqcap;
        lg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;
        lg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;

        IDXD_PMD_DEBUG("nb_groups = %u, nb_engines = %u, nb_wqs = %u",
                        nb_groups, nb_engines, nb_wqs);

        /* zero out any old config */
        for (i = 0; i < nb_groups; i++) {
                pci->grp_regs[i].grpengcfg = 0;
                pci->grp_regs[i].grpwqcfg[0] = 0;
        }
        for (i = 0; i < nb_wqs; i++)
                idxd_get_wq_cfg(pci, i)[0] = 0;

        /* limit queues if necessary */
        if (max_queues != 0 && nb_wqs > max_queues) {
                nb_wqs = max_queues;
                if (nb_engines > max_queues)
                        nb_engines = max_queues;
                if (nb_groups > max_queues)
                        nb_groups = max_queues;
                IDXD_PMD_DEBUG("Limiting queues to %u", nb_wqs);
        }

        /* put each engine into a separate group to avoid reordering */
        if (nb_groups > nb_engines)
                nb_groups = nb_engines;
        if (nb_groups < nb_engines)
                nb_engines = nb_groups;

        /* assign engines to groups, round-robin style */
        for (i = 0; i < nb_engines; i++) {
                IDXD_PMD_DEBUG("Assigning engine %u to group %u",
                                i, i % nb_groups);
                pci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);
        }

        /* now do the same for queues and give work slots to each queue */
        wq_size = total_wq_size / nb_wqs;
        IDXD_PMD_DEBUG("Work queue size = %u, max batch = 2^%u, max copy = 2^%u",
                        wq_size, lg2_max_batch, lg2_max_copy_size);
        for (i = 0; i < nb_wqs; i++) {
                /* add work queue "i" to a group */
                IDXD_PMD_DEBUG("Assigning work queue %u to group %u",
                                i, i % nb_groups);
                pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
                /* now configure it, in terms of size, max batch, mode */
                idxd_get_wq_cfg(pci, i)[wq_size_idx] = wq_size;
                idxd_get_wq_cfg(pci, i)[wq_mode_idx] = (1 << WQ_PRIORITY_SHIFT) |
                                WQ_MODE_DEDICATED;
                idxd_get_wq_cfg(pci, i)[wq_sizes_idx] = lg2_max_copy_size |
                                (lg2_max_batch << WQ_BATCH_SZ_SHIFT);
        }

        /* dump the group configuration to output */
        for (i = 0; i < nb_groups; i++) {
                IDXD_PMD_DEBUG("## Group %d", i);
                IDXD_PMD_DEBUG("    GRPWQCFG: %"PRIx64, pci->grp_regs[i].grpwqcfg[0]);
                IDXD_PMD_DEBUG("    GRPENGCFG: %"PRIx64, pci->grp_regs[i].grpengcfg);
                IDXD_PMD_DEBUG("    GRPFLAGS: %"PRIx32, pci->grp_regs[i].grpflags);
        }

        idxd->u.pci = pci;
        idxd->max_batches = wq_size;
        idxd->max_batch_size = 1 << lg2_max_batch;

        /* enable the device itself */
        err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
        if (err_code) {
                IDXD_PMD_ERR("Error enabling device: code %#x", err_code);
                err_code = -1;
                goto err;
        }
        IDXD_PMD_DEBUG("IDXD Device enabled OK");

        return nb_wqs;

err:
        free(pci);
        return err_code;
}

static int
idxd_dmadev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
        struct idxd_dmadev idxd = {0};
        uint8_t nb_wqs;
        int qid, ret = 0;
        char name[PCI_PRI_STR_SIZE];
        unsigned int max_queues = 0;

        rte_pci_device_name(&dev->addr, name, sizeof(name));
        IDXD_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
        dev->device.driver = &drv->driver;

        if (dev->device.devargs && dev->device.devargs->args[0] != '\0') {
                /* if the number of devargs grows beyond just 1, use rte_kvargs */
                if (sscanf(dev->device.devargs->args,
                                "max_queues=%u", &max_queues) != 1) {
                        IDXD_PMD_ERR("Invalid device parameter: '%s'",
                                        dev->device.devargs->args);
                        return -1;
                }
        }

        ret = init_pci_device(dev, &idxd, max_queues);
        if (ret < 0) {
                IDXD_PMD_ERR("Error initializing PCI hardware");
                return ret;
        }
        if (idxd.u.pci->portals == NULL) {
                IDXD_PMD_ERR("Error, invalid portal assigned during initialization");
                free(idxd.u.pci);
                return -EINVAL;
        }
        nb_wqs = (uint8_t)ret;

        /* set up one device for each queue */
        for (qid = 0; qid < nb_wqs; qid++) {
                char qname[32];

                /* add the queue number to each device name */
                snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
                idxd.qid = qid;
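                /* give each work queue its own portal window, IDXD_PORTAL_SIZE bytes apart */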
                idxd.portal = RTE_PTR_ADD(idxd.u.pci->portals,
                                qid * IDXD_PORTAL_SIZE);
                if (idxd_is_wq_enabled(&idxd))
                        IDXD_PMD_ERR("Error, WQ %u seems enabled", qid);
                ret = idxd_dmadev_create(qname, &dev->device,
                                &idxd, &idxd_pci_ops);
                if (ret != 0) {
                        IDXD_PMD_ERR("Failed to create dmadev %s", name);
                        if (qid == 0) /* if no devices using this, free pci */
                                free(idxd.u.pci);
                        return ret;
                }
        }

        return 0;
}

static int
idxd_dmadev_destroy(const char *name)
{
        int ret = 0;

        /* rte_dma_close is called by pmd_release */
        ret = rte_dma_pmd_release(name);
        if (ret)
                IDXD_PMD_DEBUG("Device cleanup failed");

        return ret;
}

static int
idxd_dmadev_remove_pci(struct rte_pci_device *dev)
{
        int i = 0;
        char name[PCI_PRI_STR_SIZE];

        rte_pci_device_name(&dev->addr, name, sizeof(name));

        IDXD_PMD_INFO("Closing %s on NUMA node %d", name, dev->device.numa_node);

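        /* take down every dmadev whose name was derived from this PCI device's name */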
        RTE_DMA_FOREACH_DEV(i) {
                struct rte_dma_info info = {0};

                rte_dma_info_get(i, &info);
                if (strncmp(name, info.dev_name, strlen(name)) == 0)
                        idxd_dmadev_destroy(info.dev_name);
        }

        return 0;
}

struct rte_pci_driver idxd_pmd_drv_pci = {
        .id_table = pci_id_idxd_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = idxd_dmadev_probe_pci,
        .remove = idxd_dmadev_remove_pci,
};

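/* register the PCI driver, its device ID table, the kernel module dependency
 * and the supported devargs string
 */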
RTE_PMD_REGISTER_PCI(IDXD_PMD_DMADEV_NAME_PCI, idxd_pmd_drv_pci);
RTE_PMD_REGISTER_PCI_TABLE(IDXD_PMD_DMADEV_NAME_PCI, pci_id_idxd_map);
RTE_PMD_REGISTER_KMOD_DEP(IDXD_PMD_DMADEV_NAME_PCI, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(dmadev_idxd_pci, "max_queues=0");