raw/ioat: support limiting queues for idxd PCI device
drivers/raw/ioat/idxd_pci.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_devargs.h>

#include "ioat_private.h"
#include "ioat_spec.h"

#define IDXD_VENDOR_ID          0x8086
#define IDXD_DEVICE_ID_SPR      0x0B25

#define IDXD_PMD_RAWDEV_NAME_PCI rawdev_idxd_pci

const struct rte_pci_id pci_id_idxd_map[] = {
        { RTE_PCI_DEVICE(IDXD_VENDOR_ID, IDXD_DEVICE_ID_SPR) },
        { .vendor_id = 0, /* sentinel */ },
};

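/*
 * Send a command to the device and poll cmdstatus until the active flag
 * clears. Commands in the range idxd_disable_wq to idxd_reset_wq operate
 * on a bitmask of work queue IDs, so for those the queue id is converted
 * to (1 << qid); all other commands use the operand as-is.
 */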
static inline int
idxd_pci_dev_command(struct idxd_rawdev *idxd, enum rte_idxd_cmds command)
{
        uint8_t err_code;
        uint16_t qid = idxd->qid;
        int i = 0;

        if (command >= idxd_disable_wq && command <= idxd_reset_wq)
                qid = (1 << qid);
        rte_spinlock_lock(&idxd->u.pci->lk);
        idxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;

        do {
                rte_pause();
                err_code = idxd->u.pci->regs->cmdstatus;
                if (++i >= 1000) {
                        IOAT_PMD_ERR("Timeout waiting for command response from HW");
                        rte_spinlock_unlock(&idxd->u.pci->lk);
                        return err_code;
                }
        } while (idxd->u.pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK);
        rte_spinlock_unlock(&idxd->u.pci->lk);

        return err_code & CMDSTATUS_ERR_MASK;
}

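/*
 * Return a pointer to the config registers of the given work queue.
 * Each WQ config record is (32 << wq_cfg_sz) bytes long, so the index is
 * shifted left by (5 + wq_cfg_sz) to give a byte offset from the base.
 */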
static uint32_t *
idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
{
        return RTE_PTR_ADD(pci->wq_regs_base,
                        (uintptr_t)wq_idx << (5 + pci->wq_cfg_sz));
}

static int
idxd_is_wq_enabled(struct idxd_rawdev *idxd)
{
        uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[WQ_STATE_IDX];
        return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}

static void
idxd_pci_dev_stop(struct rte_rawdev *dev)
{
        struct idxd_rawdev *idxd = dev->dev_private;
        uint8_t err_code;

        if (!idxd_is_wq_enabled(idxd)) {
                IOAT_PMD_ERR("Work queue %d already disabled", idxd->qid);
                return;
        }

        err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
        if (err_code || idxd_is_wq_enabled(idxd)) {
                IOAT_PMD_ERR("Failed disabling work queue %d, error code: %#x",
                                idxd->qid, err_code);
                return;
        }
        IOAT_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
}

static int
idxd_pci_dev_start(struct rte_rawdev *dev)
{
        struct idxd_rawdev *idxd = dev->dev_private;
        uint8_t err_code;

        if (idxd_is_wq_enabled(idxd)) {
                IOAT_PMD_WARN("WQ %d already enabled", idxd->qid);
                return 0;
        }

        if (idxd->public.batch_ring == NULL) {
                IOAT_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
                return -EINVAL;
        }

        err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
        if (err_code || !idxd_is_wq_enabled(idxd)) {
                IOAT_PMD_ERR("Failed enabling work queue %d, error code: %#x",
                                idxd->qid, err_code);
                return err_code == 0 ? -1 : err_code;
        }

        IOAT_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);

        return 0;
}

static const struct rte_rawdev_ops idxd_pci_ops = {
                .dev_close = idxd_rawdev_close,
                .dev_selftest = ioat_rawdev_test,
                .dump = idxd_dev_dump,
                .dev_configure = idxd_dev_configure,
                .dev_start = idxd_pci_dev_start,
                .dev_stop = idxd_pci_dev_stop,
                .dev_info_get = idxd_dev_info_get,
                .xstats_get = ioat_xstats_get,
                .xstats_get_names = ioat_xstats_get_names,
                .xstats_reset = ioat_xstats_reset,
};

/* each portal uses 4 x 4k pages */
#define IDXD_PORTAL_SIZE (4096 * 4)

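/*
 * Map the BAR registers, sanity-check the device state, then write the
 * group, engine and work-queue configuration before enabling the device.
 * Returns the number of usable work queues on success; on failure returns
 * -1 or the device error code.
 */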
static int
init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd,
                unsigned int max_queues)
{
        struct idxd_pci_common *pci;
        uint8_t nb_groups, nb_engines, nb_wqs;
        uint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */
        uint16_t wq_size, total_wq_size;
        uint8_t lg2_max_batch, lg2_max_copy_size;
        unsigned int i, err_code;

        pci = malloc(sizeof(*pci));
        if (pci == NULL) {
                IOAT_PMD_ERR("%s: Can't allocate memory", __func__);
                goto err;
        }
        rte_spinlock_init(&pci->lk);

        /* assign the bar registers, and then configure device */
        pci->regs = dev->mem_resource[0].addr;
        grp_offset = (uint16_t)pci->regs->offsets[0];
        pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
        wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
        pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
        pci->portals = dev->mem_resource[2].addr;
        pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;

        /* sanity check device status */
        if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
                /* device is already enabled or needs a function-level reset (FLR) */
                IOAT_PMD_ERR("Device status is not disabled, cannot init");
                goto err;
        }
        if (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {
                /* command in progress */
                IOAT_PMD_ERR("Device has a command in progress, cannot init");
                goto err;
        }

        /* read basic info about the hardware for use when configuring */
        nb_groups = (uint8_t)pci->regs->grpcap;
        nb_engines = (uint8_t)pci->regs->engcap;
        nb_wqs = (uint8_t)(pci->regs->wqcap >> 16);
        total_wq_size = (uint16_t)pci->regs->wqcap;
        lg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;
        lg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;

        IOAT_PMD_DEBUG("nb_groups = %u, nb_engines = %u, nb_wqs = %u",
                        nb_groups, nb_engines, nb_wqs);

        /* zero out any old config */
        for (i = 0; i < nb_groups; i++) {
                pci->grp_regs[i].grpengcfg = 0;
                pci->grp_regs[i].grpwqcfg[0] = 0;
        }
        for (i = 0; i < nb_wqs; i++)
                idxd_get_wq_cfg(pci, i)[0] = 0;

        /* limit queues if necessary, capping engines and groups to match */
        if (max_queues != 0 && nb_wqs > max_queues) {
                nb_wqs = max_queues;
                if (nb_engines > max_queues)
                        nb_engines = max_queues;
                if (nb_groups > max_queues)
                        nb_groups = max_queues;
                IOAT_PMD_DEBUG("Limiting queues to %u", nb_wqs);
        }

        /* put each engine into a separate group to avoid reordering */
        if (nb_groups > nb_engines)
                nb_groups = nb_engines;
        if (nb_groups < nb_engines)
                nb_engines = nb_groups;

        /* assign engines to groups, round-robin style */
        for (i = 0; i < nb_engines; i++) {
                IOAT_PMD_DEBUG("Assigning engine %u to group %u",
                                i, i % nb_groups);
                pci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);
        }

        /* now do the same for queues and give work slots to each queue */
        wq_size = total_wq_size / nb_wqs;
        IOAT_PMD_DEBUG("Work queue size = %u, max batch = 2^%u, max copy = 2^%u",
                        wq_size, lg2_max_batch, lg2_max_copy_size);
        for (i = 0; i < nb_wqs; i++) {
                /* add work queue "i" to a group */
                IOAT_PMD_DEBUG("Assigning work queue %u to group %u",
                                i, i % nb_groups);
                pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
                /* now configure it, in terms of size, max batch, mode */
                idxd_get_wq_cfg(pci, i)[WQ_SIZE_IDX] = wq_size;
                idxd_get_wq_cfg(pci, i)[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) |
                                WQ_MODE_DEDICATED;
                idxd_get_wq_cfg(pci, i)[WQ_SIZES_IDX] = lg2_max_copy_size |
                                (lg2_max_batch << WQ_BATCH_SZ_SHIFT);
        }

        /* dump the group configuration to output */
        for (i = 0; i < nb_groups; i++) {
                IOAT_PMD_DEBUG("## Group %d", i);
                IOAT_PMD_DEBUG("    GRPWQCFG: %"PRIx64, pci->grp_regs[i].grpwqcfg[0]);
                IOAT_PMD_DEBUG("    GRPENGCFG: %"PRIx64, pci->grp_regs[i].grpengcfg);
                IOAT_PMD_DEBUG("    GRPFLAGS: %"PRIx32, pci->grp_regs[i].grpflags);
        }

        idxd->u.pci = pci;
        idxd->max_batches = wq_size;

        /* enable the device itself */
        err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
        if (err_code) {
                IOAT_PMD_ERR("Error enabling device: code %#x", err_code);
                return err_code;
        }
        IOAT_PMD_DEBUG("IDXD Device enabled OK");

        return nb_wqs;

err:
        free(pci);
        return -1;
}

static int
idxd_rawdev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
        struct idxd_rawdev idxd = {{0}}; /* Double {} to avoid error on BSD12 */
        uint8_t nb_wqs;
        int qid, ret = 0;
        char name[PCI_PRI_STR_SIZE];
        unsigned int max_queues = 0;

        rte_pci_device_name(&dev->addr, name, sizeof(name));
        IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
        dev->device.driver = &drv->driver;
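
        /*
         * Parse the optional "max_queues" devargs value. For example,
         * assuming the standard EAL device allowlist syntax, the device
         * could be limited to four queues with: -a <pci BDF>,max_queues=4
         */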
        if (dev->device.devargs && dev->device.devargs->args[0] != '\0') {
                /* if the number of devargs grows beyond just 1, use rte_kvargs */
                if (sscanf(dev->device.devargs->args,
                                "max_queues=%u", &max_queues) != 1) {
                        IOAT_PMD_ERR("Invalid device parameter: '%s'",
                                        dev->device.devargs->args);
                        return -1;
                }
        }

        ret = init_pci_device(dev, &idxd, max_queues);
        if (ret < 0) {
                IOAT_PMD_ERR("Error initializing PCI hardware");
                return ret;
        }
        nb_wqs = (uint8_t)ret;

        /* set up one device for each queue */
        for (qid = 0; qid < nb_wqs; qid++) {
                char qname[32];

                /* add the queue number to each device name */
                snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
                idxd.qid = qid;
                idxd.public.portal = RTE_PTR_ADD(idxd.u.pci->portals,
                                qid * IDXD_PORTAL_SIZE);
                if (idxd_is_wq_enabled(&idxd))
                        IOAT_PMD_ERR("Error, WQ %u seems enabled", qid);
                ret = idxd_rawdev_create(qname, &dev->device,
                                &idxd, &idxd_pci_ops);
                if (ret != 0) {
                        IOAT_PMD_ERR("Failed to create rawdev %s", qname);
                        if (qid == 0) /* if no devices using this, free pci */
                                free(idxd.u.pci);
                        return ret;
                }
        }

        return 0;
}

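/*
 * Disable the device, free the rings and memzone backing the named rawdev,
 * then release the rawdev itself.
 */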
static int
idxd_rawdev_destroy(const char *name)
{
        int ret;
        uint8_t err_code;
        struct rte_rawdev *rdev;
        struct idxd_rawdev *idxd;

        if (!name) {
                IOAT_PMD_ERR("Invalid device name");
                return -EINVAL;
        }

        rdev = rte_rawdev_pmd_get_named_dev(name);
        if (!rdev) {
                IOAT_PMD_ERR("Invalid device name (%s)", name);
                return -EINVAL;
        }

        idxd = rdev->dev_private;
        if (!idxd) {
                IOAT_PMD_ERR("Error getting dev_private");
                return -EINVAL;
        }

        /* disable the device */
        err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
        if (err_code) {
                IOAT_PMD_ERR("Error disabling device: code %#x", err_code);
                return err_code;
        }
        IOAT_PMD_DEBUG("IDXD Device disabled OK");

        /* free device memory */
        IOAT_PMD_DEBUG("Freeing device driver memory");
        rdev->dev_private = NULL;
        rte_free(idxd->public.batch_ring);
        rte_free(idxd->public.hdl_ring);
        rte_memzone_free(idxd->mz);

        /* rte_rawdev_close is called by pmd_release */
        ret = rte_rawdev_pmd_release(rdev);
        if (ret)
                IOAT_PMD_DEBUG("Device cleanup failed");

        return 0;
}

static int
idxd_rawdev_remove_pci(struct rte_pci_device *dev)
{
        char name[PCI_PRI_STR_SIZE];
        int ret = 0;

        rte_pci_device_name(&dev->addr, name, sizeof(name));

        IOAT_PMD_INFO("Closing %s on NUMA node %d",
                        name, dev->device.numa_node);

        ret = idxd_rawdev_destroy(name);

        return ret;
}

struct rte_pci_driver idxd_pmd_drv_pci = {
        .id_table = pci_id_idxd_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = idxd_rawdev_probe_pci,
        .remove = idxd_rawdev_remove_pci,
};

RTE_PMD_REGISTER_PCI(IDXD_PMD_RAWDEV_NAME_PCI, idxd_pmd_drv_pci);
RTE_PMD_REGISTER_PCI_TABLE(IDXD_PMD_RAWDEV_NAME_PCI, pci_id_idxd_map);
RTE_PMD_REGISTER_KMOD_DEP(IDXD_PMD_RAWDEV_NAME_PCI,
                          "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(rawdev_idxd_pci, "max_queues=0");