dpdk.git: drivers/raw/ioat/idxd_pci.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_bus_pci.h>
#include <rte_memzone.h>

#include "ioat_private.h"
#include "ioat_spec.h"

#define IDXD_VENDOR_ID          0x8086
#define IDXD_DEVICE_ID_SPR      0x0B25

#define IDXD_PMD_RAWDEV_NAME_PCI rawdev_idxd_pci

const struct rte_pci_id pci_id_idxd_map[] = {
        { RTE_PCI_DEVICE(IDXD_VENDOR_ID, IDXD_DEVICE_ID_SPR) },
        { .vendor_id = 0, /* sentinel */ },
};

static inline int
idxd_pci_dev_command(struct idxd_rawdev *idxd, enum rte_idxd_cmds command)
{
        uint8_t err_code;
        uint16_t qid = idxd->qid;
        int i = 0;

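        /* WQ-specific commands take a bitmask of queues in the low bits
         * rather than a queue index
         */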
        if (command >= idxd_disable_wq && command <= idxd_reset_wq)
                qid = (1 << qid);
        rte_spinlock_lock(&idxd->u.pci->lk);
        idxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;

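        /* poll the command status register until the hardware no longer
         * reports the command as active, giving up after 1000 iterations
         */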
        do {
                rte_pause();
                err_code = idxd->u.pci->regs->cmdstatus;
                if (++i >= 1000) {
                        IOAT_PMD_ERR("Timeout waiting for command response from HW");
                        rte_spinlock_unlock(&idxd->u.pci->lk);
                        return err_code;
                }
        } while (idxd->u.pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK);
        rte_spinlock_unlock(&idxd->u.pci->lk);

        return err_code & CMDSTATUS_ERR_MASK;
}

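/* WQ config registers live in BAR0 at wq_regs_base; each WQ's config block is
 * (32 << wq_cfg_sz) bytes, with wq_cfg_sz read from WQCAP at init time.
 */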
static uint32_t *
idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
{
        return RTE_PTR_ADD(pci->wq_regs_base, wq_idx << (5 + pci->wq_cfg_sz));
}

static int
idxd_is_wq_enabled(struct idxd_rawdev *idxd)
{
        uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[WQ_STATE_IDX];
        return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}

static void
idxd_pci_dev_stop(struct rte_rawdev *dev)
{
        struct idxd_rawdev *idxd = dev->dev_private;
        uint8_t err_code;

        if (!idxd_is_wq_enabled(idxd)) {
                IOAT_PMD_ERR("Work queue %d already disabled", idxd->qid);
                return;
        }

        err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
        if (err_code || idxd_is_wq_enabled(idxd)) {
                IOAT_PMD_ERR("Failed disabling work queue %d, error code: %#x",
                                idxd->qid, err_code);
                return;
        }
        IOAT_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
}

static int
idxd_pci_dev_start(struct rte_rawdev *dev)
{
        struct idxd_rawdev *idxd = dev->dev_private;
        uint8_t err_code;

        if (idxd_is_wq_enabled(idxd)) {
                IOAT_PMD_WARN("WQ %d already enabled", idxd->qid);
                return 0;
        }

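        /* rings are allocated at configure time, so a NULL batch ring means
         * the WQ has not been configured and cannot be started yet
         */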
        if (idxd->public.batch_ring == NULL) {
                IOAT_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
                return -EINVAL;
        }

        err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
        if (err_code || !idxd_is_wq_enabled(idxd)) {
                IOAT_PMD_ERR("Failed enabling work queue %d, error code: %#x",
                                idxd->qid, err_code);
                return err_code == 0 ? -1 : err_code;
        }

        IOAT_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);

        return 0;
}

static const struct rte_rawdev_ops idxd_pci_ops = {
                .dev_close = idxd_rawdev_close,
                .dev_selftest = ioat_rawdev_test,
                .dump = idxd_dev_dump,
                .dev_configure = idxd_dev_configure,
                .dev_start = idxd_pci_dev_start,
                .dev_stop = idxd_pci_dev_stop,
                .dev_info_get = idxd_dev_info_get,
                .xstats_get = ioat_xstats_get,
                .xstats_get_names = ioat_xstats_get_names,
                .xstats_reset = ioat_xstats_reset,
};

/* each portal uses 4 x 4k pages */
#define IDXD_PORTAL_SIZE (4096 * 4)

static int
init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd)
{
        struct idxd_pci_common *pci;
        uint8_t nb_groups, nb_engines, nb_wqs;
        uint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */
        uint16_t wq_size, total_wq_size;
        uint8_t lg2_max_batch, lg2_max_copy_size;
        unsigned int i, err_code;

        pci = malloc(sizeof(*pci));
        if (pci == NULL) {
                IOAT_PMD_ERR("%s: Can't allocate memory", __func__);
                goto err;
        }
        rte_spinlock_init(&pci->lk);

        /* assign the bar registers, and then configure device */
        pci->regs = dev->mem_resource[0].addr;
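        /* offsets[0] packs two 16-bit values: the low half locates the group
         * config registers and the high half the WQ config registers, both
         * in multiples of 0x100 bytes from the start of BAR0
         */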
        grp_offset = (uint16_t)pci->regs->offsets[0];
        pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
        wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
        pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
        pci->portals = dev->mem_resource[2].addr;
        pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;

        /* sanity check device status */
        if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
                /* device is already enabled, or needs a function-level reset (FLR) first */
                IOAT_PMD_ERR("Device status is not disabled, cannot init");
                goto err;
        }
        if (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {
                /* command in progress */
                IOAT_PMD_ERR("Device has a command in progress, cannot init");
                goto err;
        }

        /* read basic info about the hardware for use when configuring */
        nb_groups = (uint8_t)pci->regs->grpcap;
        nb_engines = (uint8_t)pci->regs->engcap;
        nb_wqs = (uint8_t)(pci->regs->wqcap >> 16);
        total_wq_size = (uint16_t)pci->regs->wqcap;
        lg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;
        lg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;

        IOAT_PMD_DEBUG("nb_groups = %u, nb_engines = %u, nb_wqs = %u",
                        nb_groups, nb_engines, nb_wqs);

        /* zero out any old config */
        for (i = 0; i < nb_groups; i++) {
                pci->grp_regs[i].grpengcfg = 0;
                pci->grp_regs[i].grpwqcfg[0] = 0;
        }
        for (i = 0; i < nb_wqs; i++)
                idxd_get_wq_cfg(pci, i)[0] = 0;

        /* put each engine into a separate group to avoid reordering */
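        /* clamp both counts to min(nb_groups, nb_engines) so no group holds
         * more than one engine
         */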
        if (nb_groups > nb_engines)
                nb_groups = nb_engines;
        if (nb_groups < nb_engines)
                nb_engines = nb_groups;

        /* assign engines to groups, round-robin style */
        for (i = 0; i < nb_engines; i++) {
                IOAT_PMD_DEBUG("Assigning engine %u to group %u",
                                i, i % nb_groups);
                pci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);
        }

        /* now do the same for queues and give work slots to each queue */
        wq_size = total_wq_size / nb_wqs;
        IOAT_PMD_DEBUG("Work queue size = %u, max batch = 2^%u, max copy = 2^%u",
                        wq_size, lg2_max_batch, lg2_max_copy_size);
        for (i = 0; i < nb_wqs; i++) {
                /* add work queue "i" to a group */
                IOAT_PMD_DEBUG("Assigning work queue %u to group %u",
                                i, i % nb_groups);
                pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
                /* now configure it, in terms of size, max batch, mode */
                idxd_get_wq_cfg(pci, i)[WQ_SIZE_IDX] = wq_size;
                idxd_get_wq_cfg(pci, i)[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) |
                                WQ_MODE_DEDICATED;
                idxd_get_wq_cfg(pci, i)[WQ_SIZES_IDX] = lg2_max_copy_size |
                                (lg2_max_batch << WQ_BATCH_SZ_SHIFT);
        }

        /* dump the group configuration to output */
        for (i = 0; i < nb_groups; i++) {
                IOAT_PMD_DEBUG("## Group %d", i);
                IOAT_PMD_DEBUG("    GRPWQCFG: %"PRIx64, pci->grp_regs[i].grpwqcfg[0]);
                IOAT_PMD_DEBUG("    GRPENGCFG: %"PRIx64, pci->grp_regs[i].grpengcfg);
                IOAT_PMD_DEBUG("    GRPFLAGS: %"PRIx32, pci->grp_regs[i].grpflags);
        }

        idxd->u.pci = pci;
        idxd->max_batches = wq_size;

        /* enable the device itself */
        err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
        if (err_code) {
                IOAT_PMD_ERR("Error enabling device: code %#x", err_code);
                return err_code;
        }
        IOAT_PMD_DEBUG("IDXD Device enabled OK");

        return nb_wqs;

err:
        free(pci);
        return -1;
}

static int
idxd_rawdev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
        struct idxd_rawdev idxd = {{0}}; /* Double {} to avoid error on BSD12 */
        uint8_t nb_wqs;
        int qid, ret = 0;
        char name[PCI_PRI_STR_SIZE];

        rte_pci_device_name(&dev->addr, name, sizeof(name));
        IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
        dev->device.driver = &drv->driver;

        ret = init_pci_device(dev, &idxd);
        if (ret < 0) {
                IOAT_PMD_ERR("Error initializing PCI hardware");
                return ret;
        }
        nb_wqs = (uint8_t)ret;

        /* set up one device for each queue */
        for (qid = 0; qid < nb_wqs; qid++) {
                char qname[32];

                /* add the queue number to each device name */
                snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
                idxd.qid = qid;
                idxd.public.portal = RTE_PTR_ADD(idxd.u.pci->portals,
                                qid * IDXD_PORTAL_SIZE);
                if (idxd_is_wq_enabled(&idxd))
                        IOAT_PMD_ERR("Error, WQ %u seems enabled", qid);
                ret = idxd_rawdev_create(qname, &dev->device,
                                &idxd, &idxd_pci_ops);
                if (ret != 0) {
                        IOAT_PMD_ERR("Failed to create rawdev %s", name);
                        if (qid == 0) /* if no devices using this, free pci */
                                free(idxd.u.pci);
                        return ret;
                }
        }

        return 0;
}

static int
idxd_rawdev_destroy(const char *name)
{
        int ret;
        uint8_t err_code;
        struct rte_rawdev *rdev;
        struct idxd_rawdev *idxd;

        if (!name) {
                IOAT_PMD_ERR("Invalid device name");
                return -EINVAL;
        }

        rdev = rte_rawdev_pmd_get_named_dev(name);
        if (!rdev) {
                IOAT_PMD_ERR("Invalid device name (%s)", name);
                return -EINVAL;
        }

        idxd = rdev->dev_private;
        if (!idxd) {
                IOAT_PMD_ERR("Error getting dev_private");
                return -EINVAL;
        }

        /* disable the device */
        err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
        if (err_code) {
                IOAT_PMD_ERR("Error disabling device: code %#x", err_code);
                return err_code;
        }
        IOAT_PMD_DEBUG("IDXD Device disabled OK");

        /* free device memory */
        IOAT_PMD_DEBUG("Freeing device driver memory");
        rdev->dev_private = NULL;
        rte_free(idxd->public.batch_ring);
        rte_free(idxd->public.hdl_ring);
        rte_memzone_free(idxd->mz);

        /* rte_rawdev_close is called by pmd_release */
        ret = rte_rawdev_pmd_release(rdev);
        if (ret)
                IOAT_PMD_DEBUG("Device cleanup failed");

        return 0;
}

static int
idxd_rawdev_remove_pci(struct rte_pci_device *dev)
{
        char name[PCI_PRI_STR_SIZE];
        int ret = 0;

        rte_pci_device_name(&dev->addr, name, sizeof(name));

        IOAT_PMD_INFO("Closing %s on NUMA node %d",
                        name, dev->device.numa_node);

        ret = idxd_rawdev_destroy(name);

        return ret;
}

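/* the driver needs the PCI BARs mapped so the device's MMIO registers and
 * work-submission portals can be accessed directly from the PMD
 */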
struct rte_pci_driver idxd_pmd_drv_pci = {
        .id_table = pci_id_idxd_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = idxd_rawdev_probe_pci,
        .remove = idxd_rawdev_remove_pci,
};

RTE_PMD_REGISTER_PCI(IDXD_PMD_RAWDEV_NAME_PCI, idxd_pmd_drv_pci);
RTE_PMD_REGISTER_PCI_TABLE(IDXD_PMD_RAWDEV_NAME_PCI, pci_id_idxd_map);
RTE_PMD_REGISTER_KMOD_DEP(IDXD_PMD_RAWDEV_NAME_PCI,
                          "* igb_uio | uio_pci_generic | vfio-pci");