/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <rte_bus_pci.h>
#include <rte_memzone.h>

#include "ioat_private.h"
#include "ioat_spec.h"

#define IDXD_VENDOR_ID		0x8086
#define IDXD_DEVICE_ID_SPR	0x0B25

#define IDXD_PMD_RAWDEV_NAME_PCI rawdev_idxd_pci

const struct rte_pci_id pci_id_idxd_map[] = {
	{ RTE_PCI_DEVICE(IDXD_VENDOR_ID, IDXD_DEVICE_ID_SPR) },
	{ .vendor_id = 0, /* sentinel */ },
};

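/*
 * Write a command for this device (or one of its work queues) to the CMD
 * register, then busy-poll CMDSTATUS until the hardware reports the command
 * has completed. Returns zero on success, non-zero error bits otherwise.
 */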
static uint8_t
idxd_pci_dev_command(struct idxd_rawdev *idxd, enum rte_idxd_cmds command)
{
	uint8_t err_code;
	uint16_t qid = idxd->qid;
	int i = 0;

	/* the WQ-specific commands take a bitmask of queues, not an index */
	if (command >= idxd_disable_wq && command <= idxd_reset_wq)
		qid = (1 << qid);
	rte_spinlock_lock(&idxd->u.pci->lk);
	idxd->u.pci->regs->cmd = (command << IDXD_CMD_SHIFT) | qid;

	do {
		rte_pause();
		err_code = idxd->u.pci->regs->cmdstatus;
		if (++i >= 1000) {
			IOAT_PMD_ERR("Timeout waiting for command response from HW");
			rte_spinlock_unlock(&idxd->u.pci->lk);
			return err_code;
		}
	} while (idxd->u.pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK);
	rte_spinlock_unlock(&idxd->u.pci->lk);

	return err_code & CMDSTATUS_ERR_MASK;
}

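/*
 * Get a pointer to the config registers of the given work queue; each WQ
 * config block is (32 << wq_cfg_sz) bytes into the WQ register region.
 */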
static uint32_t *
idxd_get_wq_cfg(struct idxd_pci_common *pci, uint8_t wq_idx)
{
	return RTE_PTR_ADD(pci->wq_regs_base,
			(uintptr_t)wq_idx << (5 + pci->wq_cfg_sz));
}

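/* check the state field of the WQ config registers for the "enabled" state */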
static int
idxd_is_wq_enabled(struct idxd_rawdev *idxd)
{
	uint32_t state = idxd_get_wq_cfg(idxd->u.pci, idxd->qid)[WQ_STATE_IDX];
	return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}

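/* rawdev stop callback: disable this device's work queue via a device command */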
static void
idxd_pci_dev_stop(struct rte_rawdev *dev)
{
	struct idxd_rawdev *idxd = dev->dev_private;
	uint8_t err_code;

	if (!idxd_is_wq_enabled(idxd)) {
		IOAT_PMD_ERR("Work queue %d already disabled", idxd->qid);
		return;
	}

	err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
	if (err_code || idxd_is_wq_enabled(idxd)) {
		IOAT_PMD_ERR("Failed disabling work queue %d, error code: %#x",
				idxd->qid, err_code);
		return;
	}
	IOAT_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
}

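/* rawdev start callback: enable the work queue, once it has been configured */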
static int
idxd_pci_dev_start(struct rte_rawdev *dev)
{
	struct idxd_rawdev *idxd = dev->dev_private;
	uint8_t err_code;

	if (idxd_is_wq_enabled(idxd)) {
		IOAT_PMD_WARN("WQ %d already enabled", idxd->qid);
		return 0;
	}

	if (idxd->public.batch_ring == NULL) {
		IOAT_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
		return -EINVAL;
	}

	err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
	if (err_code || !idxd_is_wq_enabled(idxd)) {
		IOAT_PMD_ERR("Failed enabling work queue %d, error code: %#x",
				idxd->qid, err_code);
		return err_code == 0 ? -1 : err_code;
	}

	IOAT_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);

	return 0;
}

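/* rawdev ops used for each per-queue rawdev created at probe time */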
static const struct rte_rawdev_ops idxd_pci_ops = {
	.dev_close = idxd_rawdev_close,
	.dev_selftest = ioat_rawdev_test,
	.dump = idxd_dev_dump,
	.dev_configure = idxd_dev_configure,
	.dev_start = idxd_pci_dev_start,
	.dev_stop = idxd_pci_dev_stop,
	.dev_info_get = idxd_dev_info_get,
	.xstats_get = ioat_xstats_get,
	.xstats_get_names = ioat_xstats_get_names,
	.xstats_reset = ioat_xstats_reset,
};

/* each portal uses 4 x 4k pages */
#define IDXD_PORTAL_SIZE (4096 * 4)

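/*
 * One-time device setup: map the BAR registers, check that the device is
 * disabled and idle, spread engines and work queues round-robin across the
 * hardware groups, then enable the device. Returns the number of work
 * queues available, or a negative value on error.
 */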
static int
init_pci_device(struct rte_pci_device *dev, struct idxd_rawdev *idxd)
{
	struct idxd_pci_common *pci;
	uint8_t nb_groups, nb_engines, nb_wqs;
	uint16_t grp_offset, wq_offset; /* how far into bar0 the regs are */
	uint16_t wq_size, total_wq_size;
	uint8_t lg2_max_batch, lg2_max_copy_size;
	unsigned int i, err_code;

	pci = malloc(sizeof(*pci));
	if (pci == NULL) {
		IOAT_PMD_ERR("%s: Can't allocate memory", __func__);
		goto err;
	}
	rte_spinlock_init(&pci->lk);

	/* assign the bar registers, and then configure device */
	pci->regs = dev->mem_resource[0].addr;
	grp_offset = (uint16_t)pci->regs->offsets[0];
	pci->grp_regs = RTE_PTR_ADD(pci->regs, grp_offset * 0x100);
	wq_offset = (uint16_t)(pci->regs->offsets[0] >> 16);
	pci->wq_regs_base = RTE_PTR_ADD(pci->regs, wq_offset * 0x100);
	pci->portals = dev->mem_resource[2].addr;
	pci->wq_cfg_sz = (pci->regs->wqcap >> 24) & 0x0F;

	/* sanity check device status */
	if (pci->regs->gensts & GENSTS_DEV_STATE_MASK) {
		/* need function-level-reset (FLR) or is enabled */
		IOAT_PMD_ERR("Device status is not disabled, cannot init");
		goto err;
	}
	if (pci->regs->cmdstatus & CMDSTATUS_ACTIVE_MASK) {
		/* command in progress */
		IOAT_PMD_ERR("Device has a command in progress, cannot init");
		goto err;
	}

	/* read basic info about the hardware for use when configuring */
	nb_groups = (uint8_t)pci->regs->grpcap;
	nb_engines = (uint8_t)pci->regs->engcap;
	nb_wqs = (uint8_t)(pci->regs->wqcap >> 16);
	total_wq_size = (uint16_t)pci->regs->wqcap;
	lg2_max_copy_size = (uint8_t)(pci->regs->gencap >> 16) & 0x1F;
	lg2_max_batch = (uint8_t)(pci->regs->gencap >> 21) & 0x0F;

	IOAT_PMD_DEBUG("nb_groups = %u, nb_engines = %u, nb_wqs = %u",
			nb_groups, nb_engines, nb_wqs);

	/* zero out any old config */
	for (i = 0; i < nb_groups; i++) {
		pci->grp_regs[i].grpengcfg = 0;
		pci->grp_regs[i].grpwqcfg[0] = 0;
	}
	for (i = 0; i < nb_wqs; i++)
		idxd_get_wq_cfg(pci, i)[0] = 0;

	/* put each engine into a separate group to avoid reordering */
	if (nb_groups > nb_engines)
		nb_groups = nb_engines;
	if (nb_groups < nb_engines)
		nb_engines = nb_groups;

	/* assign engines to groups, round-robin style */
	for (i = 0; i < nb_engines; i++) {
		IOAT_PMD_DEBUG("Assigning engine %u to group %u",
				i, i % nb_groups);
		pci->grp_regs[i % nb_groups].grpengcfg |= (1ULL << i);
	}

	/* now do the same for queues and give work slots to each queue */
	wq_size = total_wq_size / nb_wqs;
	IOAT_PMD_DEBUG("Work queue size = %u, max batch = 2^%u, max copy = 2^%u",
			wq_size, lg2_max_batch, lg2_max_copy_size);
	for (i = 0; i < nb_wqs; i++) {
		/* add work queue "i" to a group */
		IOAT_PMD_DEBUG("Assigning work queue %u to group %u",
				i, i % nb_groups);
		pci->grp_regs[i % nb_groups].grpwqcfg[0] |= (1ULL << i);
		/* now configure it, in terms of size, max batch, mode */
		idxd_get_wq_cfg(pci, i)[WQ_SIZE_IDX] = wq_size;
		idxd_get_wq_cfg(pci, i)[WQ_MODE_IDX] = (1 << WQ_PRIORITY_SHIFT) |
				WQ_MODE_DEDICATED;
		idxd_get_wq_cfg(pci, i)[WQ_SIZES_IDX] = lg2_max_copy_size |
				(lg2_max_batch << WQ_BATCH_SZ_SHIFT);
	}

	/* dump the group configuration to output */
	for (i = 0; i < nb_groups; i++) {
		IOAT_PMD_DEBUG("## Group %d", i);
		IOAT_PMD_DEBUG("    GRPWQCFG: %"PRIx64, pci->grp_regs[i].grpwqcfg[0]);
		IOAT_PMD_DEBUG("    GRPENGCFG: %"PRIx64, pci->grp_regs[i].grpengcfg);
		IOAT_PMD_DEBUG("    GRPFLAGS: %"PRIx32, pci->grp_regs[i].grpflags);
	}

	idxd->u.pci = pci;
	idxd->max_batches = wq_size;

	/* enable the device itself */
	err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
	if (err_code) {
		IOAT_PMD_ERR("Error enabling device: code %#x", err_code);
		return err_code;
	}
	IOAT_PMD_DEBUG("IDXD Device enabled OK");

	return nb_wqs;

err:
	free(pci);
	return -1;
}

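/* PCI probe callback: initialise the hardware, then create one rawdev per WQ */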
static int
idxd_rawdev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
	struct idxd_rawdev idxd = {{0}}; /* Double {} to avoid error on BSD12 */
	uint8_t nb_wqs;
	int qid, ret = 0;
	char name[PCI_PRI_STR_SIZE];

	rte_pci_device_name(&dev->addr, name, sizeof(name));
	IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
	dev->device.driver = &drv->driver;

	ret = init_pci_device(dev, &idxd);
	if (ret < 0) {
		IOAT_PMD_ERR("Error initializing PCI hardware");
		return ret;
	}
	nb_wqs = (uint8_t)ret;

	/* set up one device for each queue */
	for (qid = 0; qid < nb_wqs; qid++) {
		char qname[32];

		/* add the queue number to each device name */
		snprintf(qname, sizeof(qname), "%s-q%d", name, qid);
		idxd.qid = qid;
		idxd.public.portal = RTE_PTR_ADD(idxd.u.pci->portals,
				qid * IDXD_PORTAL_SIZE);
		if (idxd_is_wq_enabled(&idxd))
			IOAT_PMD_ERR("Error, WQ %u seems enabled", qid);
		ret = idxd_rawdev_create(qname, &dev->device,
				&idxd, &idxd_pci_ops);
		if (ret != 0) {
			IOAT_PMD_ERR("Failed to create rawdev %s", name);
			if (qid == 0) /* if no devices using this, free pci */
				free(idxd.u.pci);
			return ret;
		}
	}

	return 0;
}

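/*
 * Disable the device, free the rings and memzone belonging to the named
 * rawdev, then release the rawdev itself.
 */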
static int
idxd_rawdev_destroy(const char *name)
{
	int ret;
	uint8_t err_code;
	struct rte_rawdev *rdev;
	struct idxd_rawdev *idxd;

	if (!name) {
		IOAT_PMD_ERR("Invalid device name");
		return -EINVAL;
	}

	rdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rdev) {
		IOAT_PMD_ERR("Invalid device name (%s)", name);
		return -EINVAL;
	}

	idxd = rdev->dev_private;
	if (!idxd) {
		IOAT_PMD_ERR("Error getting dev_private");
		return -EINVAL;
	}

	/* disable the device */
	err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
	if (err_code) {
		IOAT_PMD_ERR("Error disabling device: code %#x", err_code);
		return err_code;
	}
	IOAT_PMD_DEBUG("IDXD Device disabled OK");

	/* free device memory */
	IOAT_PMD_DEBUG("Freeing device driver memory");
	rdev->dev_private = NULL;
	rte_free(idxd->public.batch_ring);
	rte_free(idxd->public.hdl_ring);
	rte_memzone_free(idxd->mz);

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(rdev);
	if (ret)
		IOAT_PMD_DEBUG("Device cleanup failed");

	return 0;
}

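/* PCI remove callback: tear down the rawdev state for this device */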
static int
idxd_rawdev_remove_pci(struct rte_pci_device *dev)
{
	char name[PCI_PRI_STR_SIZE];
	int ret = 0;

	rte_pci_device_name(&dev->addr, name, sizeof(name));

	IOAT_PMD_INFO("Closing %s on NUMA node %d",
			name, dev->device.numa_node);

	ret = idxd_rawdev_destroy(name);

	return ret;
}

struct rte_pci_driver idxd_pmd_drv_pci = {
	.id_table = pci_id_idxd_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = idxd_rawdev_probe_pci,
	.remove = idxd_rawdev_remove_pci,
};

RTE_PMD_REGISTER_PCI(IDXD_PMD_RAWDEV_NAME_PCI, idxd_pmd_drv_pci);
RTE_PMD_REGISTER_PCI_TABLE(IDXD_PMD_RAWDEV_NAME_PCI, pci_id_idxd_map);
RTE_PMD_REGISTER_KMOD_DEP(IDXD_PMD_RAWDEV_NAME_PCI,
			  "* igb_uio | uio_pci_generic | vfio-pci");