/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */
5 #include <rte_cycles.h>
6 #include <rte_bus_pci.h>
7 #include <rte_string_fns.h>
8 #include <rte_rawdev_pmd.h>
10 #include "rte_ioat_rawdev.h"
/* forward declaration; the driver object itself is defined at end of file */
static struct rte_pci_driver ioat_pmd_drv;

/* PCI vendor/device IDs of the Intel QuickData (IOAT) DMA engines we claim */
#define IOAT_VENDOR_ID 0x8086
#define IOAT_DEVICE_ID_SKX 0x2021
#define IOAT_DEVICE_ID_BDX0 0x6f20
#define IOAT_DEVICE_ID_BDX1 0x6f21
#define IOAT_DEVICE_ID_BDX2 0x6f22
#define IOAT_DEVICE_ID_BDX3 0x6f23
#define IOAT_DEVICE_ID_BDX4 0x6f24
#define IOAT_DEVICE_ID_BDX5 0x6f25
#define IOAT_DEVICE_ID_BDX6 0x6f26
#define IOAT_DEVICE_ID_BDX7 0x6f27
#define IOAT_DEVICE_ID_BDXE 0x6f2E
#define IOAT_DEVICE_ID_BDXF 0x6f2F
#define IOAT_DEVICE_ID_ICX 0x0b00

/* dynamic log type "rawdev.ioat", default level INFO */
RTE_LOG_REGISTER(ioat_pmd_logtype, rawdev.ioat, INFO);

/* base logging macro; prefixes every message with the caller's name */
#define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \
		ioat_pmd_logtype, "%s(): " fmt "\n", __func__, ##args)

/* per-level convenience wrappers around IOAT_PMD_LOG() */
#define IOAT_PMD_DEBUG(fmt, args...) IOAT_PMD_LOG(DEBUG, fmt, ## args)
#define IOAT_PMD_INFO(fmt, args...) IOAT_PMD_LOG(INFO, fmt, ## args)
#define IOAT_PMD_ERR(fmt, args...) IOAT_PMD_LOG(ERR, fmt, ## args)
#define IOAT_PMD_WARN(fmt, args...) IOAT_PMD_LOG(WARNING, fmt, ## args)

/* size of one hardware descriptor and of one completion handle entry */
#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
#define COMPLETION_SZ sizeof(__m128i)
/*
 * Configure the rawdev: validate the requested ring size, (re)allocate a
 * single IOVA-contiguous memzone holding both the descriptor ring and the
 * completion handles, and chain each hardware descriptor to the next.
 * NOTE(review): this view of the function is missing lines (signature tail
 * with config_size, return statements, closing braces) — comments only.
 */
ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config,
	struct rte_ioat_rawdev_config *params = config;
	struct rte_ioat_rawdev *ioat = dev->dev_private;
	char mz_name[RTE_MEMZONE_NAMESIZE];

	/* reject a missing config or a size mismatch from an incompatible caller */
	if (params == NULL || config_size != sizeof(*params))

	/* ring must be a power of two between 64 and 4096 entries */
	if (params->ring_size > 4096 || params->ring_size < 64 ||
			!rte_is_power_of_2(params->ring_size))

	ioat->ring_size = params->ring_size;
	ioat->hdls_disable = params->hdls_disable;
	/* reconfiguration: release any previously allocated ring first */
	if (ioat->desc_ring != NULL) {
		rte_memzone_free(ioat->desc_mz);
		ioat->desc_ring = NULL;

	/* allocate one block of memory for both descriptors
	 * and completion handles.
	 */
	snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
	ioat->desc_mz = rte_memzone_reserve(mz_name,
			(DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
			dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
	if (ioat->desc_mz == NULL)
	ioat->desc_ring = ioat->desc_mz->addr;
	/* handle array starts immediately after the last descriptor */
	ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];

	/* physical (IOVA) base address, programmed into hardware on start */
	ioat->ring_addr = ioat->desc_mz->iova;

	/* configure descriptor ring - each one points to next */
	for (i = 0; i < ioat->ring_size; i++) {
		ioat->desc_ring[i].next = ioat->ring_addr +
				(((i + 1) % ioat->ring_size) * DESC_SZ);
/*
 * Start the device: program the descriptor-ring and completion addresses
 * into the channel registers and prime the software status to point at the
 * last ring element.
 * NOTE(review): error-return lines and braces are elided in this view.
 */
ioat_dev_start(struct rte_rawdev *dev)
	struct rte_ioat_rawdev *ioat = dev->dev_private;

	/* cannot start a device that has not been configured */
	if (ioat->ring_size == 0 || ioat->desc_ring == NULL)

	/* inform hardware of where the descriptor ring is */
	ioat->regs->chainaddr = ioat->ring_addr;
	/* inform hardware of where to write the status/completions */
	ioat->regs->chancmp = ioat->status_addr;

	/* prime the status register to be set to the last element */
	ioat->status = ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
/* Stop the device. NOTE(review): the body is elided in this view —
 * confirm its behaviour against the full source before relying on it. */
ioat_dev_stop(struct rte_rawdev *dev)
/*
 * Report the current configuration (ring size, handle-tracking flag) back
 * to the caller through a struct rte_ioat_rawdev_config.
 * NOTE(review): return statements and braces are elided in this view.
 */
ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info,
		size_t dev_info_size)
	struct rte_ioat_rawdev_config *cfg = dev_info;
	struct rte_ioat_rawdev *ioat = dev->dev_private;

	/* guard against a NULL output buffer or an ABI size mismatch */
	if (dev_info == NULL || dev_info_size != sizeof(*cfg))

	cfg->ring_size = ioat->ring_size;
	cfg->hdls_disable = ioat->hdls_disable;
/* xstat display names; order must match the id switch in ioat_xstats_get() */
static const char * const xstat_names[] = {
		"failed_enqueues", "successful_enqueues",
		"copies_started", "copies_completed"
/*
 * Fill values[] with the counters selected by ids[]; an out-of-range id
 * reads as 0 rather than failing.
 * NOTE(review): the switch header and closing braces are elided in this view.
 */
ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
		uint64_t values[], unsigned int n)
	const struct rte_ioat_rawdev *ioat = dev->dev_private;

	for (i = 0; i < n; i++) {
		/* ids map 1:1 onto the entries of xstat_names[] */
		case 0: values[i] = ioat->enqueue_failed; break;
		case 1: values[i] = ioat->enqueued; break;
		case 2: values[i] = ioat->started; break;
		case 3: values[i] = ioat->completed; break;
		default: values[i] = 0; break;
/*
 * Copy the xstat names into the caller's array. When the array is too
 * small nothing is copied; the required count is returned in either case.
 */
ioat_xstats_get_names(const struct rte_rawdev *dev,
		struct rte_rawdev_xstats_name *names,
	if (size < RTE_DIM(xstat_names))
		return RTE_DIM(xstat_names);

	for (i = 0; i < RTE_DIM(xstat_names); i++)
		/* NOTE(review): sizeof(names[i]) only bounds the copy correctly
		 * because the struct holds just the name array; sizeof(names[i].name)
		 * would be the robust form — confirm before changing.
		 */
		strlcpy(names[i].name, xstat_names[i], sizeof(names[i]));

	return RTE_DIM(xstat_names);
/*
 * Reset xstats counters — all of them, or only those listed in ids[],
 * warning on any id that does not match a known counter.
 * NOTE(review): the ids==NULL branch and most of the per-id switch are
 * elided in this view; only fragments of both paths are visible below.
 */
ioat_xstats_reset(struct rte_rawdev *dev, const uint32_t *ids, uint32_t nb_ids)
	struct rte_ioat_rawdev *ioat = dev->dev_private;

	ioat->enqueue_failed = 0;

	for (i = 0; i < nb_ids; i++) {
			ioat->enqueue_failed = 0;
			IOAT_PMD_WARN("Invalid xstat id - cannot reset value");
/* Close hook; the device argument is unused — memzone teardown is handled
 * in ioat_rawdev_destroy(). NOTE(review): body elided in this view. */
ioat_dev_close(struct rte_rawdev *dev __rte_unused)
/* selftest entry point, implemented in a separate compilation unit */
extern int ioat_rawdev_test(uint16_t dev_id);

/*
 * Create and register one rawdev per IOAT channel: allocate the rawdev and
 * an IOVA-contiguous private-data memzone, map the channel register block,
 * then reset the channel and enable error reporting.
 * NOTE(review): several lines (returns, error labels, delay calls, some
 * closing braces) are elided in this view of the function.
 */
ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
	static const struct rte_rawdev_ops ioat_rawdev_ops = {
			.dev_configure = ioat_dev_configure,
			.dev_start = ioat_dev_start,
			.dev_stop = ioat_dev_stop,
			.dev_close = ioat_dev_close,
			.dev_info_get = ioat_dev_info_get,
			.xstats_get = ioat_xstats_get,
			.xstats_get_names = ioat_xstats_get_names,
			.xstats_reset = ioat_xstats_reset,
			.dev_selftest = ioat_rawdev_test,

	struct rte_rawdev *rawdev = NULL;
	struct rte_ioat_rawdev *ioat = NULL;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];

		IOAT_PMD_ERR("Invalid name of the device!");

	/* Allocate device structure */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
					dev->device.numa_node);
	if (rawdev == NULL) {
		IOAT_PMD_ERR("Unable to allocate raw device");

	/* private data goes in its own IOVA-contiguous memzone so hardware
	 * can DMA completion status into it */
	snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
	mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
			dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
		IOAT_PMD_ERR("Unable to reserve memzone for private data\n");

	rawdev->dev_private = mz->addr;
	rawdev->dev_ops = &ioat_rawdev_ops;
	rawdev->device = &dev->device;
	rawdev->driver_name = dev->device.driver->name;

	ioat = rawdev->dev_private;
	ioat->rawdev = rawdev;
	/* BAR 0 holds the channel register block */
	ioat->regs = dev->mem_resource[0].addr;
	ioat->desc_ring = NULL;
	/* IOVA address the hardware writes its completion status to */
	ioat->status_addr = ioat->mz->iova +
			offsetof(struct rte_ioat_rawdev, status);

	/* do device initialization - reset and set error behaviour */
	if (ioat->regs->chancnt != 1)
		IOAT_PMD_ERR("%s: Channel count == %d\n", __func__,
				ioat->regs->chancnt);

	if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
		IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
		ioat->regs->chanctrl = 0;

	ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
	ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
	/* poll for reset completion, bounded so bad hardware cannot hang us */
	while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
		ioat->regs->chainaddr = 0;
		if (++retry >= 200) {
			IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32"\n",
					ioat->regs->chanerr);

	/* abort on any error and have hardware write an error completion */
	ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
			RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;

	/* error path: releasing the rawdev also frees its slot */
	rte_rawdev_pmd_release(rawdev);
/*
 * Tear down a named rawdev: free the descriptor-ring and private-data
 * memzones, then release the rawdev (which in turn calls dev_close).
 * NOTE(review): return statements and braces are elided in this view.
 */
ioat_rawdev_destroy(const char *name)
	struct rte_rawdev *rdev;

		IOAT_PMD_ERR("Invalid device name");

	rdev = rte_rawdev_pmd_get_named_dev(name);
		IOAT_PMD_ERR("Invalid device name (%s)", name);

	if (rdev->dev_private != NULL) {
		struct rte_ioat_rawdev *ioat = rdev->dev_private;
		/* detach private data before freeing its backing memzones */
		rdev->dev_private = NULL;
		rte_memzone_free(ioat->desc_mz);
		rte_memzone_free(ioat->mz);

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(rdev);
		IOAT_PMD_DEBUG("Device cleanup failed");
/*
 * PCI probe callback: derive the device name from its PCI address, attach
 * the driver to the device, and create the rawdev for it.
 */
ioat_rawdev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
	rte_pci_device_name(&dev->addr, name, sizeof(name));
	IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);

	dev->device.driver = &drv->driver;
	ret = ioat_rawdev_create(name, dev);
/*
 * PCI remove callback: look the rawdev up by its PCI-address name and
 * destroy it.
 */
ioat_rawdev_remove(struct rte_pci_device *dev)
	rte_pci_device_name(&dev->addr, name, sizeof(name));

	IOAT_PMD_INFO("Closing %s on NUMA node %d",
			name, dev->device.numa_node);

	ret = ioat_rawdev_destroy(name);
/* PCI ID table of supported devices; must end with a zeroed sentinel entry */
static const struct rte_pci_id pci_id_ioat_map[] = {
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_ICX) },
	{ .vendor_id = 0, /* sentinel */ },
/* driver object referenced by the forward declaration near the top of file */
static struct rte_pci_driver ioat_pmd_drv = {
	.id_table = pci_id_ioat_map,
	/* NOTE(review): INTR_LSC on a rawdev with no visible link-status
	 * handler looks unusual — confirm the flag is intentional. */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = ioat_rawdev_probe,
	.remove = ioat_rawdev_remove,

/* register the driver, its PCI ID table, and its kernel-module dependency */
RTE_PMD_REGISTER_PCI(IOAT_PMD_RAWDEV_NAME, ioat_pmd_drv);
RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_RAWDEV_NAME, pci_id_ioat_map);
RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_RAWDEV_NAME, "* igb_uio | uio_pci_generic");