raw/ioat: add statistics functions
[dpdk.git] / drivers / raw / ioat / ioat_rawdev.c
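
The xstats callbacks added below are reached through the generic rawdev
statistics API. As a rough usage sketch (assuming the
rte_rawdev_xstats_names_get()/rte_rawdev_xstats_get() prototypes from
rte_rawdev.h, with dev_id standing in for the id of a probed IOAT
instance), an application could dump the counters like this:

    #include <inttypes.h>
    #include <stdio.h>

    #include <rte_common.h>
    #include <rte_rawdev.h>

    /* print every xstat reported by the ioat rawdev "dev_id" */
    static void
    print_ioat_xstats(uint16_t dev_id)
    {
            struct rte_rawdev_xstats_name names[16];
            unsigned int ids[16];
            uint64_t values[16];
            int n, i;

            /* first learn how many stats exist and what they are called */
            n = rte_rawdev_xstats_names_get(dev_id, names, RTE_DIM(names));
            if (n <= 0 || n > (int)RTE_DIM(names))
                    return;

            for (i = 0; i < n; i++)
                    ids[i] = i;
            if (rte_rawdev_xstats_get(dev_id, ids, values, n) != n)
                    return;

            for (i = 0; i < n; i++)
                    printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
    }

For this driver the ids map, in order, to failed_enqueues,
successful_enqueues, copies_started and copies_completed.
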
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_string_fns.h>
#include <rte_rawdev_pmd.h>

#include "rte_ioat_rawdev.h"

/* Dynamic log type identifier */
int ioat_pmd_logtype;

static struct rte_pci_driver ioat_pmd_drv;

#define IOAT_VENDOR_ID          0x8086
#define IOAT_DEVICE_ID_SKX      0x2021
#define IOAT_DEVICE_ID_BDX0     0x6f20
#define IOAT_DEVICE_ID_BDX1     0x6f21
#define IOAT_DEVICE_ID_BDX2     0x6f22
#define IOAT_DEVICE_ID_BDX3     0x6f23
#define IOAT_DEVICE_ID_BDX4     0x6f24
#define IOAT_DEVICE_ID_BDX5     0x6f25
#define IOAT_DEVICE_ID_BDX6     0x6f26
#define IOAT_DEVICE_ID_BDX7     0x6f27
#define IOAT_DEVICE_ID_BDXE     0x6f2E
#define IOAT_DEVICE_ID_BDXF     0x6f2F

#define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \
        ioat_pmd_logtype, "%s(): " fmt "\n", __func__, ##args)

#define IOAT_PMD_DEBUG(fmt, args...)  IOAT_PMD_LOG(DEBUG, fmt, ## args)
#define IOAT_PMD_INFO(fmt, args...)   IOAT_PMD_LOG(INFO, fmt, ## args)
#define IOAT_PMD_ERR(fmt, args...)    IOAT_PMD_LOG(ERR, fmt, ## args)
#define IOAT_PMD_WARN(fmt, args...)   IOAT_PMD_LOG(WARNING, fmt, ## args)

#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
#define COMPLETION_SZ sizeof(__m128i)

static int
ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
{
        struct rte_ioat_rawdev_config *params = config;
        struct rte_ioat_rawdev *ioat = dev->dev_private;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        unsigned short i;

        if (dev->started)
                return -EBUSY;

        if (params == NULL)
                return -EINVAL;

        if (params->ring_size > 4096 || params->ring_size < 64 ||
                        !rte_is_power_of_2(params->ring_size))
                return -EINVAL;

        ioat->ring_size = params->ring_size;
        if (ioat->desc_ring != NULL) {
                rte_memzone_free(ioat->desc_mz);
                ioat->desc_ring = NULL;
                ioat->desc_mz = NULL;
        }

        /* allocate one block of memory for both descriptors
         * and completion handles.
         */
        snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
        ioat->desc_mz = rte_memzone_reserve(mz_name,
                        (DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
                        dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
        if (ioat->desc_mz == NULL)
                return -ENOMEM;
        ioat->desc_ring = ioat->desc_mz->addr;
        ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];

        ioat->ring_addr = ioat->desc_mz->iova;

        /* configure descriptor ring - each one points to next */
        for (i = 0; i < ioat->ring_size; i++) {
                ioat->desc_ring[i].next = ioat->ring_addr +
                                (((i + 1) % ioat->ring_size) * DESC_SZ);
        }

        return 0;
}
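
/*
 * Usage sketch for the configure step above: applications reach
 * ioat_dev_configure() through the generic rte_rawdev_configure() call,
 * passing the ring size in a struct rte_ioat_rawdev_config. Here dev_id
 * stands in for the id of the probed device:
 *
 *      struct rte_ioat_rawdev_config cfg = { .ring_size = 512 };
 *
 *      if (rte_rawdev_configure(dev_id, &cfg) != 0)
 *              rte_exit(EXIT_FAILURE, "Failed to configure ioat rawdev\n");
 *
 * The ring size must be a power of two between 64 and 4096 descriptors;
 * anything else is rejected with -EINVAL.
 */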

static int
ioat_dev_start(struct rte_rawdev *dev)
{
        struct rte_ioat_rawdev *ioat = dev->dev_private;

        if (ioat->ring_size == 0 || ioat->desc_ring == NULL)
                return -EBUSY;

        /* inform hardware of where the descriptor ring is */
        ioat->regs->chainaddr = ioat->ring_addr;
        /* inform hardware of where to write the status/completions */
        ioat->regs->chancmp = ioat->status_addr;

        /* prime the status register to be set to the last element */
        ioat->status = ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
        return 0;
}
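
/*
 * Start-up ordering sketch: rte_rawdev_start() can only succeed after a
 * successful configure, since ioat_dev_start() above returns -EBUSY while
 * no descriptor ring has been allocated. With dev_id again standing in
 * for the probed device:
 *
 *      if (rte_rawdev_start(dev_id) != 0)
 *              rte_exit(EXIT_FAILURE, "Failed to start ioat rawdev\n");
 */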

static void
ioat_dev_stop(struct rte_rawdev *dev)
{
        RTE_SET_USED(dev);
}

static void
ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
{
        struct rte_ioat_rawdev_config *cfg = dev_info;
        struct rte_ioat_rawdev *ioat = dev->dev_private;

        if (cfg != NULL)
                cfg->ring_size = ioat->ring_size;
}

/* names of the statistics exposed via xstats; the array index of each name
 * is the id handled by ioat_xstats_get() below
 */
static const char * const xstat_names[] = {
                "failed_enqueues", "successful_enqueues",
                "copies_started", "copies_completed"
};

static int
ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
                uint64_t values[], unsigned int n)
{
        const struct rte_ioat_rawdev *ioat = dev->dev_private;
        unsigned int i;

        for (i = 0; i < n; i++) {
                switch (ids[i]) {
                case 0: values[i] = ioat->enqueue_failed; break;
                case 1: values[i] = ioat->enqueued; break;
                case 2: values[i] = ioat->started; break;
                case 3: values[i] = ioat->completed; break;
                default: values[i] = 0; break;
                }
        }
        return n;
}

static int
ioat_xstats_get_names(const struct rte_rawdev *dev,
                struct rte_rawdev_xstats_name *names,
                unsigned int size)
{
        unsigned int i;

        RTE_SET_USED(dev);
        if (size < RTE_DIM(xstat_names))
                return RTE_DIM(xstat_names);

        for (i = 0; i < RTE_DIM(xstat_names); i++)
                strlcpy(names[i].name, xstat_names[i], sizeof(names[i]));

        return RTE_DIM(xstat_names);
}

extern int ioat_rawdev_test(uint16_t dev_id);

static int
ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
{
        static const struct rte_rawdev_ops ioat_rawdev_ops = {
                        .dev_configure = ioat_dev_configure,
                        .dev_start = ioat_dev_start,
                        .dev_stop = ioat_dev_stop,
                        .dev_info_get = ioat_dev_info_get,
                        .xstats_get = ioat_xstats_get,
                        .xstats_get_names = ioat_xstats_get_names,
                        .dev_selftest = ioat_rawdev_test,
        };

        struct rte_rawdev *rawdev = NULL;
        struct rte_ioat_rawdev *ioat = NULL;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        int ret = 0;
        int retry = 0;

        if (!name) {
                IOAT_PMD_ERR("Invalid name of the device!");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Allocate device structure */
        rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
                                         dev->device.numa_node);
        if (rawdev == NULL) {
                IOAT_PMD_ERR("Unable to allocate raw device");
                ret = -ENOMEM;
                goto cleanup;
        }

        snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
        mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
                        dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
        if (mz == NULL) {
                IOAT_PMD_ERR("Unable to reserve memzone for private data");
                ret = -ENOMEM;
                goto cleanup;
        }

        rawdev->dev_private = mz->addr;
        rawdev->dev_ops = &ioat_rawdev_ops;
        rawdev->device = &dev->device;
        rawdev->driver_name = dev->device.driver->name;

        ioat = rawdev->dev_private;
        ioat->rawdev = rawdev;
        ioat->mz = mz;
        ioat->regs = dev->mem_resource[0].addr;
        ioat->ring_size = 0;
        ioat->desc_ring = NULL;
        ioat->status_addr = ioat->mz->iova +
                        offsetof(struct rte_ioat_rawdev, status);

        /* do device initialization - reset and set error behaviour */
        if (ioat->regs->chancnt != 1)
                IOAT_PMD_ERR("Channel count == %d", ioat->regs->chancnt);

        if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
                IOAT_PMD_WARN("Channel appears locked");
                ioat->regs->chanctrl = 0;
        }

        ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
        rte_delay_ms(1);
        ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
        rte_delay_ms(1);
        while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
                ioat->regs->chainaddr = 0;
                rte_delay_ms(1);
                if (++retry >= 200) {
                        IOAT_PMD_ERR("Cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32,
                                        ioat->regs->chancmd,
                                        ioat->regs->chansts,
                                        ioat->regs->chanerr);
                        ret = -EIO;
                        goto cleanup;
                }
        }
        ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                        RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;

        return 0;

cleanup:
        if (rawdev)
                rte_rawdev_pmd_release(rawdev);

        return ret;
}

static int
ioat_rawdev_destroy(const char *name)
{
        int ret;
        struct rte_rawdev *rdev;

        if (!name) {
                IOAT_PMD_ERR("Invalid device name");
                return -EINVAL;
        }

        rdev = rte_rawdev_pmd_get_named_dev(name);
        if (!rdev) {
                IOAT_PMD_ERR("Invalid device name (%s)", name);
                return -EINVAL;
        }

        if (rdev->dev_private != NULL) {
                struct rte_ioat_rawdev *ioat = rdev->dev_private;
                rdev->dev_private = NULL;
                rte_memzone_free(ioat->desc_mz);
                rte_memzone_free(ioat->mz);
        }

        /* rte_rawdev_close is called by pmd_release */
        ret = rte_rawdev_pmd_release(rdev);
        if (ret)
                IOAT_PMD_DEBUG("Device cleanup failed");

        return 0;
}

static int
ioat_rawdev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
        char name[32];
        int ret = 0;

        rte_pci_device_name(&dev->addr, name, sizeof(name));
        IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);

        dev->device.driver = &drv->driver;
        ret = ioat_rawdev_create(name, dev);
        return ret;
}

static int
ioat_rawdev_remove(struct rte_pci_device *dev)
{
        char name[32];
        int ret;

        rte_pci_device_name(&dev->addr, name, sizeof(name));

        IOAT_PMD_INFO("Closing %s on NUMA node %d",
                        name, dev->device.numa_node);

        ret = ioat_rawdev_destroy(name);
        return ret;
}

static const struct rte_pci_id pci_id_ioat_map[] = {
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver ioat_pmd_drv = {
        .id_table = pci_id_ioat_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = ioat_rawdev_probe,
        .remove = ioat_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(IOAT_PMD_RAWDEV_NAME, ioat_pmd_drv);
RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_RAWDEV_NAME, pci_id_ioat_map);
RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_RAWDEV_NAME, "* igb_uio | uio_pci_generic");

RTE_INIT(ioat_pmd_init_log)
{
        ioat_pmd_logtype = rte_log_register(IOAT_PMD_LOG_NAME);
        if (ioat_pmd_logtype >= 0)
                rte_log_set_level(ioat_pmd_logtype, RTE_LOG_INFO);
}