/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_rawdev_pmd.h>

#include "rte_ioat_rawdev.h"
#include "ioat_spec.h"
#include "ioat_private.h"

static struct rte_pci_driver ioat_pmd_drv;

#define IOAT_VENDOR_ID          0x8086
#define IOAT_DEVICE_ID_SKX      0x2021
#define IOAT_DEVICE_ID_BDX0     0x6f20
#define IOAT_DEVICE_ID_BDX1     0x6f21
#define IOAT_DEVICE_ID_BDX2     0x6f22
#define IOAT_DEVICE_ID_BDX3     0x6f23
#define IOAT_DEVICE_ID_BDX4     0x6f24
#define IOAT_DEVICE_ID_BDX5     0x6f25
#define IOAT_DEVICE_ID_BDX6     0x6f26
#define IOAT_DEVICE_ID_BDX7     0x6f27
#define IOAT_DEVICE_ID_BDXE     0x6f2E
#define IOAT_DEVICE_ID_BDXF     0x6f2F
#define IOAT_DEVICE_ID_ICX      0x0b00

RTE_LOG_REGISTER(ioat_pmd_logtype, rawdev.ioat, INFO);

#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
#define COMPLETION_SZ sizeof(__m128i)

static int
ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config,
		size_t config_size)
{
	struct rte_ioat_rawdev_config *params = config;
	struct rte_ioat_rawdev *ioat = dev->dev_private;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	unsigned short i;

	if (dev->started)
		return -EBUSY;

	if (params == NULL || config_size != sizeof(*params))
		return -EINVAL;

	if (params->ring_size > 4096 || params->ring_size < 64 ||
			!rte_is_power_of_2(params->ring_size))
		return -EINVAL;

	ioat->ring_size = params->ring_size;
	ioat->hdls_disable = params->hdls_disable;
	if (ioat->desc_ring != NULL) {
		rte_memzone_free(ioat->desc_mz);
		ioat->desc_ring = NULL;
		ioat->desc_mz = NULL;
	}

	/* allocate one block of memory for both descriptors
	 * and completion handles.
	 */
	snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
	ioat->desc_mz = rte_memzone_reserve(mz_name,
			(DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
			dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
	if (ioat->desc_mz == NULL)
		return -ENOMEM;
	ioat->desc_ring = ioat->desc_mz->addr;
	ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];

	ioat->ring_addr = ioat->desc_mz->iova;

	/* configure descriptor ring - each one points to next */
	for (i = 0; i < ioat->ring_size; i++) {
		ioat->desc_ring[i].next = ioat->ring_addr +
				(((i + 1) % ioat->ring_size) * DESC_SZ);
	}

	return 0;
}
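
/*
 * Illustrative usage, not part of the driver: a minimal sketch of how an
 * application might configure and start this rawdev through the generic
 * rawdev API, assuming the rawdev calls from this release. The dev_id value,
 * ring size and error handling are placeholders.
 *
 *	struct rte_ioat_rawdev_config cfg = { .ring_size = 512 };
 *	struct rte_rawdev_info info = { .dev_private = &cfg };
 *
 *	if (rte_rawdev_configure(dev_id, &info, sizeof(cfg)) != 0 ||
 *			rte_rawdev_start(dev_id) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot set up ioat rawdev\n");
 */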

static int
ioat_dev_start(struct rte_rawdev *dev)
{
	struct rte_ioat_rawdev *ioat = dev->dev_private;

	if (ioat->ring_size == 0 || ioat->desc_ring == NULL)
		return -EBUSY;

	/* inform hardware of where the descriptor ring is */
	ioat->regs->chainaddr = ioat->ring_addr;
	/* inform hardware of where to write the status/completions */
	ioat->regs->chancmp = ioat->status_addr;

	/* prime the status register to be set to the last element */
	ioat->status = ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
	return 0;
}
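
/*
 * Illustrative usage, not part of the driver: once started, copies are issued
 * through the inline data-path helpers declared in rte_ioat_rawdev.h. This is
 * a hedged sketch assuming the helper names and signatures from this release;
 * the IOVA addresses, length and handle values are placeholders. Enqueue
 * returns 1 on success and 0 when the ring is full, perform_ops rings the
 * doorbell, and completed_ops later returns the handles of finished copies.
 *
 *	if (rte_ioat_enqueue_copy(dev_id, src_iova, dst_iova, len,
 *			(uintptr_t)src_hdl, (uintptr_t)dst_hdl) != 1)
 *		return -ENOSPC;
 *	rte_ioat_perform_ops(dev_id);
 *	...
 *	nb_done = rte_ioat_completed_ops(dev_id, RTE_DIM(srcs), srcs, dsts);
 */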

static void
ioat_dev_stop(struct rte_rawdev *dev)
{
	RTE_SET_USED(dev);
}

static int
ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info,
		size_t dev_info_size)
{
	struct rte_ioat_rawdev_config *cfg = dev_info;
	struct rte_ioat_rawdev *ioat = dev->dev_private;

	if (dev_info == NULL || dev_info_size != sizeof(*cfg))
		return -EINVAL;

	cfg->ring_size = ioat->ring_size;
	cfg->hdls_disable = ioat->hdls_disable;
	return 0;
}
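
/*
 * Illustrative usage, not part of the driver: the configured parameters can
 * be read back with the generic info call, a sketch assuming the rawdev API
 * from this release.
 *
 *	struct rte_ioat_rawdev_config cfg;
 *	struct rte_rawdev_info info = { .dev_private = &cfg };
 *
 *	rte_rawdev_info_get(dev_id, &info, sizeof(cfg));
 */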

static const char * const xstat_names[] = {
		"failed_enqueues", "successful_enqueues",
		"copies_started", "copies_completed"
};

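/*
 * Note: this name list is assumed to be in the same order as the counter
 * fields of the xstats member of struct rte_ioat_rawdev, since the get and
 * reset handlers below address that member as a flat array of uint64_t.
 */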
static int
ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	const struct rte_ioat_rawdev *ioat = dev->dev_private;
	const uint64_t *stats = (const void *)&ioat->xstats;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (ids[i] < sizeof(ioat->xstats)/sizeof(*stats))
			values[i] = stats[ids[i]];
		else
			values[i] = 0;
	}
	return n;
}

static int
ioat_xstats_get_names(const struct rte_rawdev *dev,
		struct rte_rawdev_xstats_name *names,
		unsigned int size)
{
	unsigned int i;

	RTE_SET_USED(dev);
	if (size < RTE_DIM(xstat_names))
		return RTE_DIM(xstat_names);

	for (i = 0; i < RTE_DIM(xstat_names); i++)
		strlcpy(names[i].name, xstat_names[i], sizeof(names[i].name));

	return RTE_DIM(xstat_names);
}

static int
ioat_xstats_reset(struct rte_rawdev *dev, const uint32_t *ids, uint32_t nb_ids)
{
	struct rte_ioat_rawdev *ioat = dev->dev_private;
	uint64_t *stats = (void *)&ioat->xstats;
	unsigned int i;

	if (!ids) {
		memset(&ioat->xstats, 0, sizeof(ioat->xstats));
		return 0;
	}

	for (i = 0; i < nb_ids; i++)
		if (ids[i] < sizeof(ioat->xstats)/sizeof(*stats))
			stats[ids[i]] = 0;

	return 0;
}
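
/*
 * Illustrative usage, not part of the driver: a minimal sketch of reading the
 * counters above through the generic rawdev xstats API; the buffer sizes and
 * missing error handling are placeholders.
 *
 *	struct rte_rawdev_xstats_name names[4];
 *	uint64_t vals[4];
 *	unsigned int ids[4] = {0, 1, 2, 3};
 *
 *	rte_rawdev_xstats_names_get(dev_id, names, RTE_DIM(names));
 *	rte_rawdev_xstats_get(dev_id, ids, vals, RTE_DIM(ids));
 */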

static int
ioat_dev_close(struct rte_rawdev *dev __rte_unused)
{
	return 0;
}

extern int ioat_rawdev_test(uint16_t dev_id);

static int
ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
{
	static const struct rte_rawdev_ops ioat_rawdev_ops = {
			.dev_configure = ioat_dev_configure,
			.dev_start = ioat_dev_start,
			.dev_stop = ioat_dev_stop,
			.dev_close = ioat_dev_close,
			.dev_info_get = ioat_dev_info_get,
			.xstats_get = ioat_xstats_get,
			.xstats_get_names = ioat_xstats_get_names,
			.xstats_reset = ioat_xstats_reset,
			.dev_selftest = ioat_rawdev_test,
	};

	struct rte_rawdev *rawdev = NULL;
	struct rte_ioat_rawdev *ioat = NULL;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	int ret = 0;
	int retry = 0;

	if (!name) {
		IOAT_PMD_ERR("Invalid name of the device!");
		ret = -EINVAL;
		goto cleanup;
	}

	/* Allocate device structure */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
					 dev->device.numa_node);
	if (rawdev == NULL) {
		IOAT_PMD_ERR("Unable to allocate raw device");
		ret = -ENOMEM;
		goto cleanup;
	}

	snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
	mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
			dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
	if (mz == NULL) {
		IOAT_PMD_ERR("Unable to reserve memzone for private data\n");
		ret = -ENOMEM;
		goto cleanup;
	}

	rawdev->dev_private = mz->addr;
	rawdev->dev_ops = &ioat_rawdev_ops;
	rawdev->device = &dev->device;
	rawdev->driver_name = dev->device.driver->name;

	ioat = rawdev->dev_private;
	ioat->type = RTE_IOAT_DEV;
	ioat->rawdev = rawdev;
	ioat->mz = mz;
	ioat->regs = dev->mem_resource[0].addr;
	ioat->doorbell = &ioat->regs->dmacount;
	ioat->ring_size = 0;
	ioat->desc_ring = NULL;
	ioat->status_addr = ioat->mz->iova +
			offsetof(struct rte_ioat_rawdev, status);

	/* do device initialization - reset and set error behaviour */
	if (ioat->regs->chancnt != 1)
		IOAT_PMD_ERR("%s: Channel count == %d\n", __func__,
				ioat->regs->chancnt);

	if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
		IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
		ioat->regs->chanctrl = 0;
	}

	ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
	rte_delay_ms(1);
	ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
	rte_delay_ms(1);
	while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
		ioat->regs->chainaddr = 0;
		rte_delay_ms(1);
		if (++retry >= 200) {
			IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32"\n",
					__func__,
					ioat->regs->chancmd,
					ioat->regs->chansts,
					ioat->regs->chanerr);
			ret = -EIO;
			goto cleanup;
		}
	}
	ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
			RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;

	return 0;

cleanup:
	if (rawdev)
		rte_rawdev_pmd_release(rawdev);

	return ret;
}

static int
ioat_rawdev_destroy(const char *name)
{
	int ret;
	struct rte_rawdev *rdev;

	if (!name) {
		IOAT_PMD_ERR("Invalid device name");
		return -EINVAL;
	}

	rdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rdev) {
		IOAT_PMD_ERR("Invalid device name (%s)", name);
		return -EINVAL;
	}

	if (rdev->dev_private != NULL) {
		struct rte_ioat_rawdev *ioat = rdev->dev_private;
		rdev->dev_private = NULL;
		rte_memzone_free(ioat->desc_mz);
		rte_memzone_free(ioat->mz);
	}

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(rdev);
	if (ret)
		IOAT_PMD_DEBUG("Device cleanup failed");

	return 0;
}

static int
ioat_rawdev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
	char name[32];
	int ret = 0;

	rte_pci_device_name(&dev->addr, name, sizeof(name));
	IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);

	dev->device.driver = &drv->driver;
	ret = ioat_rawdev_create(name, dev);
	return ret;
}

static int
ioat_rawdev_remove(struct rte_pci_device *dev)
{
	char name[32];
	int ret;

	rte_pci_device_name(&dev->addr, name, sizeof(name));

	IOAT_PMD_INFO("Closing %s on NUMA node %d",
			name, dev->device.numa_node);

	ret = ioat_rawdev_destroy(name);
	return ret;
}

static const struct rte_pci_id pci_id_ioat_map[] = {
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
	{ RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_ICX) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver ioat_pmd_drv = {
	.id_table = pci_id_ioat_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = ioat_rawdev_probe,
	.remove = ioat_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(IOAT_PMD_RAWDEV_NAME, ioat_pmd_drv);
RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_RAWDEV_NAME, pci_id_ioat_map);
RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_RAWDEV_NAME, "* igb_uio | uio_pci_generic");