drivers/raw/ioat/ioat_rawdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_string_fns.h>
#include <rte_rawdev_pmd.h>

#include "rte_ioat_rawdev.h"

static struct rte_pci_driver ioat_pmd_drv;

#define IOAT_VENDOR_ID          0x8086
#define IOAT_DEVICE_ID_SKX      0x2021
#define IOAT_DEVICE_ID_BDX0     0x6f20
#define IOAT_DEVICE_ID_BDX1     0x6f21
#define IOAT_DEVICE_ID_BDX2     0x6f22
#define IOAT_DEVICE_ID_BDX3     0x6f23
#define IOAT_DEVICE_ID_BDX4     0x6f24
#define IOAT_DEVICE_ID_BDX5     0x6f25
#define IOAT_DEVICE_ID_BDX6     0x6f26
#define IOAT_DEVICE_ID_BDX7     0x6f27
#define IOAT_DEVICE_ID_BDXE     0x6f2E
#define IOAT_DEVICE_ID_BDXF     0x6f2F
#define IOAT_DEVICE_ID_ICX      0x0b00

RTE_LOG_REGISTER(ioat_pmd_logtype, rawdev.ioat, INFO);

#define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \
        ioat_pmd_logtype, "%s(): " fmt "\n", __func__, ##args)

#define IOAT_PMD_DEBUG(fmt, args...)  IOAT_PMD_LOG(DEBUG, fmt, ## args)
#define IOAT_PMD_INFO(fmt, args...)   IOAT_PMD_LOG(INFO, fmt, ## args)
#define IOAT_PMD_ERR(fmt, args...)    IOAT_PMD_LOG(ERR, fmt, ## args)
#define IOAT_PMD_WARN(fmt, args...)   IOAT_PMD_LOG(WARNING, fmt, ## args)

#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
#define COMPLETION_SZ sizeof(__m128i)

static int
ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
{
        struct rte_ioat_rawdev_config *params = config;
        struct rte_ioat_rawdev *ioat = dev->dev_private;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        unsigned short i;

        if (dev->started)
                return -EBUSY;

        if (params == NULL)
                return -EINVAL;

        if (params->ring_size > 4096 || params->ring_size < 64 ||
                        !rte_is_power_of_2(params->ring_size))
                return -EINVAL;

        ioat->ring_size = params->ring_size;
        if (ioat->desc_ring != NULL) {
                rte_memzone_free(ioat->desc_mz);
                ioat->desc_ring = NULL;
                ioat->desc_mz = NULL;
        }

        /* allocate one block of memory for both descriptors
         * and completion handles.
         */
        snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
        ioat->desc_mz = rte_memzone_reserve(mz_name,
                        (DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
                        dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
        if (ioat->desc_mz == NULL)
                return -ENOMEM;
        ioat->desc_ring = ioat->desc_mz->addr;
        ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];

        ioat->ring_addr = ioat->desc_mz->iova;

        /* configure descriptor ring - each one points to next */
        for (i = 0; i < ioat->ring_size; i++) {
                ioat->desc_ring[i].next = ioat->ring_addr +
                                (((i + 1) % ioat->ring_size) * DESC_SZ);
        }

        return 0;
}
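
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * configures this device through the generic rawdev API, passing a
 * struct rte_ioat_rawdev_config as the driver-private config. The ring
 * size of 512 below is an arbitrary example value; it must be a power of
 * two between 64 and 4096, matching the checks above. Depending on the
 * rawdev API version in the tree, rte_rawdev_configure() may also take
 * the size of the private config structure as a third argument.
 *
 *      struct rte_ioat_rawdev_config ioat_cfg = { .ring_size = 512 };
 *      struct rte_rawdev_info info = { .dev_private = &ioat_cfg };
 *
 *      if (rte_rawdev_configure(dev_id, &info) != 0)
 *              rte_exit(EXIT_FAILURE, "cannot configure ioat rawdev\n");
 */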
88
89 static int
90 ioat_dev_start(struct rte_rawdev *dev)
91 {
92         struct rte_ioat_rawdev *ioat = dev->dev_private;
93
94         if (ioat->ring_size == 0 || ioat->desc_ring == NULL)
95                 return -EBUSY;
96
97         /* inform hardware of where the descriptor ring is */
98         ioat->regs->chainaddr = ioat->ring_addr;
99         /* inform hardware of where to write the status/completions */
100         ioat->regs->chancmp = ioat->status_addr;
101
102         /* prime the status register to be set to the last element */
103         ioat->status =  ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
104         return 0;
105 }
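
/*
 * Usage sketch (illustrative): once configured, the device is started
 * through the generic rawdev API, which lands in ioat_dev_start() above
 * and programs the chain and completion addresses into the channel
 * registers. The data path then uses the inline helpers declared in
 * rte_ioat_rawdev.h, e.g. rte_ioat_enqueue_copy(); their exact
 * signatures vary between DPDK releases, so they are only named here.
 *
 *      if (rte_rawdev_start(dev_id) != 0)
 *              rte_exit(EXIT_FAILURE, "cannot start ioat rawdev\n");
 */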
106
107 static void
108 ioat_dev_stop(struct rte_rawdev *dev)
109 {
110         RTE_SET_USED(dev);
111 }
112
113 static void
114 ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info,
115                 size_t dev_info_size)
116 {
117         struct rte_ioat_rawdev_config *cfg = dev_info;
118         struct rte_ioat_rawdev *ioat = dev->dev_private;
119
120         if (cfg != NULL && dev_info_size == sizeof(*cfg))
121                 cfg->ring_size = ioat->ring_size;
122 }
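
/*
 * Usage sketch (illustrative): the driver-private info filled in above is
 * again a struct rte_ioat_rawdev_config, so an application can read back
 * the configured ring size through the generic info call. As with
 * configure, newer rawdev APIs also pass the size of the private struct
 * as an extra argument.
 *
 *      struct rte_ioat_rawdev_config ioat_cfg;
 *      struct rte_rawdev_info info = { .dev_private = &ioat_cfg };
 *
 *      rte_rawdev_info_get(dev_id, &info);
 *      printf("ring size: %u\n", (unsigned int)ioat_cfg.ring_size);
 */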

static const char * const xstat_names[] = {
                "failed_enqueues", "successful_enqueues",
                "copies_started", "copies_completed"
};

static int
ioat_xstats_get(const struct rte_rawdev *dev, const unsigned int ids[],
                uint64_t values[], unsigned int n)
{
        const struct rte_ioat_rawdev *ioat = dev->dev_private;
        unsigned int i;

        for (i = 0; i < n; i++) {
                switch (ids[i]) {
                case 0: values[i] = ioat->enqueue_failed; break;
                case 1: values[i] = ioat->enqueued; break;
                case 2: values[i] = ioat->started; break;
                case 3: values[i] = ioat->completed; break;
                default: values[i] = 0; break;
                }
        }
        return n;
}

static int
ioat_xstats_get_names(const struct rte_rawdev *dev,
                struct rte_rawdev_xstats_name *names,
                unsigned int size)
{
        unsigned int i;

        RTE_SET_USED(dev);
        if (size < RTE_DIM(xstat_names))
                return RTE_DIM(xstat_names);

        for (i = 0; i < RTE_DIM(xstat_names); i++)
                strlcpy(names[i].name, xstat_names[i], sizeof(names[i]));

        return RTE_DIM(xstat_names);
}

static int
ioat_xstats_reset(struct rte_rawdev *dev, const uint32_t *ids, uint32_t nb_ids)
{
        struct rte_ioat_rawdev *ioat = dev->dev_private;
        unsigned int i;

        if (!ids) {
                ioat->enqueue_failed = 0;
                ioat->enqueued = 0;
                ioat->started = 0;
                ioat->completed = 0;
                return 0;
        }

        for (i = 0; i < nb_ids; i++) {
                switch (ids[i]) {
                case 0:
                        ioat->enqueue_failed = 0;
                        break;
                case 1:
                        ioat->enqueued = 0;
                        break;
                case 2:
                        ioat->started = 0;
                        break;
                case 3:
                        ioat->completed = 0;
                        break;
                default:
                        IOAT_PMD_WARN("Invalid xstat id - cannot reset value");
                        break;
                }
        }

        return 0;
}
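
/*
 * Usage sketch (illustrative): the four counters above are exposed
 * through the generic rawdev xstats API, so an application can read
 * them by id without knowing the driver internals.
 *
 *      struct rte_rawdev_xstats_name names[4];
 *      unsigned int ids[4] = { 0, 1, 2, 3 };
 *      uint64_t values[4];
 *      int n = rte_rawdev_xstats_names_get(dev_id, names, 4);
 *
 *      if (n == 4 && rte_rawdev_xstats_get(dev_id, ids, values, 4) == 4)
 *              printf("%s: %"PRIu64"\n", names[0].name, values[0]);
 */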

extern int ioat_rawdev_test(uint16_t dev_id);

static int
ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
{
        static const struct rte_rawdev_ops ioat_rawdev_ops = {
                        .dev_configure = ioat_dev_configure,
                        .dev_start = ioat_dev_start,
                        .dev_stop = ioat_dev_stop,
                        .dev_info_get = ioat_dev_info_get,
                        .xstats_get = ioat_xstats_get,
                        .xstats_get_names = ioat_xstats_get_names,
                        .xstats_reset = ioat_xstats_reset,
                        .dev_selftest = ioat_rawdev_test,
        };

        struct rte_rawdev *rawdev = NULL;
        struct rte_ioat_rawdev *ioat = NULL;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        int ret = 0;
        int retry = 0;

        if (!name) {
                IOAT_PMD_ERR("Invalid name of the device!");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Allocate device structure */
        rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
                                         dev->device.numa_node);
        if (rawdev == NULL) {
                IOAT_PMD_ERR("Unable to allocate raw device");
                ret = -ENOMEM;
                goto cleanup;
        }

        snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
        mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
                        dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
        if (mz == NULL) {
                IOAT_PMD_ERR("Unable to reserve memzone for private data\n");
                ret = -ENOMEM;
                goto cleanup;
        }

        rawdev->dev_private = mz->addr;
        rawdev->dev_ops = &ioat_rawdev_ops;
        rawdev->device = &dev->device;
        rawdev->driver_name = dev->device.driver->name;

        ioat = rawdev->dev_private;
        ioat->rawdev = rawdev;
        ioat->mz = mz;
        ioat->regs = dev->mem_resource[0].addr;
        ioat->ring_size = 0;
        ioat->desc_ring = NULL;
        ioat->status_addr = ioat->mz->iova +
                        offsetof(struct rte_ioat_rawdev, status);

        /* do device initialization - reset and set error behaviour */
        if (ioat->regs->chancnt != 1)
                IOAT_PMD_ERR("%s: Channel count == %d\n", __func__,
                                ioat->regs->chancnt);

        if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
                IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
                ioat->regs->chanctrl = 0;
        }

        ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
        rte_delay_ms(1);
        ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
        rte_delay_ms(1);
        while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
                ioat->regs->chainaddr = 0;
                rte_delay_ms(1);
                if (++retry >= 200) {
                        IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32"\n",
                                        __func__,
                                        ioat->regs->chancmd,
                                        ioat->regs->chansts,
                                        ioat->regs->chanerr);
                        ret = -EIO;
                        /* give up rather than spin forever if reset never completes */
                        goto cleanup;
                }
        }
        ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                        RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;

        return 0;

cleanup:
        if (rawdev)
                rte_rawdev_pmd_release(rawdev);

        return ret;
}

static int
ioat_rawdev_destroy(const char *name)
{
        int ret;
        struct rte_rawdev *rdev;

        if (!name) {
                IOAT_PMD_ERR("Invalid device name");
                return -EINVAL;
        }

        rdev = rte_rawdev_pmd_get_named_dev(name);
        if (!rdev) {
                IOAT_PMD_ERR("Invalid device name (%s)", name);
                return -EINVAL;
        }

        if (rdev->dev_private != NULL) {
                struct rte_ioat_rawdev *ioat = rdev->dev_private;
                rdev->dev_private = NULL;
                rte_memzone_free(ioat->desc_mz);
                rte_memzone_free(ioat->mz);
        }

        /* rte_rawdev_close is called by pmd_release */
        ret = rte_rawdev_pmd_release(rdev);
        if (ret)
                IOAT_PMD_DEBUG("Device cleanup failed");

        return 0;
}

static int
ioat_rawdev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
        char name[32];
        int ret = 0;

        rte_pci_device_name(&dev->addr, name, sizeof(name));
        IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);

        dev->device.driver = &drv->driver;
        ret = ioat_rawdev_create(name, dev);
        return ret;
}

static int
ioat_rawdev_remove(struct rte_pci_device *dev)
{
        char name[32];
        int ret;

        rte_pci_device_name(&dev->addr, name, sizeof(name));

        IOAT_PMD_INFO("Closing %s on NUMA node %d",
                        name, dev->device.numa_node);

        ret = ioat_rawdev_destroy(name);
        return ret;
}

static const struct rte_pci_id pci_id_ioat_map[] = {
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_ICX) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver ioat_pmd_drv = {
        .id_table = pci_id_ioat_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = ioat_rawdev_probe,
        .remove = ioat_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(IOAT_PMD_RAWDEV_NAME, ioat_pmd_drv);
RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_RAWDEV_NAME, pci_id_ioat_map);
RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_RAWDEV_NAME, "* igb_uio | uio_pci_generic");