drivers/raw/ioat/ioat_rawdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_rawdev_pmd.h>

#include "rte_ioat_rawdev.h"

/* Dynamic log type identifier */
int ioat_pmd_logtype;

static struct rte_pci_driver ioat_pmd_drv;

#define IOAT_VENDOR_ID          0x8086
#define IOAT_DEVICE_ID_SKX      0x2021
#define IOAT_DEVICE_ID_BDX0     0x6f20
#define IOAT_DEVICE_ID_BDX1     0x6f21
#define IOAT_DEVICE_ID_BDX2     0x6f22
#define IOAT_DEVICE_ID_BDX3     0x6f23
#define IOAT_DEVICE_ID_BDX4     0x6f24
#define IOAT_DEVICE_ID_BDX5     0x6f25
#define IOAT_DEVICE_ID_BDX6     0x6f26
#define IOAT_DEVICE_ID_BDX7     0x6f27
#define IOAT_DEVICE_ID_BDXE     0x6f2E
#define IOAT_DEVICE_ID_BDXF     0x6f2F

#define IOAT_PMD_LOG(level, fmt, args...) rte_log(RTE_LOG_ ## level, \
        ioat_pmd_logtype, "%s(): " fmt "\n", __func__, ##args)

#define IOAT_PMD_DEBUG(fmt, args...)  IOAT_PMD_LOG(DEBUG, fmt, ## args)
#define IOAT_PMD_INFO(fmt, args...)   IOAT_PMD_LOG(INFO, fmt, ## args)
#define IOAT_PMD_ERR(fmt, args...)    IOAT_PMD_LOG(ERR, fmt, ## args)
#define IOAT_PMD_WARN(fmt, args...)   IOAT_PMD_LOG(WARNING, fmt, ## args)

#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
#define COMPLETION_SZ sizeof(__m128i)

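/*
 * Configure the device: validate the requested ring size (64-4096, power of
 * two), allocate a single IOVA-contiguous memzone holding both the descriptor
 * ring and the completion handles, and chain each descriptor's "next" pointer
 * to the following entry, wrapping at the end of the ring.
 */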
static int
ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
{
        struct rte_ioat_rawdev_config *params = config;
        struct rte_ioat_rawdev *ioat = dev->dev_private;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        unsigned short i;

        if (dev->started)
                return -EBUSY;

        if (params == NULL)
                return -EINVAL;

        if (params->ring_size > 4096 || params->ring_size < 64 ||
                        !rte_is_power_of_2(params->ring_size))
                return -EINVAL;

        ioat->ring_size = params->ring_size;
        if (ioat->desc_ring != NULL) {
                rte_memzone_free(ioat->desc_mz);
                ioat->desc_ring = NULL;
                ioat->desc_mz = NULL;
        }

        /* allocate one block of memory for both descriptors
         * and completion handles.
         */
        snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
        ioat->desc_mz = rte_memzone_reserve(mz_name,
                        (DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
                        dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
        if (ioat->desc_mz == NULL)
                return -ENOMEM;
        ioat->desc_ring = ioat->desc_mz->addr;
        ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];

        ioat->ring_addr = ioat->desc_mz->iova;

        /* configure descriptor ring - each one points to next */
        for (i = 0; i < ioat->ring_size; i++) {
                ioat->desc_ring[i].next = ioat->ring_addr +
                                (((i + 1) % ioat->ring_size) * DESC_SZ);
        }

        return 0;
}

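/*
 * Start the device: program the descriptor chain address and the
 * status/completion write-back address into the channel registers, and prime
 * the cached status value so it points at the last element of the ring.
 */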
static int
ioat_dev_start(struct rte_rawdev *dev)
{
        struct rte_ioat_rawdev *ioat = dev->dev_private;

        if (ioat->ring_size == 0 || ioat->desc_ring == NULL)
                return -EBUSY;

        /* inform hardware of where the descriptor ring is */
        ioat->regs->chainaddr = ioat->ring_addr;
        /* inform hardware of where to write the status/completions */
        ioat->regs->chancmp = ioat->status_addr;

        /* prime the status register to be set to the last element */
        ioat->status = ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
        return 0;
}

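/* Stop callback: nothing to do for this device, the channel is left as-is. */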
static void
ioat_dev_stop(struct rte_rawdev *dev)
{
        RTE_SET_USED(dev);
}

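/* Report the current configuration (ring size) back to the caller. */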
static void
ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
{
        struct rte_ioat_rawdev_config *cfg = dev_info;
        struct rte_ioat_rawdev *ioat = dev->dev_private;

        if (cfg != NULL)
                cfg->ring_size = ioat->ring_size;
}

extern int ioat_rawdev_test(uint16_t dev_id);

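/*
 * Create and initialise a rawdev instance for one IOAT channel: allocate the
 * rawdev and an IOVA-contiguous memzone for its private data, hook up the ops
 * table, map the channel registers from BAR 0, then suspend/reset the channel
 * and enable abort-on-error behaviour.
 */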
static int
ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
{
        static const struct rte_rawdev_ops ioat_rawdev_ops = {
                        .dev_configure = ioat_dev_configure,
                        .dev_start = ioat_dev_start,
                        .dev_stop = ioat_dev_stop,
                        .dev_info_get = ioat_dev_info_get,
                        .dev_selftest = ioat_rawdev_test,
        };

        struct rte_rawdev *rawdev = NULL;
        struct rte_ioat_rawdev *ioat = NULL;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        int ret = 0;
        int retry = 0;

        if (!name) {
                IOAT_PMD_ERR("Invalid name of the device!");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Allocate device structure */
        rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
                                         dev->device.numa_node);
        if (rawdev == NULL) {
                IOAT_PMD_ERR("Unable to allocate raw device");
                ret = -ENOMEM;
                goto cleanup;
        }

        snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
        mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
                        dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
        if (mz == NULL) {
                IOAT_PMD_ERR("Unable to reserve memzone for private data");
                ret = -ENOMEM;
                goto cleanup;
        }

        rawdev->dev_private = mz->addr;
        rawdev->dev_ops = &ioat_rawdev_ops;
        rawdev->device = &dev->device;
        rawdev->driver_name = dev->device.driver->name;

        ioat = rawdev->dev_private;
        ioat->rawdev = rawdev;
        ioat->mz = mz;
        ioat->regs = dev->mem_resource[0].addr;
        ioat->ring_size = 0;
        ioat->desc_ring = NULL;
        ioat->status_addr = ioat->mz->iova +
                        offsetof(struct rte_ioat_rawdev, status);

        /* do device initialization - reset and set error behaviour.
         * IOAT_PMD_LOG already prefixes the function name and appends a
         * newline, so the messages below omit both. Give up and fail the
         * create if the channel does not come out of reset.
         */
        if (ioat->regs->chancnt != 1)
                IOAT_PMD_ERR("Channel count == %d", ioat->regs->chancnt);

        if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
                IOAT_PMD_WARN("Channel appears locked");
                ioat->regs->chanctrl = 0;
        }

        ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
        rte_delay_ms(1);
        ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
        rte_delay_ms(1);
        while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
                ioat->regs->chainaddr = 0;
                rte_delay_ms(1);
                if (++retry >= 200) {
                        IOAT_PMD_ERR("cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32,
                                        ioat->regs->chancmd,
                                        ioat->regs->chansts,
                                        ioat->regs->chanerr);
                        ret = -EIO;
                        goto cleanup;
                }
        }
        ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                        RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;

        return 0;

cleanup:
        if (rawdev)
                rte_rawdev_pmd_release(rawdev);

        return ret;
}

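/*
 * Tear down a named rawdev instance: free its descriptor-ring and
 * private-data memzones and release the rawdev (rte_rawdev_pmd_release also
 * closes the device).
 */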
static int
ioat_rawdev_destroy(const char *name)
{
        int ret;
        struct rte_rawdev *rdev;

        if (!name) {
                IOAT_PMD_ERR("Invalid device name");
                return -EINVAL;
        }

        rdev = rte_rawdev_pmd_get_named_dev(name);
        if (!rdev) {
                IOAT_PMD_ERR("Invalid device name (%s)", name);
                return -EINVAL;
        }

        if (rdev->dev_private != NULL) {
                struct rte_ioat_rawdev *ioat = rdev->dev_private;
                rdev->dev_private = NULL;
                rte_memzone_free(ioat->desc_mz);
                rte_memzone_free(ioat->mz);
        }

        /* rte_rawdev_close is called by pmd_release */
        ret = rte_rawdev_pmd_release(rdev);
        if (ret)
                IOAT_PMD_DEBUG("Device cleanup failed");

        return 0;
}

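/* PCI probe callback: derive a name from the PCI address and create a rawdev
 * instance for the device.
 */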
static int
ioat_rawdev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
        char name[32];
        int ret = 0;

        rte_pci_device_name(&dev->addr, name, sizeof(name));
        IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);

        dev->device.driver = &drv->driver;
        ret = ioat_rawdev_create(name, dev);
        return ret;
}

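/* PCI remove callback: destroy the rawdev that was created for this device. */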
static int
ioat_rawdev_remove(struct rte_pci_device *dev)
{
        char name[32];
        int ret;

        rte_pci_device_name(&dev->addr, name, sizeof(name));

        IOAT_PMD_INFO("Closing %s on NUMA node %d",
                        name, dev->device.numa_node);

        ret = ioat_rawdev_destroy(name);
        return ret;
}

static const struct rte_pci_id pci_id_ioat_map[] = {
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver ioat_pmd_drv = {
        .id_table = pci_id_ioat_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = ioat_rawdev_probe,
        .remove = ioat_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(IOAT_PMD_RAWDEV_NAME, ioat_pmd_drv);
RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_RAWDEV_NAME, pci_id_ioat_map);
RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_RAWDEV_NAME, "* igb_uio | uio_pci_generic");

RTE_INIT(ioat_pmd_init_log)
{
        ioat_pmd_logtype = rte_log_register(IOAT_PMD_LOG_NAME);
        if (ioat_pmd_logtype >= 0)
                rte_log_set_level(ioat_pmd_logtype, RTE_LOG_INFO);
}
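
/*
 * Illustrative usage sketch (not part of the driver): an application drives
 * this PMD through the generic rawdev API together with the configuration
 * structure from rte_ioat_rawdev.h. This is a minimal, hedged example; the
 * dev_id value is assumed to be the id of an already-probed IOAT rawdev, and
 * real code would look it up by name and check each return value.
 *
 *	struct rte_ioat_rawdev_config cfg = { .ring_size = 512 };
 *	uint16_t dev_id = 0;	// assumption: id of the probed IOAT rawdev
 *
 *	if (rte_rawdev_configure(dev_id, &cfg) == 0 &&
 *			rte_rawdev_start(dev_id) == 0) {
 *		// device ready: copies can now be enqueued using the
 *		// inline helpers declared in rte_ioat_rawdev.h
 *	}
 */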