drivers/raw/ioat/ioat_rawdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_cycles.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_rawdev_pmd.h>

#include "rte_ioat_rawdev.h"
#include "ioat_spec.h"
#include "ioat_private.h"

static struct rte_pci_driver ioat_pmd_drv;

#define IOAT_VENDOR_ID          0x8086
#define IOAT_DEVICE_ID_SKX      0x2021
#define IOAT_DEVICE_ID_BDX0     0x6f20
#define IOAT_DEVICE_ID_BDX1     0x6f21
#define IOAT_DEVICE_ID_BDX2     0x6f22
#define IOAT_DEVICE_ID_BDX3     0x6f23
#define IOAT_DEVICE_ID_BDX4     0x6f24
#define IOAT_DEVICE_ID_BDX5     0x6f25
#define IOAT_DEVICE_ID_BDX6     0x6f26
#define IOAT_DEVICE_ID_BDX7     0x6f27
#define IOAT_DEVICE_ID_BDXE     0x6f2E
#define IOAT_DEVICE_ID_BDXF     0x6f2F
#define IOAT_DEVICE_ID_ICX      0x0b00

RTE_LOG_REGISTER(ioat_pmd_logtype, rawdev.ioat, INFO);

#define DESC_SZ sizeof(struct rte_ioat_generic_hw_desc)
#define COMPLETION_SZ sizeof(__m128i)

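/*
 * Configure an idle device: validate the requested ring size (a power of two
 * between 64 and 4096), allocate a single IOVA-contiguous memzone holding
 * both the descriptor ring and the completion handles, and link each
 * descriptor to the next so the ring wraps around.
 */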
static int
ioat_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config,
                size_t config_size)
{
        struct rte_ioat_rawdev_config *params = config;
        struct rte_ioat_rawdev *ioat = dev->dev_private;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        unsigned short i;

        if (dev->started)
                return -EBUSY;

        if (params == NULL || config_size != sizeof(*params))
                return -EINVAL;

        if (params->ring_size > 4096 || params->ring_size < 64 ||
                        !rte_is_power_of_2(params->ring_size))
                return -EINVAL;

        ioat->ring_size = params->ring_size;
        ioat->hdls_disable = params->hdls_disable;
        if (ioat->desc_ring != NULL) {
                rte_memzone_free(ioat->desc_mz);
                ioat->desc_ring = NULL;
                ioat->desc_mz = NULL;
        }

        /* allocate one block of memory for both descriptors
         * and completion handles.
         */
        snprintf(mz_name, sizeof(mz_name), "rawdev%u_desc_ring", dev->dev_id);
        ioat->desc_mz = rte_memzone_reserve(mz_name,
                        (DESC_SZ + COMPLETION_SZ) * ioat->ring_size,
                        dev->device->numa_node, RTE_MEMZONE_IOVA_CONTIG);
        if (ioat->desc_mz == NULL)
                return -ENOMEM;
        ioat->desc_ring = ioat->desc_mz->addr;
        ioat->hdls = (void *)&ioat->desc_ring[ioat->ring_size];

        ioat->ring_addr = ioat->desc_mz->iova;

        /* configure descriptor ring - each one points to next */
        for (i = 0; i < ioat->ring_size; i++) {
                ioat->desc_ring[i].next = ioat->ring_addr +
                                (((i + 1) % ioat->ring_size) * DESC_SZ);
        }

        return 0;
}

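/*
 * Start a configured device: program the descriptor ring address and the
 * completion write-back address into the channel registers, and prime the
 * cached status to point at the last ring element.
 */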
static int
ioat_dev_start(struct rte_rawdev *dev)
{
        struct rte_ioat_rawdev *ioat = dev->dev_private;

        if (ioat->ring_size == 0 || ioat->desc_ring == NULL)
                return -EBUSY;

        /* inform hardware of where the descriptor ring is */
        ioat->regs->chainaddr = ioat->ring_addr;
        /* inform hardware of where to write the status/completions */
        ioat->regs->chancmp = ioat->status_addr;

        /* prime the status register to be set to the last element */
        ioat->status = ioat->ring_addr + ((ioat->ring_size - 1) * DESC_SZ);
        return 0;
}

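/* Nothing to do on stop; the channel configuration is left in place. */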
static void
ioat_dev_stop(struct rte_rawdev *dev)
{
        RTE_SET_USED(dev);
}

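/* Report the current configuration (ring size, handle tracking) back to the caller. */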
static int
ioat_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info,
                size_t dev_info_size)
{
        struct rte_ioat_rawdev_config *cfg = dev_info;
        struct rte_ioat_rawdev *ioat = dev->dev_private;

        if (dev_info == NULL || dev_info_size != sizeof(*cfg))
                return -EINVAL;

        cfg->ring_size = ioat->ring_size;
        cfg->hdls_disable = ioat->hdls_disable;
        return 0;
}

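/* Nothing to release on close; memzones are freed in ioat_rawdev_destroy(). */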
static int
ioat_dev_close(struct rte_rawdev *dev __rte_unused)
{
        return 0;
}

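/*
 * Create a rawdev for one probed PCI device: allocate the rawdev and its
 * private data, wire up the ops table and the register mapping, then reset
 * the channel and enable abort-on-error reporting.
 */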
static int
ioat_rawdev_create(const char *name, struct rte_pci_device *dev)
{
        static const struct rte_rawdev_ops ioat_rawdev_ops = {
                        .dev_configure = ioat_dev_configure,
                        .dev_start = ioat_dev_start,
                        .dev_stop = ioat_dev_stop,
                        .dev_close = ioat_dev_close,
                        .dev_info_get = ioat_dev_info_get,
                        .xstats_get = ioat_xstats_get,
                        .xstats_get_names = ioat_xstats_get_names,
                        .xstats_reset = ioat_xstats_reset,
                        .dev_selftest = ioat_rawdev_test,
        };

        struct rte_rawdev *rawdev = NULL;
        struct rte_ioat_rawdev *ioat = NULL;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        int ret = 0;
        int retry = 0;

        if (!name) {
                IOAT_PMD_ERR("Invalid name of the device!");
                ret = -EINVAL;
                goto cleanup;
        }

        /* Allocate device structure */
        rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct rte_ioat_rawdev),
                                         dev->device.numa_node);
        if (rawdev == NULL) {
                IOAT_PMD_ERR("Unable to allocate raw device");
                ret = -ENOMEM;
                goto cleanup;
        }

        snprintf(mz_name, sizeof(mz_name), "rawdev%u_private", rawdev->dev_id);
        mz = rte_memzone_reserve(mz_name, sizeof(struct rte_ioat_rawdev),
                        dev->device.numa_node, RTE_MEMZONE_IOVA_CONTIG);
        if (mz == NULL) {
                IOAT_PMD_ERR("Unable to reserve memzone for private data\n");
                ret = -ENOMEM;
                goto cleanup;
        }

        rawdev->dev_private = mz->addr;
        rawdev->dev_ops = &ioat_rawdev_ops;
        rawdev->device = &dev->device;
        rawdev->driver_name = dev->device.driver->name;

        ioat = rawdev->dev_private;
        ioat->type = RTE_IOAT_DEV;
        ioat->rawdev = rawdev;
        ioat->mz = mz;
        ioat->regs = dev->mem_resource[0].addr;
        ioat->doorbell = &ioat->regs->dmacount;
        ioat->ring_size = 0;
        ioat->desc_ring = NULL;
        ioat->status_addr = ioat->mz->iova +
                        offsetof(struct rte_ioat_rawdev, status);

        /* do device initialization - reset and set error behaviour */
        if (ioat->regs->chancnt != 1)
                IOAT_PMD_ERR("%s: Channel count == %d\n", __func__,
                                ioat->regs->chancnt);

        if (ioat->regs->chanctrl & 0x100) { /* locked by someone else */
                IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
                ioat->regs->chanctrl = 0;
        }

        ioat->regs->chancmd = RTE_IOAT_CHANCMD_SUSPEND;
        rte_delay_ms(1);
        ioat->regs->chancmd = RTE_IOAT_CHANCMD_RESET;
        rte_delay_ms(1);
        while (ioat->regs->chancmd & RTE_IOAT_CHANCMD_RESET) {
                ioat->regs->chainaddr = 0;
                rte_delay_ms(1);
                if (++retry >= 200) {
                        IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=0x%"PRIx8", CHANSTS=0x%"PRIx64", CHANERR=0x%"PRIx32"\n",
                                        __func__,
                                        ioat->regs->chancmd,
                                        ioat->regs->chansts,
                                        ioat->regs->chanerr);
                        ret = -EIO;
                        /* give up rather than spinning (and logging) forever */
                        goto cleanup;
                }
        }
        ioat->regs->chanctrl = RTE_IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                        RTE_IOAT_CHANCTRL_ERR_COMPLETION_EN;

        return 0;

cleanup:
        if (rawdev)
                rte_rawdev_pmd_release(rawdev);

        return ret;
}

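/* Tear down a rawdev: free its descriptor and private-data memzones, then release it. */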
static int
ioat_rawdev_destroy(const char *name)
{
        int ret;
        struct rte_rawdev *rdev;

        if (!name) {
                IOAT_PMD_ERR("Invalid device name");
                return -EINVAL;
        }

        rdev = rte_rawdev_pmd_get_named_dev(name);
        if (!rdev) {
                IOAT_PMD_ERR("Invalid device name (%s)", name);
                return -EINVAL;
        }

        if (rdev->dev_private != NULL) {
                struct rte_ioat_rawdev *ioat = rdev->dev_private;
                rdev->dev_private = NULL;
                rte_memzone_free(ioat->desc_mz);
                rte_memzone_free(ioat->mz);
        }

        /* rte_rawdev_close is called by pmd_release */
        ret = rte_rawdev_pmd_release(rdev);
        if (ret)
                IOAT_PMD_DEBUG("Device cleanup failed");

        return 0;
}

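/* PCI probe callback: name the device after its PCI address and create the rawdev. */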
static int
ioat_rawdev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
{
        char name[32];
        int ret = 0;

        rte_pci_device_name(&dev->addr, name, sizeof(name));
        IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);

        dev->device.driver = &drv->driver;
        ret = ioat_rawdev_create(name, dev);
        return ret;
}

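/* PCI remove callback: look the rawdev up by name and destroy it. */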
static int
ioat_rawdev_remove(struct rte_pci_device *dev)
{
        char name[32];
        int ret;

        rte_pci_device_name(&dev->addr, name, sizeof(name));

        IOAT_PMD_INFO("Closing %s on NUMA node %d",
                        name, dev->device.numa_node);

        ret = ioat_rawdev_destroy(name);
        return ret;
}

static const struct rte_pci_id pci_id_ioat_map[] = {
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_SKX) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX0) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX1) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX2) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX3) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX4) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX5) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX6) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDX7) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXE) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_BDXF) },
        { RTE_PCI_DEVICE(IOAT_VENDOR_ID, IOAT_DEVICE_ID_ICX) },
        { .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver ioat_pmd_drv = {
        .id_table = pci_id_ioat_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = ioat_rawdev_probe,
        .remove = ioat_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(IOAT_PMD_RAWDEV_NAME, ioat_pmd_drv);
RTE_PMD_REGISTER_PCI_TABLE(IOAT_PMD_RAWDEV_NAME, pci_id_ioat_map);
RTE_PMD_REGISTER_KMOD_DEP(IOAT_PMD_RAWDEV_NAME, "* igb_uio | uio_pci_generic");
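
/*
 * Usage sketch (illustrative only): roughly how an application might
 * configure and start an ioat rawdev instance probed by this PMD, using the
 * generic rawdev API. The dev_id value, ring size and error handling below
 * are placeholders rather than anything this driver requires beyond the
 * limits checked in ioat_dev_configure().
 *
 *      struct rte_ioat_rawdev_config cfg = {
 *              .ring_size = 512,
 *              .hdls_disable = false,
 *      };
 *      if (rte_rawdev_configure(dev_id, &cfg, sizeof(cfg)) != 0 ||
 *                      rte_rawdev_start(dev_id) != 0)
 *              rte_exit(EXIT_FAILURE, "cannot set up ioat rawdev %u\n", dev_id);
 *
 * Data-path operations then go through the inline API declared in
 * rte_ioat_rawdev.h, e.g. rte_ioat_enqueue_copy() followed by
 * rte_ioat_perform_ops() and, later, rte_ioat_completed_ops().
 */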