69fd040ec30870eada1768eb2253f338dfd24daa
[dpdk.git] / drivers / raw / cnxk_bphy / cnxk_bphy.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <rte_bus_pci.h>
5 #include <rte_common.h>
6 #include <rte_dev.h>
7 #include <rte_eal.h>
8 #include <rte_lcore.h>
9 #include <rte_pci.h>
10 #include <rte_rawdev.h>
11 #include <rte_rawdev_pmd.h>
12
13 #include <roc_api.h>
14
15 #include "cnxk_bphy_irq.h"
16 #include "rte_pmd_bphy.h"
17
/* PCI IDs matched by this driver: the Marvell CNXK BPHY device. */
static const struct rte_pci_id pci_bphy_map[] = {
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_BPHY)},
	{
		/* Sentinel entry terminating the table. */
		.vendor_id = 0,
	},
};
24
/* Per-interrupt bookkeeping used by the rawdev selftest below. */
struct bphy_test {
	int irq_num;                      /* interrupt number under test */
	cnxk_bphy_intr_handler_t handler; /* handler registered for irq_num */
	void *data;                       /* argument handed to the handler */
	int cpu;                          /* NOTE(review): never written in this file — presumably a target cpu; confirm */
	bool handled_intr;                /* set once the handler has fired */
	int handled_data;                 /* value the handler read through data */
	int test_data;                    /* expected value; data points at it */
};
34
35 static struct bphy_test *test;
36
37 static void
38 bphy_test_handler_fn(int irq_num, void *isr_data)
39 {
40         test[irq_num].handled_intr = true;
41         test[irq_num].handled_data = *((int *)isr_data);
42 }
43
44 static int
45 bphy_rawdev_selftest(uint16_t dev_id)
46 {
47         unsigned int i, queues, descs;
48         uint64_t max_irq;
49         int ret;
50
51         queues = rte_rawdev_queue_count(dev_id);
52         if (queues == 0)
53                 return -ENODEV;
54         if (queues != BPHY_QUEUE_CNT)
55                 return -EINVAL;
56
57         ret = rte_rawdev_start(dev_id);
58         if (ret)
59                 return ret;
60
61         ret = rte_rawdev_queue_conf_get(dev_id, CNXK_BPHY_DEF_QUEUE, &descs,
62                                         sizeof(descs));
63         if (ret)
64                 goto err_desc;
65         if (descs != 1) {
66                 ret = -ENODEV;
67                 plt_err("Wrong number of descs reported\n");
68                 goto err_desc;
69         }
70
71         ret = rte_pmd_bphy_intr_init(dev_id);
72         if (ret) {
73                 plt_err("intr init failed");
74                 return ret;
75         }
76
77         max_irq = cnxk_bphy_irq_max_get(dev_id);
78
79         test = rte_zmalloc("BPHY", max_irq * sizeof(*test), 0);
80         if (test == NULL) {
81                 plt_err("intr alloc failed");
82                 goto err_alloc;
83         }
84
85         for (i = 0; i < max_irq; i++) {
86                 test[i].test_data = i;
87                 test[i].irq_num = i;
88                 test[i].handler = bphy_test_handler_fn;
89                 test[i].data = &test[i].test_data;
90         }
91
92         for (i = 0; i < max_irq; i++) {
93                 ret = rte_pmd_bphy_intr_register(dev_id, test[i].irq_num,
94                                                  test[i].handler, test[i].data,
95                                                  0);
96                 if (ret == -ENOTSUP) {
97                         /* In the test we iterate over all irq numbers
98                          * so if some of them are not supported by given
99                          * platform we treat respective results as valid
100                          * ones. This way they have no impact on overall
101                          * test results.
102                          */
103                         test[i].handled_intr = true;
104                         test[i].handled_data = test[i].test_data;
105                         ret = 0;
106                         continue;
107                 }
108
109                 if (ret) {
110                         plt_err("intr register failed at irq %d", i);
111                         goto err_register;
112                 }
113         }
114
115         for (i = 0; i < max_irq; i++)
116                 roc_bphy_intr_handler(i);
117
118         for (i = 0; i < max_irq; i++) {
119                 if (!test[i].handled_intr) {
120                         plt_err("intr %u not handled", i);
121                         ret = -1;
122                         break;
123                 }
124                 if (test[i].handled_data != test[i].test_data) {
125                         plt_err("intr %u has wrong handler", i);
126                         ret = -1;
127                         break;
128                 }
129         }
130
131 err_register:
132         /*
133          * In case of registration failure the loop goes over all
134          * interrupts which is safe due to internal guards in
135          * rte_pmd_bphy_intr_unregister().
136          */
137         for (i = 0; i < max_irq; i++)
138                 rte_pmd_bphy_intr_unregister(dev_id, i);
139
140         rte_free(test);
141 err_alloc:
142         rte_pmd_bphy_intr_fini(dev_id);
143 err_desc:
144         rte_rawdev_stop(dev_id);
145
146         return ret;
147 }
148
149 static void
150 bphy_rawdev_get_name(char *name, struct rte_pci_device *pci_dev)
151 {
152         snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "BPHY:%x:%02x.%x",
153                  pci_dev->addr.bus, pci_dev->addr.devid,
154                  pci_dev->addr.function);
155 }
156
157 static int
158 cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
159                            struct rte_rawdev_buf **buffers, unsigned int count,
160                            rte_rawdev_obj_t context)
161 {
162         struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
163         struct cnxk_bphy_irq_msg *msg = buffers[0]->buf_addr;
164         unsigned int queue = (size_t)context;
165         struct cnxk_bphy_irq_info *info;
166         int ret = 0;
167
168         if (queue >= RTE_DIM(bphy_dev->queues))
169                 return -EINVAL;
170
171         if (count == 0)
172                 return 0;
173
174         switch (msg->type) {
175         case CNXK_BPHY_IRQ_MSG_TYPE_INIT:
176                 ret = cnxk_bphy_intr_init(dev->dev_id);
177                 break;
178         case CNXK_BPHY_IRQ_MSG_TYPE_FINI:
179                 cnxk_bphy_intr_fini(dev->dev_id);
180                 break;
181         case CNXK_BPHY_IRQ_MSG_TYPE_REGISTER:
182                 info = (struct cnxk_bphy_irq_info *)msg->data;
183                 ret = cnxk_bphy_intr_register(dev->dev_id, info->irq_num,
184                                               info->handler, info->data,
185                                               info->cpu);
186                 break;
187         case CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER:
188                 info = (struct cnxk_bphy_irq_info *)msg->data;
189                 cnxk_bphy_intr_unregister(dev->dev_id, info->irq_num);
190                 break;
191         case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
192                 bphy_dev->queues[queue].rsp = &bphy_dev->mem;
193                 break;
194         default:
195                 ret = -EINVAL;
196         }
197
198         return ret;
199 }
200
201 static int
202 cnxk_bphy_irq_dequeue_bufs(struct rte_rawdev *dev,
203                            struct rte_rawdev_buf **buffers, unsigned int count,
204                            rte_rawdev_obj_t context)
205 {
206         struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
207         unsigned int queue = (size_t)context;
208
209         if (queue >= RTE_DIM(bphy_dev->queues))
210                 return -EINVAL;
211
212         if (count == 0)
213                 return 0;
214
215         buffers[0]->buf_addr = bphy_dev->queues[queue].rsp;
216
217         return 0;
218 }
219
220 static uint16_t
221 cnxk_bphy_irq_queue_count(struct rte_rawdev *dev)
222 {
223         struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
224
225         return RTE_DIM(bphy_dev->queues);
226 }
227
228 static int
229 cnxk_bphy_irq_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
230                              rte_rawdev_obj_t queue_conf,
231                              size_t queue_conf_size)
232 {
233         RTE_SET_USED(dev);
234         RTE_SET_USED(queue_id);
235
236         if (queue_conf_size != sizeof(unsigned int))
237                 return -EINVAL;
238
239         *(unsigned int *)queue_conf = 1;
240
241         return 0;
242 }
243
/* Rawdev ops implemented by this driver; unlisted callbacks stay NULL. */
static const struct rte_rawdev_ops bphy_rawdev_ops = {
	.queue_def_conf = cnxk_bphy_irq_queue_def_conf,
	.enqueue_bufs = cnxk_bphy_irq_enqueue_bufs,
	.dequeue_bufs = cnxk_bphy_irq_dequeue_bufs,
	.queue_count = cnxk_bphy_irq_queue_count,
	.dev_selftest = bphy_rawdev_selftest,
};
251
252 static int
253 bphy_rawdev_probe(struct rte_pci_driver *pci_drv,
254                   struct rte_pci_device *pci_dev)
255 {
256         struct bphy_device *bphy_dev = NULL;
257         char name[RTE_RAWDEV_NAME_MAX_LEN];
258         struct rte_rawdev *bphy_rawdev;
259         int ret;
260
261         RTE_SET_USED(pci_drv);
262
263         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
264                 return 0;
265
266         if (!pci_dev->mem_resource[0].addr) {
267                 plt_err("BARs have invalid values: BAR0 %p\n BAR2 %p",
268                         pci_dev->mem_resource[0].addr,
269                         pci_dev->mem_resource[2].addr);
270                 return -ENODEV;
271         }
272
273         ret = roc_plt_init();
274         if (ret)
275                 return ret;
276
277         bphy_rawdev_get_name(name, pci_dev);
278         bphy_rawdev = rte_rawdev_pmd_allocate(name, sizeof(*bphy_dev),
279                                               rte_socket_id());
280         if (bphy_rawdev == NULL) {
281                 plt_err("Failed to allocate rawdev");
282                 return -ENOMEM;
283         }
284
285         bphy_rawdev->dev_ops = &bphy_rawdev_ops;
286         bphy_rawdev->device = &pci_dev->device;
287         bphy_rawdev->driver_name = pci_dev->driver->driver.name;
288
289         bphy_dev = (struct bphy_device *)bphy_rawdev->dev_private;
290         bphy_dev->mem.res0 = pci_dev->mem_resource[0];
291         bphy_dev->mem.res2 = pci_dev->mem_resource[2];
292
293         return 0;
294 }
295
296 static int
297 bphy_rawdev_remove(struct rte_pci_device *pci_dev)
298 {
299         char name[RTE_RAWDEV_NAME_MAX_LEN];
300         struct rte_rawdev *rawdev;
301
302         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
303                 return 0;
304
305         if (pci_dev == NULL) {
306                 plt_err("invalid pci_dev");
307                 return -EINVAL;
308         }
309
310         bphy_rawdev_get_name(name, pci_dev);
311         rawdev = rte_rawdev_pmd_get_named_dev(name);
312         if (rawdev == NULL) {
313                 plt_err("invalid device name (%s)", name);
314                 return -EINVAL;
315         }
316
317         return rte_rawdev_pmd_release(rawdev);
318 }
319
/* PCI driver descriptor; requires BAR mapping and IOVA-as-VA mode. */
static struct rte_pci_driver cnxk_bphy_rawdev_pmd = {
	.id_table = pci_bphy_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = bphy_rawdev_probe,
	.remove = bphy_rawdev_remove,
};
326
/* Register the PMD with the PCI bus, export its id table and declare
 * that it needs the vfio-pci kernel module.
 */
RTE_PMD_REGISTER_PCI(bphy_rawdev_pci_driver, cnxk_bphy_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(bphy_rawdev_pci_driver, pci_bphy_map);
RTE_PMD_REGISTER_KMOD_DEP(bphy_rawdev_pci_driver, "vfio-pci");