/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_pci.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include <roc_api.h>

#include "cnxk_bphy_irq.h"
#include "rte_pmd_bphy.h"

static const struct rte_pci_id pci_bphy_map[] = {
        {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_BPHY)},
        {
                .vendor_id = 0,
        },
};

struct bphy_test {
        int irq_num;
        cnxk_bphy_intr_handler_t handler;
        void *data;
        int cpu;
        bool handled_intr;
        int handled_data;
        int test_data;
};

static struct bphy_test *test;

static void
bphy_test_handler_fn(int irq_num, void *isr_data)
{
        test[irq_num].handled_intr = true;
        test[irq_num].handled_data = *((int *)isr_data);
}

static int
bphy_rawdev_selftest(uint16_t dev_id)
{
        unsigned int i, queues, descs;
        uint16_t pf_func;
        uint64_t max_irq;
        int ret;

        queues = rte_rawdev_queue_count(dev_id);
        if (queues == 0)
                return -ENODEV;
        if (queues != BPHY_QUEUE_CNT)
                return -EINVAL;

        ret = rte_rawdev_start(dev_id);
        if (ret)
                return ret;

        ret = rte_rawdev_queue_conf_get(dev_id, CNXK_BPHY_DEF_QUEUE, &descs,
                                        sizeof(descs));
        if (ret)
                goto err_desc;
        if (descs != 1) {
                ret = -ENODEV;
                plt_err("Wrong number of descs reported");
                goto err_desc;
        }

        ret = rte_pmd_bphy_npa_pf_func_get(dev_id, &pf_func);
        if (ret || pf_func == 0)
                plt_warn("NPA pf_func is invalid");

        ret = rte_pmd_bphy_sso_pf_func_get(dev_id, &pf_func);
        if (ret || pf_func == 0)
                plt_warn("SSO pf_func is invalid");

        ret = rte_pmd_bphy_intr_init(dev_id);
        if (ret) {
                plt_err("intr init failed");
                return ret;
        }

        max_irq = cnxk_bphy_irq_max_get(dev_id);

        test = rte_zmalloc("BPHY", max_irq * sizeof(*test), 0);
        if (test == NULL) {
                plt_err("intr alloc failed");
                ret = -ENOMEM;
                goto err_alloc;
        }

        for (i = 0; i < max_irq; i++) {
                test[i].test_data = i;
                test[i].irq_num = i;
                test[i].handler = bphy_test_handler_fn;
                test[i].data = &test[i].test_data;
        }

        for (i = 0; i < max_irq; i++) {
                ret = rte_pmd_bphy_intr_register(dev_id, test[i].irq_num,
                                                 test[i].handler, test[i].data,
                                                 0);
                if (ret == -ENOTSUP) {
                        /* The test iterates over all irq numbers, so if
                         * some of them are not supported by a given
                         * platform, treat the respective results as valid
                         * ones. This way they have no impact on the
                         * overall test result.
                         */
                        test[i].handled_intr = true;
                        test[i].handled_data = test[i].test_data;
                        ret = 0;
                        continue;
                }

                if (ret) {
                        plt_err("intr register failed at irq %u", i);
                        goto err_register;
                }
        }

        for (i = 0; i < max_irq; i++)
                roc_bphy_intr_handler(i);

        for (i = 0; i < max_irq; i++) {
                if (!test[i].handled_intr) {
                        plt_err("intr %u not handled", i);
                        ret = -1;
                        break;
                }
                if (test[i].handled_data != test[i].test_data) {
                        plt_err("intr %u has wrong handler", i);
                        ret = -1;
                        break;
                }
        }

err_register:
        /*
         * In case of registration failure the loop goes over all
         * interrupts which is safe due to internal guards in
         * rte_pmd_bphy_intr_unregister().
         */
        for (i = 0; i < max_irq; i++)
                rte_pmd_bphy_intr_unregister(dev_id, i);

        rte_free(test);
err_alloc:
        rte_pmd_bphy_intr_fini(dev_id);
err_desc:
        rte_rawdev_stop(dev_id);

        return ret;
}
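
/*
 * Illustrative usage sketch, not part of the driver: an application
 * triggers the selftest above through the generic rawdev API, assuming
 * dev_id identifies a probed BPHY rawdev:
 *
 *	if (rte_rawdev_selftest(dev_id))
 *		printf("BPHY selftest failed\n");
 */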
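/* Derive the rawdev name from the PCI address, e.g. "BPHY:02:00.0". */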
static void
bphy_rawdev_get_name(char *name, struct rte_pci_device *pci_dev)
{
        snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "BPHY:%02x:%02x.%x",
                 pci_dev->addr.bus, pci_dev->addr.devid,
                 pci_dev->addr.function);
}
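/*
 * Consume a single control message per enqueue call. Any response payload
 * is stashed in the queue and handed back by the next dequeue.
 */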
static int
cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
                           struct rte_rawdev_buf **buffers, unsigned int count,
                           rte_rawdev_obj_t context)
{
        struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
        struct bphy_irq_queue *qp = &bphy_dev->queues[0];
        unsigned int queue = (size_t)context;
        struct cnxk_bphy_irq_info *info;
        struct cnxk_bphy_irq_msg *msg;
        struct cnxk_bphy_mem *mem;
        uint16_t *pf_func;
        void *rsp = NULL;
        int ret;

        if (queue >= RTE_DIM(bphy_dev->queues))
                return -EINVAL;

        if (count == 0)
                return 0;

        /* Dereference the message only after validating the count. */
        msg = buffers[0]->buf_addr;

        switch (msg->type) {
        case CNXK_BPHY_IRQ_MSG_TYPE_INIT:
                ret = cnxk_bphy_intr_init(dev->dev_id);
                if (ret)
                        return ret;
                break;
        case CNXK_BPHY_IRQ_MSG_TYPE_FINI:
                cnxk_bphy_intr_fini(dev->dev_id);
                break;
        case CNXK_BPHY_IRQ_MSG_TYPE_REGISTER:
                info = (struct cnxk_bphy_irq_info *)msg->data;
                ret = cnxk_bphy_intr_register(dev->dev_id, info->irq_num,
                                              info->handler, info->data,
                                              info->cpu);
                if (ret)
                        return ret;
                break;
        case CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER:
                info = (struct cnxk_bphy_irq_info *)msg->data;
                cnxk_bphy_intr_unregister(dev->dev_id, info->irq_num);
                break;
        case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
                mem = rte_zmalloc(NULL, sizeof(*mem), 0);
                if (!mem)
                        return -ENOMEM;

                *mem = bphy_dev->mem;
                rsp = mem;
                break;
        case CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC:
                pf_func = rte_malloc(NULL, sizeof(*pf_func), 0);
                if (!pf_func)
                        return -ENOMEM;

                *pf_func = roc_bphy_npa_pf_func_get();
                rsp = pf_func;
                break;
        case CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC:
                pf_func = rte_malloc(NULL, sizeof(*pf_func), 0);
                if (!pf_func)
                        return -ENOMEM;

                *pf_func = roc_bphy_sso_pf_func_get();
                rsp = pf_func;
                break;
        default:
                return -EINVAL;
        }

        /* get rid of last response if any */
        if (qp->rsp) {
                RTE_LOG(WARNING, PMD, "Previous response got overwritten\n");
                rte_free(qp->rsp);
        }
        qp->rsp = rsp;

        return 1;
}
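
/*
 * A minimal sketch of the control path as seen from an application
 * (illustrative only, assuming a started device identified by dev_id;
 * the helpers in rte_pmd_bphy.h wrap this pattern):
 *
 *	struct cnxk_bphy_irq_msg msg = {
 *		.type = CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
 *	};
 *	struct rte_rawdev_buf buf = { .buf_addr = &msg };
 *	struct rte_rawdev_buf *bufs[] = { &buf };
 *	rte_rawdev_obj_t q = (rte_rawdev_obj_t)(size_t)CNXK_BPHY_DEF_QUEUE;
 *
 *	rte_rawdev_enqueue_buffers(dev_id, bufs, 1, q);
 *	rte_rawdev_dequeue_buffers(dev_id, bufs, 1, q);
 *
 * After the dequeue, buf.buf_addr points to a struct cnxk_bphy_mem that
 * the caller owns and must eventually rte_free().
 */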
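/*
 * Hand back the response stored by the last control message, if any. At
 * most one buffer is returned per call.
 */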
static int
cnxk_bphy_irq_dequeue_bufs(struct rte_rawdev *dev,
                           struct rte_rawdev_buf **buffers, unsigned int count,
                           rte_rawdev_obj_t context)
{
        struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
        unsigned int queue = (size_t)context;
        struct bphy_irq_queue *qp;

        if (queue >= RTE_DIM(bphy_dev->queues))
                return -EINVAL;

        if (count == 0)
                return 0;

        qp = &bphy_dev->queues[queue];
        if (qp->rsp) {
                buffers[0]->buf_addr = qp->rsp;
                qp->rsp = NULL;

                return 1;
        }

        return 0;
}

static uint16_t
cnxk_bphy_irq_queue_count(struct rte_rawdev *dev)
{
        struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;

        return RTE_DIM(bphy_dev->queues);
}
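/*
 * Default queue configuration: each BPHY queue is exactly one message
 * (descriptor) deep, matching the single response slot kept per queue.
 */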
static int
cnxk_bphy_irq_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
                             rte_rawdev_obj_t queue_conf,
                             size_t queue_conf_size)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);

        if (queue_conf_size != sizeof(unsigned int))
                return -EINVAL;

        *(unsigned int *)queue_conf = 1;

        return 0;
}

static const struct rte_rawdev_ops bphy_rawdev_ops = {
        .queue_def_conf = cnxk_bphy_irq_queue_def_conf,
        .enqueue_bufs = cnxk_bphy_irq_enqueue_bufs,
        .dequeue_bufs = cnxk_bphy_irq_dequeue_bufs,
        .queue_count = cnxk_bphy_irq_queue_count,
        .dev_selftest = bphy_rawdev_selftest,
};

static int
bphy_rawdev_probe(struct rte_pci_driver *pci_drv,
                  struct rte_pci_device *pci_dev)
{
        struct bphy_device *bphy_dev = NULL;
        char name[RTE_RAWDEV_NAME_MAX_LEN];
        struct rte_rawdev *bphy_rawdev;
        int ret;

        RTE_SET_USED(pci_drv);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (!pci_dev->mem_resource[0].addr) {
                plt_err("BARs have invalid values: BAR0 %p, BAR2 %p",
                        pci_dev->mem_resource[0].addr,
                        pci_dev->mem_resource[2].addr);
                return -ENODEV;
        }

        ret = roc_plt_init();
        if (ret)
                return ret;

        bphy_rawdev_get_name(name, pci_dev);
        bphy_rawdev = rte_rawdev_pmd_allocate(name, sizeof(*bphy_dev),
                                              rte_socket_id());
        if (bphy_rawdev == NULL) {
                plt_err("Failed to allocate rawdev");
                return -ENOMEM;
        }

        bphy_rawdev->dev_ops = &bphy_rawdev_ops;
        bphy_rawdev->device = &pci_dev->device;
        bphy_rawdev->driver_name = pci_dev->driver->driver.name;

        bphy_dev = (struct bphy_device *)bphy_rawdev->dev_private;
        bphy_dev->mem.res0 = pci_dev->mem_resource[0];
        bphy_dev->mem.res2 = pci_dev->mem_resource[2];
        bphy_dev->bphy.pci_dev = pci_dev;

        ret = roc_bphy_dev_init(&bphy_dev->bphy);
        if (ret) {
                rte_rawdev_pmd_release(bphy_rawdev);
                return ret;
        }

        return 0;
}

static int
bphy_rawdev_remove(struct rte_pci_device *pci_dev)
{
        char name[RTE_RAWDEV_NAME_MAX_LEN];
        struct bphy_device *bphy_dev;
        struct rte_rawdev *rawdev;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (pci_dev == NULL) {
                plt_err("invalid pci_dev");
                return -EINVAL;
        }

        bphy_rawdev_get_name(name, pci_dev);
        rawdev = rte_rawdev_pmd_get_named_dev(name);
        if (rawdev == NULL) {
                plt_err("invalid device name (%s)", name);
                return -EINVAL;
        }

        bphy_dev = (struct bphy_device *)rawdev->dev_private;
        roc_bphy_dev_fini(&bphy_dev->bphy);

        return rte_rawdev_pmd_release(rawdev);
}

static struct rte_pci_driver cnxk_bphy_rawdev_pmd = {
        .id_table = pci_bphy_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = bphy_rawdev_probe,
        .remove = bphy_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(bphy_rawdev_pci_driver, cnxk_bphy_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(bphy_rawdev_pci_driver, pci_bphy_map);
RTE_PMD_REGISTER_KMOD_DEP(bphy_rawdev_pci_driver, "vfio-pci");