raw/cnxk_bphy: BPHY rawdev PMD (PCI probe, IRQ queue ops, selftest)
[dpdk.git] / drivers / raw / cnxk_bphy / cnxk_bphy.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <rte_bus_pci.h>
5 #include <rte_common.h>
6 #include <rte_dev.h>
7 #include <rte_eal.h>
8 #include <rte_lcore.h>
9 #include <rte_pci.h>
10 #include <rte_rawdev.h>
11 #include <rte_rawdev_pmd.h>
12
13 #include <roc_api.h>
14 #include <roc_bphy_irq.h>
15
16 #include "cnxk_bphy_irq.h"
17 #include "rte_pmd_bphy.h"
18
/* PCI IDs handled by this driver; the all-zero entry terminates the table. */
static const struct rte_pci_id pci_bphy_map[] = {
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_BPHY)},
	{
		.vendor_id = 0,
	},
};
25
/* Per-interrupt bookkeeping used only by the rawdev selftest below. */
struct bphy_test {
	int irq_num;			/* interrupt line under test */
	cnxk_bphy_intr_handler_t handler; /* handler registered for irq_num */
	void *data;			/* argument passed to the handler */
	int cpu;			/* NOTE(review): set nowhere in this file — presumably reserved */
	bool handled_intr;		/* set by the handler when it fires */
	int handled_data;		/* value the handler read from its data */
	int test_data;			/* expected value for handled_data */
};

/* Selftest scratch array, one entry per interrupt; allocated in selftest. */
static struct bphy_test *test;
37
38 static void
39 bphy_test_handler_fn(int irq_num, void *isr_data)
40 {
41         test[irq_num].handled_intr = true;
42         test[irq_num].handled_data = *((int *)isr_data);
43 }
44
45 static int
46 bphy_rawdev_selftest(uint16_t dev_id)
47 {
48         unsigned int i, queues, descs;
49         uint64_t max_irq;
50         int ret;
51
52         queues = rte_rawdev_queue_count(dev_id);
53         if (queues == 0)
54                 return -ENODEV;
55
56         ret = rte_rawdev_start(dev_id);
57         if (ret)
58                 return ret;
59
60         ret = rte_rawdev_queue_conf_get(dev_id, CNXK_BPHY_DEF_QUEUE, &descs,
61                                         sizeof(descs));
62         if (ret)
63                 goto err_desc;
64         if (descs != 1) {
65                 ret = -ENODEV;
66                 plt_err("Wrong number of descs reported\n");
67                 goto err_desc;
68         }
69
70         ret = rte_pmd_bphy_intr_init(dev_id);
71         if (ret) {
72                 plt_err("intr init failed");
73                 return ret;
74         }
75
76         max_irq = cnxk_bphy_irq_max_get(dev_id);
77
78         test = rte_zmalloc("BPHY", max_irq * sizeof(*test), 0);
79         if (test == NULL) {
80                 plt_err("intr alloc failed");
81                 goto err_alloc;
82         }
83
84         for (i = 0; i < max_irq; i++) {
85                 test[i].test_data = i;
86                 test[i].irq_num = i;
87                 test[i].handler = bphy_test_handler_fn;
88                 test[i].data = &test[i].test_data;
89         }
90
91         for (i = 0; i < max_irq; i++) {
92                 ret = rte_pmd_bphy_intr_register(dev_id, test[i].irq_num,
93                                                  test[i].handler, test[i].data,
94                                                  0);
95                 if (ret == -ENOTSUP) {
96                         /* In the test we iterate over all irq numbers
97                          * so if some of them are not supported by given
98                          * platform we treat respective results as valid
99                          * ones. This way they have no impact on overall
100                          * test results.
101                          */
102                         test[i].handled_intr = true;
103                         test[i].handled_data = test[i].test_data;
104                         ret = 0;
105                         continue;
106                 }
107
108                 if (ret) {
109                         plt_err("intr register failed at irq %d", i);
110                         goto err_register;
111                 }
112         }
113
114         for (i = 0; i < max_irq; i++)
115                 roc_bphy_intr_handler(i);
116
117         for (i = 0; i < max_irq; i++) {
118                 if (!test[i].handled_intr) {
119                         plt_err("intr %u not handled", i);
120                         ret = -1;
121                         break;
122                 }
123                 if (test[i].handled_data != test[i].test_data) {
124                         plt_err("intr %u has wrong handler", i);
125                         ret = -1;
126                         break;
127                 }
128         }
129
130 err_register:
131         /*
132          * In case of registration failure the loop goes over all
133          * interrupts which is safe due to internal guards in
134          * rte_pmd_bphy_intr_unregister().
135          */
136         for (i = 0; i < max_irq; i++)
137                 rte_pmd_bphy_intr_unregister(dev_id, i);
138
139         rte_free(test);
140 err_alloc:
141         rte_pmd_bphy_intr_fini(dev_id);
142 err_desc:
143         rte_rawdev_stop(dev_id);
144
145         return ret;
146 }
147
148 static void
149 bphy_rawdev_get_name(char *name, struct rte_pci_device *pci_dev)
150 {
151         snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "BPHY:%x:%02x.%x",
152                  pci_dev->addr.bus, pci_dev->addr.devid,
153                  pci_dev->addr.function);
154 }
155
156 static int
157 cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
158                            struct rte_rawdev_buf **buffers, unsigned int count,
159                            rte_rawdev_obj_t context)
160 {
161         struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
162         struct cnxk_bphy_irq_msg *msg = buffers[0]->buf_addr;
163         unsigned int queue = (size_t)context;
164         struct cnxk_bphy_irq_info *info;
165         int ret = 0;
166
167         if (queue >= RTE_DIM(bphy_dev->queues))
168                 return -EINVAL;
169
170         if (count == 0)
171                 return 0;
172
173         switch (msg->type) {
174         case CNXK_BPHY_IRQ_MSG_TYPE_INIT:
175                 ret = cnxk_bphy_intr_init(dev->dev_id);
176                 break;
177         case CNXK_BPHY_IRQ_MSG_TYPE_FINI:
178                 cnxk_bphy_intr_fini(dev->dev_id);
179                 break;
180         case CNXK_BPHY_IRQ_MSG_TYPE_REGISTER:
181                 info = (struct cnxk_bphy_irq_info *)msg->data;
182                 ret = cnxk_bphy_intr_register(dev->dev_id, info->irq_num,
183                                               info->handler, info->data,
184                                               info->cpu);
185                 break;
186         case CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER:
187                 info = (struct cnxk_bphy_irq_info *)msg->data;
188                 cnxk_bphy_intr_unregister(dev->dev_id, info->irq_num);
189                 break;
190         case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
191                 bphy_dev->queues[queue].rsp = &bphy_dev->mem;
192                 break;
193         default:
194                 ret = -EINVAL;
195         }
196
197         return ret;
198 }
199
200 static int
201 cnxk_bphy_irq_dequeue_bufs(struct rte_rawdev *dev,
202                            struct rte_rawdev_buf **buffers, unsigned int count,
203                            rte_rawdev_obj_t context)
204 {
205         struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
206         unsigned int queue = (size_t)context;
207
208         if (queue >= RTE_DIM(bphy_dev->queues))
209                 return -EINVAL;
210
211         if (count == 0)
212                 return 0;
213
214         buffers[0]->buf_addr = bphy_dev->queues[queue].rsp;
215
216         return 0;
217 }
218
219 static uint16_t
220 cnxk_bphy_irq_queue_count(struct rte_rawdev *dev)
221 {
222         struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
223
224         return RTE_DIM(bphy_dev->queues);
225 }
226
227 static int
228 cnxk_bphy_irq_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
229                              rte_rawdev_obj_t queue_conf,
230                              size_t queue_conf_size)
231 {
232         RTE_SET_USED(dev);
233         RTE_SET_USED(queue_id);
234
235         if (queue_conf_size != sizeof(unsigned int))
236                 return -EINVAL;
237
238         *(unsigned int *)queue_conf = 1;
239
240         return 0;
241 }
242
/* Rawdev op table wired into every BPHY rawdev at probe time. */
static const struct rte_rawdev_ops bphy_rawdev_ops = {
	.queue_def_conf = cnxk_bphy_irq_queue_def_conf,
	.enqueue_bufs = cnxk_bphy_irq_enqueue_bufs,
	.dequeue_bufs = cnxk_bphy_irq_dequeue_bufs,
	.queue_count = cnxk_bphy_irq_queue_count,
	.dev_selftest = bphy_rawdev_selftest,
};
250
251 static int
252 bphy_rawdev_probe(struct rte_pci_driver *pci_drv,
253                   struct rte_pci_device *pci_dev)
254 {
255         struct bphy_device *bphy_dev = NULL;
256         char name[RTE_RAWDEV_NAME_MAX_LEN];
257         struct rte_rawdev *bphy_rawdev;
258         int ret;
259
260         RTE_SET_USED(pci_drv);
261
262         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
263                 return 0;
264
265         if (!pci_dev->mem_resource[0].addr) {
266                 plt_err("BARs have invalid values: BAR0 %p\n BAR2 %p",
267                         pci_dev->mem_resource[0].addr,
268                         pci_dev->mem_resource[2].addr);
269                 return -ENODEV;
270         }
271
272         ret = roc_plt_init();
273         if (ret)
274                 return ret;
275
276         bphy_rawdev_get_name(name, pci_dev);
277         bphy_rawdev = rte_rawdev_pmd_allocate(name, sizeof(*bphy_dev),
278                                               rte_socket_id());
279         if (bphy_rawdev == NULL) {
280                 plt_err("Failed to allocate rawdev");
281                 return -ENOMEM;
282         }
283
284         bphy_rawdev->dev_ops = &bphy_rawdev_ops;
285         bphy_rawdev->device = &pci_dev->device;
286         bphy_rawdev->driver_name = pci_dev->driver->driver.name;
287
288         bphy_dev = (struct bphy_device *)bphy_rawdev->dev_private;
289         bphy_dev->mem.res0 = pci_dev->mem_resource[0];
290         bphy_dev->mem.res2 = pci_dev->mem_resource[2];
291
292         return 0;
293 }
294
295 static int
296 bphy_rawdev_remove(struct rte_pci_device *pci_dev)
297 {
298         char name[RTE_RAWDEV_NAME_MAX_LEN];
299         struct rte_rawdev *rawdev;
300
301         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
302                 return 0;
303
304         if (pci_dev == NULL) {
305                 plt_err("invalid pci_dev");
306                 return -EINVAL;
307         }
308
309         rawdev = rte_rawdev_pmd_get_named_dev(name);
310         if (rawdev == NULL) {
311                 plt_err("invalid device name (%s)", name);
312                 return -EINVAL;
313         }
314
315         bphy_rawdev_get_name(name, pci_dev);
316
317         return rte_rawdev_pmd_release(rawdev);
318 }
319
/* PCI driver descriptor: needs BAR mapping and IOVA-as-VA addressing. */
static struct rte_pci_driver cnxk_bphy_rawdev_pmd = {
	.id_table = pci_bphy_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = bphy_rawdev_probe,
	.remove = bphy_rawdev_remove,
};

/* Register the driver, its PCI ID table and its kernel module dependency. */
RTE_PMD_REGISTER_PCI(bphy_rawdev_pci_driver, cnxk_bphy_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(bphy_rawdev_pci_driver, pci_bphy_map);
RTE_PMD_REGISTER_KMOD_DEP(bphy_rawdev_pci_driver, "vfio-pci");