1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
4 #include <rte_bus_pci.h>
5 #include <rte_common.h>
10 #include <rte_rawdev.h>
11 #include <rte_rawdev_pmd.h>
15 #include "cnxk_bphy_irq.h"
16 #include "rte_pmd_bphy.h"
/* PCI ID table: matches Marvell CNXK BPHY devices. The terminating
 * sentinel entry is elided from this view.
 */
18 static const struct rte_pci_id pci_bphy_map[] = {
19 {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_BPHY)},
/* NOTE(review): the member below belongs to a selftest helper struct
 * (presumably struct bphy_test) whose opening lines are elided from
 * this view — confirm against the full file.
 */
27 cnxk_bphy_intr_handler_t handler;
/* Selftest scratch array; allocated in bphy_rawdev_selftest(), one
 * slot per interrupt number.
 */
35 static struct bphy_test *test;
/* Selftest interrupt handler: marks the interrupt as handled and
 * captures the int payload passed at registration time so the selftest
 * can later verify that each irq was routed with its own data.
 * (Return type line and braces are elided from this view.)
 */
38 bphy_test_handler_fn(int irq_num, void *isr_data)
40 test[irq_num].handled_intr = true;
41 test[irq_num].handled_data = *((int *)isr_data);
/* Rawdev selftest: checks queue discovery/configuration, queries the
 * NPA/SSO pf_func values (warn-only), then exercises the full interrupt
 * cycle — init, register one handler per irq, fire every irq, verify
 * each was handled with its registered payload, unregister, fini.
 * Returns 0 on success, negative errno otherwise. Several lines
 * (locals, error checks, cleanup labels) are elided from this view.
 */
45 bphy_rawdev_selftest(uint16_t dev_id)
47 unsigned int i, queues, descs;
52 queues = rte_rawdev_queue_count(dev_id);
/* The device must expose exactly the expected queue count. */
55 if (queues != BPHY_QUEUE_CNT)
58 ret = rte_rawdev_start(dev_id);
62 ret = rte_rawdev_queue_conf_get(dev_id, CNXK_BPHY_DEF_QUEUE, &descs,
68 plt_err("Wrong number of descs reported\n");
/* pf_func values are advisory in this test: zero/failure only warns. */
72 ret = rte_pmd_bphy_npa_pf_func_get(dev_id, &pf_func);
73 if (ret || pf_func == 0)
74 plt_warn("NPA pf_func is invalid");
76 ret = rte_pmd_bphy_sso_pf_func_get(dev_id, &pf_func);
77 if (ret || pf_func == 0)
78 plt_warn("SSO pf_func is invalid");
80 ret = rte_pmd_bphy_intr_init(dev_id);
82 plt_err("intr init failed");
86 max_irq = cnxk_bphy_irq_max_get(dev_id);
/* One test slot per irq; zeroed so handled_intr starts out false. */
88 test = rte_zmalloc("BPHY", max_irq * sizeof(*test), 0);
90 plt_err("intr alloc failed");
/* Give each irq a distinct payload (its own index) so a cross-wired
 * handler is detectable below.
 */
94 for (i = 0; i < max_irq; i++) {
95 test[i].test_data = i;
97 test[i].handler = bphy_test_handler_fn;
98 test[i].data = &test[i].test_data;
101 for (i = 0; i < max_irq; i++) {
102 ret = rte_pmd_bphy_intr_register(dev_id, test[i].irq_num,
103 test[i].handler, test[i].data,
105 if (ret == -ENOTSUP) {
106 /* In the test we iterate over all irq numbers
107 * so if some of them are not supported by given
108 * platform we treat respective results as valid
109 * ones. This way they have no impact on overall
112 test[i].handled_intr = true;
113 test[i].handled_data = test[i].test_data;
119 plt_err("intr register failed at irq %d", i);
/* Fire every interrupt, then verify each was seen exactly with the
 * payload registered for it.
 */
124 for (i = 0; i < max_irq; i++)
125 roc_bphy_intr_handler(i);
127 for (i = 0; i < max_irq; i++) {
128 if (!test[i].handled_intr) {
129 plt_err("intr %u not handled", i);
133 if (test[i].handled_data != test[i].test_data) {
134 plt_err("intr %u has wrong handler", i);
142 * In case of registration failure the loop goes over all
143 * interrupts which is safe due to internal guards in
144 * rte_pmd_bphy_intr_unregister().
146 for (i = 0; i < max_irq; i++)
147 rte_pmd_bphy_intr_unregister(dev_id, i);
151 rte_pmd_bphy_intr_fini(dev_id);
153 rte_rawdev_stop(dev_id);
/* Build the canonical rawdev name "BPHY:<bus>:<devid>.<function>" from
 * the device's PCI address. name must hold RTE_RAWDEV_NAME_MAX_LEN
 * bytes; snprintf guarantees NUL-termination.
 */
159 bphy_rawdev_get_name(char *name, struct rte_pci_device *pci_dev)
161 snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "BPHY:%02x:%02x.%x",
162 pci_dev->addr.bus, pci_dev->addr.devid,
163 pci_dev->addr.function);
/* Enqueue a single request message (buffers[0] only) on an IRQ queue
 * and dispatch it by message type: intr init/fini, handler
 * register/unregister, BAR memory query and NPA/SSO pf_func queries.
 * Parts of the switch bodies, error paths and the final response
 * bookkeeping are elided from this view.
 */
167 cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
168 struct rte_rawdev_buf **buffers, unsigned int count,
169 rte_rawdev_obj_t context)
171 struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
172 struct cnxk_bphy_irq_msg *msg = buffers[0]->buf_addr;
173 struct bphy_irq_queue *qp = &bphy_dev->queues[0];
/* The queue id travels in the opaque context pointer. */
174 unsigned int queue = (size_t)context;
175 struct cnxk_bphy_irq_info *info;
176 struct cnxk_bphy_mem *mem;
/* Reject out-of-range queue ids before touching the queue array. */
181 if (queue >= RTE_DIM(bphy_dev->queues))
188 case CNXK_BPHY_IRQ_MSG_TYPE_INIT:
189 ret = cnxk_bphy_intr_init(dev->dev_id);
193 case CNXK_BPHY_IRQ_MSG_TYPE_FINI:
194 cnxk_bphy_intr_fini(dev->dev_id);
196 case CNXK_BPHY_IRQ_MSG_TYPE_REGISTER:
/* Register/unregister payload rides in msg->data. */
197 info = (struct cnxk_bphy_irq_info *)msg->data;
198 ret = cnxk_bphy_intr_register(dev->dev_id, info->irq_num,
199 info->handler, info->data,
204 case CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER:
205 info = (struct cnxk_bphy_irq_info *)msg->data;
206 cnxk_bphy_intr_unregister(dev->dev_id, info->irq_num);
/* Query replies below allocate a response object; presumably the
 * dequeue path hands it to the caller, who owns freeing it — confirm
 * against the elided response-handling code.
 */
208 case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
209 mem = rte_zmalloc(NULL, sizeof(*mem), 0);
213 *mem = bphy_dev->mem;
216 case CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC:
217 pf_func = rte_malloc(NULL, sizeof(*pf_func), 0);
221 *pf_func = roc_bphy_npa_pf_func_get();
224 case CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC:
225 pf_func = rte_malloc(NULL, sizeof(*pf_func), 0);
229 *pf_func = roc_bphy_sso_pf_func_get();
236 /* get rid of last response if any */
238 RTE_LOG(WARNING, PMD, "Previous response got overwritten\n");
/* Dequeue the pending response (if any) from the given queue into
 * buffers[0]. The queue id is carried in the opaque context pointer
 * and is bounds-checked first. Handling of count and the return value
 * is elided from this view.
 */
247 cnxk_bphy_irq_dequeue_bufs(struct rte_rawdev *dev,
248 struct rte_rawdev_buf **buffers, unsigned int count,
249 rte_rawdev_obj_t context)
251 struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
252 unsigned int queue = (size_t)context;
253 struct bphy_irq_queue *qp;
255 if (queue >= RTE_DIM(bphy_dev->queues))
261 qp = &bphy_dev->queues[queue];
/* Hand the stored response pointer to the caller. */
263 buffers[0]->buf_addr = qp->rsp;
/* Report the number of queues this device exposes — the compile-time
 * size of the per-device queue array.
 */
273 cnxk_bphy_irq_queue_count(struct rte_rawdev *dev)
275 struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
277 return RTE_DIM(bphy_dev->queues);
/* Default queue configuration: every queue holds a single descriptor.
 * queue_conf must point at an unsigned int of exactly matching size;
 * queue_id is ignored since all queues share the same configuration.
 */
281 cnxk_bphy_irq_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
282 rte_rawdev_obj_t queue_conf,
283 size_t queue_conf_size)
286 RTE_SET_USED(queue_id);
/* Reject callers passing a differently-sized output object. */
288 if (queue_conf_size != sizeof(unsigned int))
291 *(unsigned int *)queue_conf = 1;
/* Rawdev ops vtable wiring the callbacks above into the rawdev layer. */
296 static const struct rte_rawdev_ops bphy_rawdev_ops = {
297 .queue_def_conf = cnxk_bphy_irq_queue_def_conf,
298 .enqueue_bufs = cnxk_bphy_irq_enqueue_bufs,
299 .dequeue_bufs = cnxk_bphy_irq_dequeue_bufs,
300 .queue_count = cnxk_bphy_irq_queue_count,
301 .dev_selftest = bphy_rawdev_selftest,
/* PCI probe (primary process only): validate BAR mappings, allocate the
 * named rawdev, wire the ops table and device linkage, stash BAR0/BAR2
 * resources and initialize the ROC BPHY layer. Error-path returns and
 * the success return are partly elided from this view.
 */
305 bphy_rawdev_probe(struct rte_pci_driver *pci_drv,
306 struct rte_pci_device *pci_dev)
308 struct bphy_device *bphy_dev = NULL;
309 char name[RTE_RAWDEV_NAME_MAX_LEN];
310 struct rte_rawdev *bphy_rawdev;
313 RTE_SET_USED(pci_drv);
/* Device setup is done in the primary process only. */
315 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
/* NOTE(review): only BAR0 is tested although the message prints both
 * BAR0 and BAR2 — confirm whether mem_resource[2].addr should be
 * validated here as well.
 */
318 if (!pci_dev->mem_resource[0].addr) {
319 plt_err("BARs have invalid values: BAR0 %p\n BAR2 %p",
320 pci_dev->mem_resource[0].addr,
321 pci_dev->mem_resource[2].addr);
325 ret = roc_plt_init();
329 bphy_rawdev_get_name(name, pci_dev);
330 bphy_rawdev = rte_rawdev_pmd_allocate(name, sizeof(*bphy_dev),
332 if (bphy_rawdev == NULL) {
333 plt_err("Failed to allocate rawdev");
337 bphy_rawdev->dev_ops = &bphy_rawdev_ops;
338 bphy_rawdev->device = &pci_dev->device;
339 bphy_rawdev->driver_name = pci_dev->driver->driver.name;
/* Keep BAR0/BAR2 and the PCI handle in private data for later use. */
341 bphy_dev = (struct bphy_device *)bphy_rawdev->dev_private;
342 bphy_dev->mem.res0 = pci_dev->mem_resource[0];
343 bphy_dev->mem.res2 = pci_dev->mem_resource[2];
344 bphy_dev->bphy.pci_dev = pci_dev;
346 ret = roc_bphy_dev_init(&bphy_dev->bphy);
/* Undo the rawdev allocation if low-level init failed. */
348 rte_rawdev_pmd_release(bphy_rawdev);
/* PCI remove (primary process only): look the rawdev up by its derived
 * name, tear down the ROC BPHY layer, then release the rawdev. Error
 * returns are elided from this view.
 */
356 bphy_rawdev_remove(struct rte_pci_device *pci_dev)
358 char name[RTE_RAWDEV_NAME_MAX_LEN];
359 struct bphy_device *bphy_dev;
360 struct rte_rawdev *rawdev;
362 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
365 if (pci_dev == NULL) {
366 plt_err("invalid pci_dev");
/* Name derivation must match what probe used for the lookup to hit. */
370 bphy_rawdev_get_name(name, pci_dev);
371 rawdev = rte_rawdev_pmd_get_named_dev(name);
372 if (rawdev == NULL) {
373 plt_err("invalid device name (%s)", name);
377 bphy_dev = (struct bphy_device *)rawdev->dev_private;
378 roc_bphy_dev_fini(&bphy_dev->bphy);
380 return rte_rawdev_pmd_release(rawdev);
/* PCI driver definition: requires BAR mapping and IOVA-as-VA mode. */
383 static struct rte_pci_driver cnxk_bphy_rawdev_pmd = {
384 .id_table = pci_bphy_map,
385 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
386 .probe = bphy_rawdev_probe,
387 .remove = bphy_rawdev_remove,
/* Register the PMD, its PCI ID table and its kernel-module dependency
 * (vfio-pci) under the public name bphy_rawdev_pci_driver.
 */
390 RTE_PMD_REGISTER_PCI(bphy_rawdev_pci_driver, cnxk_bphy_rawdev_pmd);
391 RTE_PMD_REGISTER_PCI_TABLE(bphy_rawdev_pci_driver, pci_bphy_map);
392 RTE_PMD_REGISTER_KMOD_DEP(bphy_rawdev_pci_driver, "vfio-pci");