/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
4 #include <rte_bus_pci.h>
5 #include <rte_common.h>
10 #include <rte_rawdev.h>
11 #include <rte_rawdev_pmd.h>
15 #include "cnxk_bphy_irq.h"
16 #include "rte_pmd_bphy.h"
/* PCI ID table this PMD binds to: Marvell (Cavium vendor ID) CNXK BPHY
 * devices. NOTE(review): the all-zero terminator entry required by the
 * PCI bus scan is not visible in this chunk — confirm it is present
 * before the closing brace. */
18 static const struct rte_pci_id pci_bphy_map[] = {
19 {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_BPHY)},
/* Format the canonical rawdev name for a BPHY PCI device as
 * "BPHY:<bus>:<devid>.<function>" into @name. The buffer must be at
 * least RTE_RAWDEV_NAME_MAX_LEN bytes; snprintf guarantees
 * NUL-termination within that bound. */
26 bphy_rawdev_get_name(char *name, struct rte_pci_device *pci_dev)
28 snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "BPHY:%x:%02x.%x",
29 pci_dev->addr.bus, pci_dev->addr.devid,
30 pci_dev->addr.function);
/*
 * Enqueue op used as a message-based control path rather than a data path:
 * buffers[0]->buf_addr carries a struct cnxk_bphy_irq_msg and @context
 * encodes the target queue index (cast through size_t).
 *
 * Dispatch on msg->type:
 *   - INIT:    set up interrupt handling for this rawdev
 *              (cnxk_bphy_intr_init()).
 *   - FINI:    tear interrupt handling down (cnxk_bphy_intr_fini(),
 *              void return — no error to propagate).
 *   - MEM_GET: stash a pointer to the device BAR memory descriptor in the
 *              queue's rsp slot, to be handed back by a later dequeue.
 *
 * Out-of-range queue indices are rejected up front (bounds check against
 * RTE_DIM of the fixed queues array). NOTE(review): the switch statement
 * header, break/default arms and the return path are elided in this
 * chunk — presumably other message types (e.g. register/unregister) are
 * handled in the elided lines; verify against the full file.
 */
34 cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
35 struct rte_rawdev_buf **buffers, unsigned int count,
36 rte_rawdev_obj_t context)
38 struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
39 struct cnxk_bphy_irq_msg *msg = buffers[0]->buf_addr;
40 unsigned int queue = (size_t)context;
43 if (queue >= RTE_DIM(bphy_dev->queues))
50 case CNXK_BPHY_IRQ_MSG_TYPE_INIT:
51 ret = cnxk_bphy_intr_init(dev->dev_id);
53 case CNXK_BPHY_IRQ_MSG_TYPE_FINI:
54 cnxk_bphy_intr_fini(dev->dev_id);
56 case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
57 bphy_dev->queues[queue].rsp = &bphy_dev->mem;
/*
 * Dequeue op paired with the enqueue control path above: hands back the
 * response stored by a prior enqueue for the queue selected via @context,
 * by pointing buffers[0]->buf_addr at the queue's rsp slot (no copy —
 * the caller receives a pointer into driver-owned state).
 * The queue index is validated against the fixed queues array first.
 * NOTE(review): the error return and count handling are elided in this
 * chunk.
 */
67 cnxk_bphy_irq_dequeue_bufs(struct rte_rawdev *dev,
68 struct rte_rawdev_buf **buffers, unsigned int count,
69 rte_rawdev_obj_t context)
71 struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
72 unsigned int queue = (size_t)context;
74 if (queue >= RTE_DIM(bphy_dev->queues))
80 buffers[0]->buf_addr = bphy_dev->queues[queue].rsp;
/* Report the number of queues this device exposes — simply the size of
 * the fixed per-device queues array (compile-time constant via RTE_DIM). */
86 cnxk_bphy_irq_queue_count(struct rte_rawdev *dev)
88 struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
90 return RTE_DIM(bphy_dev->queues);
/*
 * Default queue configuration op: every queue reports a depth of 1 — this
 * is a single-message control channel, not a bulk data path. The same
 * value is returned for all queues (queue_id intentionally unused).
 * Callers passing a conf buffer whose size does not match
 * sizeof(unsigned int) are rejected; NOTE(review): the error return on
 * the size check is elided in this chunk.
 */
94 cnxk_bphy_irq_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
95 rte_rawdev_obj_t queue_conf,
96 size_t queue_conf_size)
99 RTE_SET_USED(queue_id);
101 if (queue_conf_size != sizeof(unsigned int))
104 *(unsigned int *)queue_conf = 1;
/* Rawdev ops vtable: queue introspection plus the message-based
 * enqueue/dequeue control path implemented above. Ops not listed here
 * stay NULL and the corresponding rte_rawdev APIs report unsupported. */
109 static const struct rte_rawdev_ops bphy_rawdev_ops = {
110 .queue_def_conf = cnxk_bphy_irq_queue_def_conf,
111 .enqueue_bufs = cnxk_bphy_irq_enqueue_bufs,
112 .dequeue_bufs = cnxk_bphy_irq_dequeue_bufs,
113 .queue_count = cnxk_bphy_irq_queue_count,
/*
 * PCI probe callback: called by the PCI bus for each device matching
 * pci_bphy_map. Allocates a rawdev named via bphy_rawdev_get_name(),
 * wires up the ops table and records the BAR0/BAR2 resources in the
 * per-device private data so MEM_GET messages can expose them later.
 *
 * Only the primary process performs device setup (secondaries return
 * early). A device with an unmapped BAR0 is rejected up front.
 * NOTE(review): the error-return statements, the socket-id argument to
 * rte_rawdev_pmd_allocate() and the final return are elided in this
 * chunk.
 */
117 bphy_rawdev_probe(struct rte_pci_driver *pci_drv,
118 struct rte_pci_device *pci_dev)
120 struct bphy_device *bphy_dev = NULL;
121 char name[RTE_RAWDEV_NAME_MAX_LEN];
122 struct rte_rawdev *bphy_rawdev;
125 RTE_SET_USED(pci_drv);
127 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
130 if (!pci_dev->mem_resource[0].addr) {
131 plt_err("BARs have invalid values: BAR0 %p\n BAR2 %p",
132 pci_dev->mem_resource[0].addr,
133 pci_dev->mem_resource[2].addr);
/* Bring up the common ROC (hardware abstraction) layer before touching
 * the device. */
137 ret = roc_plt_init();
141 bphy_rawdev_get_name(name, pci_dev);
142 bphy_rawdev = rte_rawdev_pmd_allocate(name, sizeof(*bphy_dev),
144 if (bphy_rawdev == NULL) {
145 plt_err("Failed to allocate rawdev");
149 bphy_rawdev->dev_ops = &bphy_rawdev_ops;
150 bphy_rawdev->device = &pci_dev->device;
151 bphy_rawdev->driver_name = pci_dev->driver->driver.name;
/* Remember both BAR resources; MEM_GET responses point at this copy. */
153 bphy_dev = (struct bphy_device *)bphy_rawdev->dev_private;
154 bphy_dev->mem.res0 = pci_dev->mem_resource[0];
155 bphy_dev->mem.res2 = pci_dev->mem_resource[2];
161 bphy_rawdev_remove(struct rte_pci_device *pci_dev)
163 char name[RTE_RAWDEV_NAME_MAX_LEN];
164 struct rte_rawdev *rawdev;
166 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
169 if (pci_dev == NULL) {
170 plt_err("invalid pci_dev");
174 rawdev = rte_rawdev_pmd_get_named_dev(name);
175 if (rawdev == NULL) {
176 plt_err("invalid device name (%s)", name);
180 bphy_rawdev_get_name(name, pci_dev);
182 return rte_rawdev_pmd_release(rawdev);
/* PCI driver definition: requests BAR mapping by the PCI bus and
 * IOVA-as-VA mode (the device is driven with process virtual addresses,
 * e.g. under vfio-pci). */
185 static struct rte_pci_driver cnxk_bphy_rawdev_pmd = {
186 .id_table = pci_bphy_map,
187 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
188 .probe = bphy_rawdev_probe,
189 .remove = bphy_rawdev_remove,
/* Register the PMD with the PCI bus under the name
 * "bphy_rawdev_pci_driver", export its ID table for pmdinfo tooling, and
 * declare the vfio-pci kernel module dependency. */
192 RTE_PMD_REGISTER_PCI(bphy_rawdev_pci_driver, cnxk_bphy_rawdev_pmd);
193 RTE_PMD_REGISTER_PCI_TABLE(bphy_rawdev_pci_driver, pci_bphy_map);
194 RTE_PMD_REGISTER_KMOD_DEP(bphy_rawdev_pci_driver, "vfio-pci");