1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
6 #include <rte_bus_pci.h>
7 #include <rte_rawdev.h>
8 #include <rte_rawdev_pmd.h>
12 #include "cnxk_bphy_cgx.h"
13 #include "rte_pmd_bphy.h"
/* Per-LMAC message queue. NOTE(review): not all fields are visible in this
 * chunk — presumably it carries at least the LMAC id and a response pointer
 * (`lmac`/`rsp` are dereferenced elsewhere in the file); confirm against the
 * full source.
 */
15 struct cnxk_bphy_cgx_queue {
17 /* queue holds up to one response */
/* Per-device private data stored in the rawdev's dev_private area.
 * One queue is created per LMAC present in the underlying CGX block.
 */
21 struct cnxk_bphy_cgx {
22 struct roc_bphy_cgx *rcgx;              /* ROC-layer CGX handle (heap-allocated in probe) */
23 struct cnxk_bphy_cgx_queue queues[MAX_LMACS_PER_CGX];
24 unsigned int num_queues;                /* number of valid entries in queues[] */
/* Build the canonical rawdev name ("BPHY_CGX:bus:devid.function") for a PCI
 * device into name[len]. Used by both probe and remove so lookups match.
 */
28 cnxk_bphy_cgx_format_name(char *name, unsigned int len,
29 			  struct rte_pci_device *pci_dev)
31 	snprintf(name, len, "BPHY_CGX:%x:%02x.%x", pci_dev->addr.bus,
32 		 pci_dev->addr.devid, pci_dev->addr.function);
/* rawdev queue_def_conf callback: report the default queue configuration.
 * The queue config here is a bare unsigned int; the caller's buffer size is
 * validated against it. NOTE(review): the size-mismatch error path and the
 * value written through `conf` are not visible in this chunk — confirm in
 * the full source.
 */
36 cnxk_bphy_cgx_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
37 			     rte_rawdev_obj_t queue_conf,
38 			     size_t queue_conf_size)
43 	RTE_SET_USED(queue_id);
45 	if (queue_conf_size != sizeof(*conf))
48 	conf = (unsigned int *)queue_conf;
/* Decode one request message from `buf` and execute it against LMAC `lmac`
 * of the given CGX via the ROC layer. Dispatches on msg->type; GET_LINKINFO
 * and GET_SUPPORTED_FEC allocate a response object (rte_zmalloc) which is
 * stashed in the per-queue response slot for a later dequeue. Each queue
 * holds at most one response: a pending, never-dequeued response is
 * overwritten (with a warning).
 *
 * NOTE(review): several lines of this function (switch header, breaks,
 * error paths, the final response-store code) are not visible in this
 * chunk; comments below describe only what the visible lines show.
 */
55 cnxk_bphy_cgx_process_buf(struct cnxk_bphy_cgx *cgx, unsigned int queue,
56 			  struct rte_rawdev_buf *buf)
58 	struct cnxk_bphy_cgx_queue *qp = &cgx->queues[queue];
59 	struct cnxk_bphy_cgx_msg_set_link_state *link_state;
60 	struct cnxk_bphy_cgx_msg *msg = buf->buf_addr;
61 	struct cnxk_bphy_cgx_msg_link_mode *link_mode;
62 	struct cnxk_bphy_cgx_msg_link_info *link_info;
63 	struct roc_bphy_cgx_link_info rlink_info;
64 	struct roc_bphy_cgx_link_mode rlink_mode;
65 	enum roc_bphy_cgx_eth_link_fec *fec;
66 	unsigned int lmac = qp->lmac;
71 	case CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO:
72 		memset(&rlink_info, 0, sizeof(rlink_info));
73 		ret = roc_bphy_cgx_get_linkinfo(cgx->rcgx, lmac, &rlink_info);
		/* Translate the ROC link info into the PMD's public struct;
		 * the response is heap-allocated and ownership passes to the
		 * queue's response slot. */
77 		link_info = rte_zmalloc(NULL, sizeof(*link_info), 0);
81 		link_info->link_up = rlink_info.link_up;
82 		link_info->full_duplex = rlink_info.full_duplex;
84 			(enum cnxk_bphy_cgx_eth_link_speed)rlink_info.speed;
85 		link_info->autoneg = rlink_info.an;
87 			(enum cnxk_bphy_cgx_eth_link_fec)rlink_info.fec;
89 			(enum cnxk_bphy_cgx_eth_link_mode)rlink_info.mode;
92 	case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE:
93 		ret = roc_bphy_cgx_intlbk_disable(cgx->rcgx, lmac);
95 	case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE:
96 		ret = roc_bphy_cgx_intlbk_enable(cgx->rcgx, lmac);
98 	case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE:
99 		ret = roc_bphy_cgx_ptp_rx_disable(cgx->rcgx, lmac);
101 	case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE:
102 		ret = roc_bphy_cgx_ptp_rx_enable(cgx->rcgx, lmac);
104 	case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE:
		/* Translate the PMD link-mode request into the ROC struct
		 * before calling into the hardware layer. */
105 		link_mode = msg->data;
106 		memset(&rlink_mode, 0, sizeof(rlink_mode));
107 		rlink_mode.full_duplex = link_mode->full_duplex;
108 		rlink_mode.an = link_mode->autoneg;
110 			(enum roc_bphy_cgx_eth_link_speed)link_mode->speed;
112 			(enum roc_bphy_cgx_eth_link_mode)link_mode->mode;
113 		ret = roc_bphy_cgx_set_link_mode(cgx->rcgx, lmac, &rlink_mode);
115 	case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE:
116 		link_state = msg->data;
117 		ret = roc_bphy_cgx_set_link_state(cgx->rcgx, lmac,
120 	case CNXK_BPHY_CGX_MSG_TYPE_START_RXTX:
121 		ret = roc_bphy_cgx_start_rxtx(cgx->rcgx, lmac);
123 	case CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX:
124 		ret = roc_bphy_cgx_stop_rxtx(cgx->rcgx, lmac);
126 	case CNXK_BPHY_CGX_MSG_TYPE_GET_SUPPORTED_FEC:
		/* Allocated FEC result becomes the queue's response object. */
127 		fec = rte_zmalloc(NULL, sizeof(*fec), 0);
131 		ret = roc_bphy_cgx_fec_supported_get(cgx->rcgx, lmac, fec);
134 	case CNXK_BPHY_CGX_MSG_TYPE_SET_FEC:
		/* NOTE(review): `fec` is presumably pointed at msg->data on a
		 * line not visible here — confirm it is assigned before this
		 * dereference. */
136 		ret = roc_bphy_cgx_fec_set(cgx->rcgx, lmac, *fec);
142 	/* get rid of last response if any */
144 		RTE_LOG(WARNING, PMD, "Previous response got overwritten\n");
/* rawdev enqueue callback: process exactly one request buffer on the queue
 * encoded in `context` (queue index smuggled through the pointer-sized
 * context argument). Bounds-checks the queue index against num_queues.
 * NOTE(review): the `count` validation and the return value lines are not
 * visible in this chunk — presumably count must be 1; confirm in the full
 * source.
 */
153 cnxk_bphy_cgx_enqueue_bufs(struct rte_rawdev *dev,
154 			   struct rte_rawdev_buf **buffers, unsigned int count,
155 			   rte_rawdev_obj_t context)
157 	struct cnxk_bphy_cgx *cgx = dev->dev_private;
158 	unsigned int queue = (size_t)context;
161 	if (queue >= cgx->num_queues)
167 	ret = cnxk_bphy_cgx_process_buf(cgx, queue, buffers[0]);
/* rawdev dequeue callback: hand back the single pending response for the
 * queue encoded in `context`. Ownership of the response object transfers to
 * the caller via buffers[0]->buf_addr (caller is expected to rte_free it).
 * NOTE(review): clearing of qp->rsp after the handoff is not visible in
 * this chunk — confirm the slot is emptied so the response is returned only
 * once.
 */
175 cnxk_bphy_cgx_dequeue_bufs(struct rte_rawdev *dev,
176 			   struct rte_rawdev_buf **buffers, unsigned int count,
177 			   rte_rawdev_obj_t context)
179 	struct cnxk_bphy_cgx *cgx = dev->dev_private;
180 	unsigned int queue = (size_t)context;
181 	struct cnxk_bphy_cgx_queue *qp;
183 	if (queue >= cgx->num_queues)
189 	qp = &cgx->queues[queue];
191 	buffers[0]->buf_addr = qp->rsp;
/* rawdev queue_count callback: one queue per detected LMAC. */
201 cnxk_bphy_cgx_queue_count(struct rte_rawdev *dev)
203 	struct cnxk_bphy_cgx *cgx = dev->dev_private;
205 	return cgx->num_queues;
/* rawdev ops table registered with each BPHY CGX rawdev instance. */
208 static const struct rte_rawdev_ops cnxk_bphy_cgx_rawdev_ops = {
209 	.queue_def_conf = cnxk_bphy_cgx_queue_def_conf,
210 	.enqueue_bufs = cnxk_bphy_cgx_enqueue_bufs,
211 	.dequeue_bufs = cnxk_bphy_cgx_dequeue_bufs,
212 	.queue_count = cnxk_bphy_cgx_queue_count,
213 	.dev_selftest = cnxk_bphy_cgx_dev_selftest,
/* Populate cgx->queues[]: walk the ROC LMAC bitmap and create one queue per
 * LMAC that is present, recording its LMAC index. Absent LMACs are skipped,
 * so queue index and LMAC index need not match.
 */
217 cnxk_bphy_cgx_init_queues(struct cnxk_bphy_cgx *cgx)
219 	struct roc_bphy_cgx *rcgx = cgx->rcgx;
222 	for (i = 0; i < RTE_DIM(cgx->queues); i++) {
223 		if (!(rcgx->lmac_bmap & BIT_ULL(i)))
226 		cgx->queues[cgx->num_queues++].lmac = i;
/* Release any response objects still parked in the queues (responses that
 * were produced but never dequeued). NOTE(review): the `if (rsp)` guard is
 * redundant — rte_free(NULL) is a no-op — but is kept byte-identical here.
 */
231 cnxk_bphy_cgx_fini_queues(struct cnxk_bphy_cgx *cgx)
235 	for (i = 0; i < cgx->num_queues; i++) {
236 		if (cgx->queues[i].rsp)
237 			rte_free(cgx->queues[i].rsp);
/* PCI probe: create and initialize one BPHY CGX rawdev for the device.
 * Primary process only (dev_private and the ROC handle are process-local
 * heap allocations). Requires BAR0 to be mapped. On any failure after
 * allocation the rawdev is released via rte_rawdev_pmd_release.
 * NOTE(review): several error-path lines (return statements, the goto
 * labels between out_pmd_release and the final release) are not visible in
 * this chunk — confirm cgx->rcgx is freed on the dev_init failure path.
 */
244 cnxk_bphy_cgx_rawdev_probe(struct rte_pci_driver *pci_drv,
245 			   struct rte_pci_device *pci_dev)
247 	char name[RTE_RAWDEV_NAME_MAX_LEN];
248 	struct rte_rawdev *rawdev;
249 	struct cnxk_bphy_cgx *cgx;
250 	struct roc_bphy_cgx *rcgx;
253 	RTE_SET_USED(pci_drv);
	/* Secondary processes must not re-create the device. */
255 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
	/* BAR0 must be mapped; its address seeds the ROC layer below. */
258 	if (!pci_dev->mem_resource[0].addr)
261 	ret = roc_plt_init();
265 	cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
266 	rawdev = rte_rawdev_pmd_allocate(name, sizeof(*cgx), rte_socket_id());
270 	rawdev->dev_ops = &cnxk_bphy_cgx_rawdev_ops;
271 	rawdev->device = &pci_dev->device;
272 	rawdev->driver_name = pci_dev->driver->driver.name;
	/* The ROC handle lives on the heap so dev_private stays small. */
274 	cgx = rawdev->dev_private;
275 	cgx->rcgx = rte_zmalloc(NULL, sizeof(*rcgx), 0);
278 		goto out_pmd_release;
282 	rcgx->bar0_pa = pci_dev->mem_resource[0].phys_addr;
283 	rcgx->bar0_va = pci_dev->mem_resource[0].addr;
284 	ret = roc_bphy_cgx_dev_init(rcgx);
288 	cnxk_bphy_cgx_init_queues(cgx);
294 	rte_rawdev_pmd_release(rawdev);
/* PCI remove: look the rawdev up by its formatted name, drain any pending
 * queue responses, tear down the ROC layer, and release the rawdev.
 * NOTE(review): the not-found early return and the rte_free of cgx->rcgx
 * are not visible in this chunk — confirm the ROC handle allocated in probe
 * is freed here.
 */
300 cnxk_bphy_cgx_rawdev_remove(struct rte_pci_device *pci_dev)
302 	char name[RTE_RAWDEV_NAME_MAX_LEN];
303 	struct rte_rawdev *rawdev;
304 	struct cnxk_bphy_cgx *cgx;
306 	cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
307 	rawdev = rte_rawdev_pmd_get_named_dev(name);
311 	cgx = rawdev->dev_private;
312 	cnxk_bphy_cgx_fini_queues(cgx);
313 	roc_bphy_cgx_dev_fini(cgx->rcgx);
316 	return rte_rawdev_pmd_release(rawdev);
/* PCI ids claimed by this driver: CN9K CGX and CN10K RPM MAC blocks. */
319 static const struct rte_pci_id cnxk_bphy_cgx_map[] = {
320 	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_CGX)},
321 	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM)},
/* PCI driver definition and EAL registration. NEED_MAPPING asks EAL to map
 * the BARs before probe runs; the device is expected to be bound to
 * vfio-pci (kmod dependency below).
 */
325 static struct rte_pci_driver bphy_cgx_rawdev_pmd = {
326 	.id_table = cnxk_bphy_cgx_map,
327 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
328 	.probe = cnxk_bphy_cgx_rawdev_probe,
329 	.remove = cnxk_bphy_cgx_rawdev_remove,
332 RTE_PMD_REGISTER_PCI(cnxk_bphy_cgx_rawdev_pci_driver, bphy_cgx_rawdev_pmd);
333 RTE_PMD_REGISTER_PCI_TABLE(cnxk_bphy_cgx_rawdev_pci_driver, cnxk_bphy_cgx_map);
334 RTE_PMD_REGISTER_KMOD_DEP(cnxk_bphy_cgx_rawdev_pci_driver, "vfio-pci");