net/cnxk: support device infos query
[dpdk.git] / drivers / raw / cnxk_bphy / cnxk_bphy_cgx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <string.h>
5
6 #include <rte_bus_pci.h>
7 #include <rte_rawdev.h>
8 #include <rte_rawdev_pmd.h>
9
10 #include <roc_api.h>
11
12 #include "cnxk_bphy_cgx.h"
13 #include "rte_pmd_bphy.h"
14
/*
 * Per-lmac queue state.  A queue buffers at most one response message
 * produced by a previously enqueued request; rsp is NULL when none is
 * pending.
 */
struct cnxk_bphy_cgx_queue {
	unsigned int lmac;
	/* queue holds up to one response */
	void *rsp;
};
20
/* Per-device private data: roc-layer handle plus one queue per enabled lmac. */
struct cnxk_bphy_cgx {
	struct roc_bphy_cgx *rcgx;
	/* only queues[0..num_queues-1] are active, one per lmac */
	struct cnxk_bphy_cgx_queue queues[MAX_LMACS_PER_CGX];
	unsigned int num_queues;
};
26
27 static void
28 cnxk_bphy_cgx_format_name(char *name, unsigned int len,
29                           struct rte_pci_device *pci_dev)
30 {
31         snprintf(name, len, "BPHY_CGX:%x:%02x.%x", pci_dev->addr.bus,
32                  pci_dev->addr.devid, pci_dev->addr.function);
33 }
34
35 static int
36 cnxk_bphy_cgx_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
37                              rte_rawdev_obj_t queue_conf,
38                              size_t queue_conf_size)
39 {
40         unsigned int *conf;
41
42         RTE_SET_USED(dev);
43         RTE_SET_USED(queue_id);
44
45         if (queue_conf_size != sizeof(*conf))
46                 return -EINVAL;
47
48         conf = (unsigned int *)queue_conf;
49         *conf = 1;
50
51         return 0;
52 }
53
/*
 * Execute one request message from @buf against the lmac backing @queue.
 *
 * The message type selects the matching roc_bphy_cgx_*() call.  Only
 * GET_LINKINFO produces a response: it is allocated here and parked in the
 * queue until dequeued, at which point ownership passes to the caller (who
 * must rte_free() it).  Since a queue holds at most one response, a pending
 * undequeued one is dropped with a warning.
 *
 * Returns 0 on success, -EINVAL for an unknown message type, -ENOMEM on
 * allocation failure, otherwise the roc-layer error code.
 */
static int
cnxk_bphy_cgx_process_buf(struct cnxk_bphy_cgx *cgx, unsigned int queue,
			  struct rte_rawdev_buf *buf)
{
	struct cnxk_bphy_cgx_queue *qp = &cgx->queues[queue];
	struct cnxk_bphy_cgx_msg_set_link_state *link_state;
	struct cnxk_bphy_cgx_msg *msg = buf->buf_addr;
	struct cnxk_bphy_cgx_msg_link_mode *link_mode;
	struct cnxk_bphy_cgx_msg_link_info *link_info;
	struct roc_bphy_cgx_link_info rlink_info;
	struct roc_bphy_cgx_link_mode rlink_mode;
	unsigned int lmac = qp->lmac;
	void *rsp = NULL;
	int ret;

	switch (msg->type) {
	case CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO:
		memset(&rlink_info, 0, sizeof(rlink_info));
		ret = roc_bphy_cgx_get_linkinfo(cgx->rcgx, lmac, &rlink_info);
		if (ret)
			break;

		link_info = rte_zmalloc(NULL, sizeof(*link_info), 0);
		if (!link_info)
			return -ENOMEM;

		/*
		 * Copy roc link info into the public message layout.  The
		 * enum casts assume the cnxk and roc enumerators share
		 * values - NOTE(review): confirm against rte_pmd_bphy.h.
		 */
		link_info->link_up = rlink_info.link_up;
		link_info->full_duplex = rlink_info.full_duplex;
		link_info->speed =
			(enum cnxk_bphy_cgx_eth_link_speed)rlink_info.speed;
		link_info->autoneg = rlink_info.an;
		link_info->fec =
			(enum cnxk_bphy_cgx_eth_link_fec)rlink_info.fec;
		link_info->mode =
			(enum cnxk_bphy_cgx_eth_link_mode)rlink_info.mode;
		rsp = link_info;
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE:
		ret = roc_bphy_cgx_intlbk_disable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE:
		ret = roc_bphy_cgx_intlbk_enable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE:
		ret = roc_bphy_cgx_ptp_rx_disable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE:
		ret = roc_bphy_cgx_ptp_rx_enable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE:
		/* translate the public request into the roc layout */
		link_mode = msg->data;
		memset(&rlink_mode, 0, sizeof(rlink_mode));
		rlink_mode.full_duplex = link_mode->full_duplex;
		rlink_mode.an = link_mode->autoneg;
		rlink_mode.speed =
			(enum roc_bphy_cgx_eth_link_speed)link_mode->speed;
		rlink_mode.mode =
			(enum roc_bphy_cgx_eth_link_mode)link_mode->mode;
		ret = roc_bphy_cgx_set_link_mode(cgx->rcgx, lmac, &rlink_mode);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE:
		link_state = msg->data;
		ret = roc_bphy_cgx_set_link_state(cgx->rcgx, lmac,
						  link_state->state);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_START_RXTX:
		ret = roc_bphy_cgx_start_rxtx(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX:
		ret = roc_bphy_cgx_stop_rxtx(cgx->rcgx, lmac);
		break;
	default:
		return -EINVAL;
	}

	/* get rid of last response if any */
	if (qp->rsp) {
		RTE_LOG(WARNING, PMD, "Previous response got overwritten\n");
		rte_free(qp->rsp);
	}
	/* on non-GET_LINKINFO types (and on errors) rsp is NULL here */
	qp->rsp = rsp;

	return ret;
}
138
139 static int
140 cnxk_bphy_cgx_enqueue_bufs(struct rte_rawdev *dev,
141                            struct rte_rawdev_buf **buffers, unsigned int count,
142                            rte_rawdev_obj_t context)
143 {
144         struct cnxk_bphy_cgx *cgx = dev->dev_private;
145         unsigned int queue = (size_t)context;
146         int ret;
147
148         if (queue >= cgx->num_queues)
149                 return -EINVAL;
150
151         if (count == 0)
152                 return 0;
153
154         ret = cnxk_bphy_cgx_process_buf(cgx, queue, buffers[0]);
155         if (ret)
156                 return ret;
157
158         return 1;
159 }
160
161 static int
162 cnxk_bphy_cgx_dequeue_bufs(struct rte_rawdev *dev,
163                            struct rte_rawdev_buf **buffers, unsigned int count,
164                            rte_rawdev_obj_t context)
165 {
166         struct cnxk_bphy_cgx *cgx = dev->dev_private;
167         unsigned int queue = (size_t)context;
168         struct cnxk_bphy_cgx_queue *qp;
169
170         if (queue >= cgx->num_queues)
171                 return -EINVAL;
172
173         if (count == 0)
174                 return 0;
175
176         qp = &cgx->queues[queue];
177         if (qp->rsp) {
178                 buffers[0]->buf_addr = qp->rsp;
179                 qp->rsp = NULL;
180
181                 return 1;
182         }
183
184         return 0;
185 }
186
187 static uint16_t
188 cnxk_bphy_cgx_queue_count(struct rte_rawdev *dev)
189 {
190         struct cnxk_bphy_cgx *cgx = dev->dev_private;
191
192         return cgx->num_queues;
193 }
194
/* Rawdev ops table; ops not listed here are zeroed by the initializer. */
static const struct rte_rawdev_ops cnxk_bphy_cgx_rawdev_ops = {
	.queue_def_conf = cnxk_bphy_cgx_queue_def_conf,
	.enqueue_bufs = cnxk_bphy_cgx_enqueue_bufs,
	.dequeue_bufs = cnxk_bphy_cgx_dequeue_bufs,
	.queue_count = cnxk_bphy_cgx_queue_count,
	.dev_selftest = cnxk_bphy_cgx_dev_selftest,
};
202
203 static void
204 cnxk_bphy_cgx_init_queues(struct cnxk_bphy_cgx *cgx)
205 {
206         struct roc_bphy_cgx *rcgx = cgx->rcgx;
207         unsigned int i;
208
209         for (i = 0; i < RTE_DIM(cgx->queues); i++) {
210                 if (!(rcgx->lmac_bmap & BIT_ULL(i)))
211                         continue;
212
213                 cgx->queues[cgx->num_queues++].lmac = i;
214         }
215 }
216
217 static void
218 cnxk_bphy_cgx_fini_queues(struct cnxk_bphy_cgx *cgx)
219 {
220         unsigned int i;
221
222         for (i = 0; i < cgx->num_queues; i++) {
223                 if (cgx->queues[i].rsp)
224                         rte_free(cgx->queues[i].rsp);
225         }
226
227         cgx->num_queues = 0;
228 }
229
/*
 * PCI probe callback: create and initialize a BPHY CGX/RPM rawdev for
 * @pci_dev.  Only the primary process performs initialization; secondaries
 * return 0 immediately.  BAR0 must be mapped, as the roc layer is handed
 * its physical and virtual addresses below.
 *
 * Error handling uses goto-based cleanup: resources acquired before the
 * failure point are released in reverse order.
 */
static int
cnxk_bphy_cgx_rawdev_probe(struct rte_pci_driver *pci_drv,
			   struct rte_pci_device *pci_dev)
{
	char name[RTE_RAWDEV_NAME_MAX_LEN];
	struct rte_rawdev *rawdev;
	struct cnxk_bphy_cgx *cgx;
	struct roc_bphy_cgx *rcgx;
	int ret;

	RTE_SET_USED(pci_drv);

	/* device setup is done once, by the primary process only */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!pci_dev->mem_resource[0].addr)
		return -ENODEV;

	ret = roc_plt_init();
	if (ret)
		return ret;

	cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(*cgx), rte_socket_id());
	if (!rawdev)
		return -ENOMEM;

	rawdev->dev_ops = &cnxk_bphy_cgx_rawdev_ops;
	rawdev->device = &pci_dev->device;
	rawdev->driver_name = pci_dev->driver->driver.name;

	/* the roc context is a separate allocation owned by dev_private */
	cgx = rawdev->dev_private;
	cgx->rcgx = rte_zmalloc(NULL, sizeof(*rcgx), 0);
	if (!cgx->rcgx) {
		ret = -ENOMEM;
		goto out_pmd_release;
	}

	rcgx = cgx->rcgx;
	rcgx->bar0_pa = pci_dev->mem_resource[0].phys_addr;
	rcgx->bar0_va = pci_dev->mem_resource[0].addr;
	ret = roc_bphy_cgx_dev_init(rcgx);
	if (ret)
		goto out_free;

	cnxk_bphy_cgx_init_queues(cgx);

	return 0;
out_free:
	rte_free(rcgx);
out_pmd_release:
	rte_rawdev_pmd_release(rawdev);

	return ret;
}
285
/*
 * PCI remove callback: tear down the rawdev created at probe time - drop
 * queued responses, finalize the roc layer, free the roc context and
 * release the rawdev.
 *
 * NOTE(review): unlike probe there is no secondary-process early return
 * here - confirm remove only runs in the primary process.
 */
static int
cnxk_bphy_cgx_rawdev_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_RAWDEV_NAME_MAX_LEN];
	struct rte_rawdev *rawdev;
	struct cnxk_bphy_cgx *cgx;

	/* look the device up by the same name probe registered it under */
	cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
	rawdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rawdev)
		return -ENODEV;

	cgx = rawdev->dev_private;
	cnxk_bphy_cgx_fini_queues(cgx);
	roc_bphy_cgx_dev_fini(cgx->rcgx);
	rte_free(cgx->rcgx);

	return rte_rawdev_pmd_release(rawdev);
}
305
/* PCI IDs served by this driver: CN9K CGX and CN10K RPM devices. */
static const struct rte_pci_id cnxk_bphy_cgx_map[] = {
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_CGX)},
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM)},
	{} /* sentinel */
};
311
/* PCI driver definition; RTE_PCI_DRV_NEED_MAPPING asks the bus to map BARs. */
static struct rte_pci_driver bphy_cgx_rawdev_pmd = {
	.id_table = cnxk_bphy_cgx_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = cnxk_bphy_cgx_rawdev_probe,
	.remove = cnxk_bphy_cgx_rawdev_remove,
};
318
/* Register the PMD, its PCI ID table and its kernel-module dependency. */
RTE_PMD_REGISTER_PCI(cnxk_bphy_cgx_rawdev_pci_driver, bphy_cgx_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(cnxk_bphy_cgx_rawdev_pci_driver, cnxk_bphy_cgx_map);
RTE_PMD_REGISTER_KMOD_DEP(cnxk_bphy_cgx_rawdev_pci_driver, "vfio-pci");