693a9cd2d516949582386ce5167a5e2859b34886
[dpdk.git] / drivers / raw / cnxk_bphy / cnxk_bphy_cgx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <string.h>
5
6 #include <rte_bus_pci.h>
7 #include <rte_rawdev.h>
8 #include <rte_rawdev_pmd.h>
9
10 #include <roc_api.h>
11
12 #include "cnxk_bphy_cgx.h"
13 #include "rte_pmd_bphy.h"
14
/* Per-LMAC message queue; holds at most one pending response. */
struct cnxk_bphy_cgx_queue {
        unsigned int lmac;	/* LMAC index this queue is bound to */
        /* queue holds up to one response; rte_zmalloc()'d, freed on
         * dequeue, overwrite or teardown
         */
        void *rsp;
};
20
/* Per-device private data attached to the rawdev instance. */
struct cnxk_bphy_cgx {
        struct roc_bphy_cgx *rcgx;	/* ROC layer handle (heap-allocated) */
        struct cnxk_bphy_cgx_queue queues[MAX_LMACS_PER_CGX];
        unsigned int num_queues;	/* number of valid entries in queues[] */
};
26
27 static void
28 cnxk_bphy_cgx_format_name(char *name, unsigned int len,
29                           struct rte_pci_device *pci_dev)
30 {
31         snprintf(name, len, "BPHY_CGX:%x:%02x.%x", pci_dev->addr.bus,
32                  pci_dev->addr.devid, pci_dev->addr.function);
33 }
34
35 static int
36 cnxk_bphy_cgx_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
37                              rte_rawdev_obj_t queue_conf,
38                              size_t queue_conf_size)
39 {
40         unsigned int *conf;
41
42         RTE_SET_USED(dev);
43         RTE_SET_USED(queue_id);
44
45         if (queue_conf_size != sizeof(*conf))
46                 return -EINVAL;
47
48         conf = (unsigned int *)queue_conf;
49         *conf = 1;
50
51         return 0;
52 }
53
54 static int
55 cnxk_bphy_cgx_process_buf(struct cnxk_bphy_cgx *cgx, unsigned int queue,
56                           struct rte_rawdev_buf *buf)
57 {
58         struct cnxk_bphy_cgx_queue *qp = &cgx->queues[queue];
59         struct cnxk_bphy_cgx_msg_set_link_state *link_state;
60         struct cnxk_bphy_cgx_msg *msg = buf->buf_addr;
61         struct cnxk_bphy_cgx_msg_link_mode *link_mode;
62         struct cnxk_bphy_cgx_msg_link_info *link_info;
63         struct roc_bphy_cgx_link_info rlink_info;
64         struct roc_bphy_cgx_link_mode rlink_mode;
65         enum roc_bphy_cgx_eth_link_fec *fec;
66         unsigned int lmac = qp->lmac;
67         void *rsp = NULL;
68         int ret;
69
70         switch (msg->type) {
71         case CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO:
72                 memset(&rlink_info, 0, sizeof(rlink_info));
73                 ret = roc_bphy_cgx_get_linkinfo(cgx->rcgx, lmac, &rlink_info);
74                 if (ret)
75                         break;
76
77                 link_info = rte_zmalloc(NULL, sizeof(*link_info), 0);
78                 if (!link_info)
79                         return -ENOMEM;
80
81                 link_info->link_up = rlink_info.link_up;
82                 link_info->full_duplex = rlink_info.full_duplex;
83                 link_info->speed =
84                         (enum cnxk_bphy_cgx_eth_link_speed)rlink_info.speed;
85                 link_info->autoneg = rlink_info.an;
86                 link_info->fec =
87                         (enum cnxk_bphy_cgx_eth_link_fec)rlink_info.fec;
88                 link_info->mode =
89                         (enum cnxk_bphy_cgx_eth_link_mode)rlink_info.mode;
90                 rsp = link_info;
91                 break;
92         case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE:
93                 ret = roc_bphy_cgx_intlbk_disable(cgx->rcgx, lmac);
94                 break;
95         case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE:
96                 ret = roc_bphy_cgx_intlbk_enable(cgx->rcgx, lmac);
97                 break;
98         case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE:
99                 ret = roc_bphy_cgx_ptp_rx_disable(cgx->rcgx, lmac);
100                 break;
101         case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE:
102                 ret = roc_bphy_cgx_ptp_rx_enable(cgx->rcgx, lmac);
103                 break;
104         case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE:
105                 link_mode = msg->data;
106                 memset(&rlink_mode, 0, sizeof(rlink_mode));
107                 rlink_mode.full_duplex = link_mode->full_duplex;
108                 rlink_mode.an = link_mode->autoneg;
109                 rlink_mode.speed =
110                         (enum roc_bphy_cgx_eth_link_speed)link_mode->speed;
111                 rlink_mode.mode =
112                         (enum roc_bphy_cgx_eth_link_mode)link_mode->mode;
113                 ret = roc_bphy_cgx_set_link_mode(cgx->rcgx, lmac, &rlink_mode);
114                 break;
115         case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE:
116                 link_state = msg->data;
117                 ret = roc_bphy_cgx_set_link_state(cgx->rcgx, lmac,
118                                                   link_state->state);
119                 break;
120         case CNXK_BPHY_CGX_MSG_TYPE_START_RXTX:
121                 ret = roc_bphy_cgx_start_rxtx(cgx->rcgx, lmac);
122                 break;
123         case CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX:
124                 ret = roc_bphy_cgx_stop_rxtx(cgx->rcgx, lmac);
125                 break;
126         case CNXK_BPHY_CGX_MSG_TYPE_GET_SUPPORTED_FEC:
127                 fec = rte_zmalloc(NULL, sizeof(*fec), 0);
128                 if (!fec)
129                         return -ENOMEM;
130
131                 ret = roc_bphy_cgx_fec_supported_get(cgx->rcgx, lmac, fec);
132                 rsp = fec;
133                 break;
134         default:
135                 return -EINVAL;
136         }
137
138         /* get rid of last response if any */
139         if (qp->rsp) {
140                 RTE_LOG(WARNING, PMD, "Previous response got overwritten\n");
141                 rte_free(qp->rsp);
142         }
143         qp->rsp = rsp;
144
145         return ret;
146 }
147
148 static int
149 cnxk_bphy_cgx_enqueue_bufs(struct rte_rawdev *dev,
150                            struct rte_rawdev_buf **buffers, unsigned int count,
151                            rte_rawdev_obj_t context)
152 {
153         struct cnxk_bphy_cgx *cgx = dev->dev_private;
154         unsigned int queue = (size_t)context;
155         int ret;
156
157         if (queue >= cgx->num_queues)
158                 return -EINVAL;
159
160         if (count == 0)
161                 return 0;
162
163         ret = cnxk_bphy_cgx_process_buf(cgx, queue, buffers[0]);
164         if (ret)
165                 return ret;
166
167         return 1;
168 }
169
170 static int
171 cnxk_bphy_cgx_dequeue_bufs(struct rte_rawdev *dev,
172                            struct rte_rawdev_buf **buffers, unsigned int count,
173                            rte_rawdev_obj_t context)
174 {
175         struct cnxk_bphy_cgx *cgx = dev->dev_private;
176         unsigned int queue = (size_t)context;
177         struct cnxk_bphy_cgx_queue *qp;
178
179         if (queue >= cgx->num_queues)
180                 return -EINVAL;
181
182         if (count == 0)
183                 return 0;
184
185         qp = &cgx->queues[queue];
186         if (qp->rsp) {
187                 buffers[0]->buf_addr = qp->rsp;
188                 qp->rsp = NULL;
189
190                 return 1;
191         }
192
193         return 0;
194 }
195
196 static uint16_t
197 cnxk_bphy_cgx_queue_count(struct rte_rawdev *dev)
198 {
199         struct cnxk_bphy_cgx *cgx = dev->dev_private;
200
201         return cgx->num_queues;
202 }
203
/* Rawdev callbacks exposed by this PMD; unlisted ops are unsupported. */
static const struct rte_rawdev_ops cnxk_bphy_cgx_rawdev_ops = {
        .queue_def_conf = cnxk_bphy_cgx_queue_def_conf,
        .enqueue_bufs = cnxk_bphy_cgx_enqueue_bufs,
        .dequeue_bufs = cnxk_bphy_cgx_dequeue_bufs,
        .queue_count = cnxk_bphy_cgx_queue_count,
        .dev_selftest = cnxk_bphy_cgx_dev_selftest,
};
211
212 static void
213 cnxk_bphy_cgx_init_queues(struct cnxk_bphy_cgx *cgx)
214 {
215         struct roc_bphy_cgx *rcgx = cgx->rcgx;
216         unsigned int i;
217
218         for (i = 0; i < RTE_DIM(cgx->queues); i++) {
219                 if (!(rcgx->lmac_bmap & BIT_ULL(i)))
220                         continue;
221
222                 cgx->queues[cgx->num_queues++].lmac = i;
223         }
224 }
225
226 static void
227 cnxk_bphy_cgx_fini_queues(struct cnxk_bphy_cgx *cgx)
228 {
229         unsigned int i;
230
231         for (i = 0; i < cgx->num_queues; i++) {
232                 if (cgx->queues[i].rsp)
233                         rte_free(cgx->queues[i].rsp);
234         }
235
236         cgx->num_queues = 0;
237 }
238
/* PCI probe callback: create and initialize the rawdev instance for a
 * matched BPHY CGX/RPM device.
 *
 * Returns 0 on success (also for secondary processes, which do nothing
 * here), -ENODEV if BAR0 is not mapped, -ENOMEM on allocation failure,
 * or the negative error from platform/ROC initialization.
 */
static int
cnxk_bphy_cgx_rawdev_probe(struct rte_pci_driver *pci_drv,
                           struct rte_pci_device *pci_dev)
{
        char name[RTE_RAWDEV_NAME_MAX_LEN];
        struct rte_rawdev *rawdev;
        struct cnxk_bphy_cgx *cgx;
        struct roc_bphy_cgx *rcgx;
        int ret;

        RTE_SET_USED(pci_drv);

        /* device setup is done by the primary process only */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        /* BAR0 must be mapped - it carries the CGX/RPM CSRs */
        if (!pci_dev->mem_resource[0].addr)
                return -ENODEV;

        ret = roc_plt_init();
        if (ret)
                return ret;

        cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
        /* dev_private (sizeof(*cgx)) is zeroed by the allocator */
        rawdev = rte_rawdev_pmd_allocate(name, sizeof(*cgx), rte_socket_id());
        if (!rawdev)
                return -ENOMEM;

        rawdev->dev_ops = &cnxk_bphy_cgx_rawdev_ops;
        rawdev->device = &pci_dev->device;
        rawdev->driver_name = pci_dev->driver->driver.name;

        cgx = rawdev->dev_private;
        cgx->rcgx = rte_zmalloc(NULL, sizeof(*rcgx), 0);
        if (!cgx->rcgx) {
                ret = -ENOMEM;
                goto out_pmd_release;
        }

        /* hand the BAR0 mapping over to the ROC layer */
        rcgx = cgx->rcgx;
        rcgx->bar0_pa = pci_dev->mem_resource[0].phys_addr;
        rcgx->bar0_va = pci_dev->mem_resource[0].addr;
        ret = roc_bphy_cgx_dev_init(rcgx);
        if (ret)
                goto out_free;

        cnxk_bphy_cgx_init_queues(cgx);

        return 0;
out_free:
        rte_free(rcgx);
out_pmd_release:
        rte_rawdev_pmd_release(rawdev);

        return ret;
}
294
295 static int
296 cnxk_bphy_cgx_rawdev_remove(struct rte_pci_device *pci_dev)
297 {
298         char name[RTE_RAWDEV_NAME_MAX_LEN];
299         struct rte_rawdev *rawdev;
300         struct cnxk_bphy_cgx *cgx;
301
302         cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
303         rawdev = rte_rawdev_pmd_get_named_dev(name);
304         if (!rawdev)
305                 return -ENODEV;
306
307         cgx = rawdev->dev_private;
308         cnxk_bphy_cgx_fini_queues(cgx);
309         roc_bphy_cgx_dev_fini(cgx->rcgx);
310         rte_free(cgx->rcgx);
311
312         return rte_rawdev_pmd_release(rawdev);
313 }
314
/* PCI IDs this driver binds to: CN9K CGX and CN10K RPM MACs. */
static const struct rte_pci_id cnxk_bphy_cgx_map[] = {
        {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_CGX)},
        {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM)},
        {} /* sentinel */
};
320
/* PCI driver descriptor; NEED_MAPPING asks EAL to map BARs before probe. */
static struct rte_pci_driver bphy_cgx_rawdev_pmd = {
        .id_table = cnxk_bphy_cgx_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = cnxk_bphy_cgx_rawdev_probe,
        .remove = cnxk_bphy_cgx_rawdev_remove,
};
327
/* Register the driver with the PCI bus, export its ID table for tools
 * like dpdk-pmdinfo, and declare the kernel module it depends on.
 */
RTE_PMD_REGISTER_PCI(cnxk_bphy_cgx_rawdev_pci_driver, bphy_cgx_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(cnxk_bphy_cgx_rawdev_pci_driver, cnxk_bphy_cgx_map);
RTE_PMD_REGISTER_KMOD_DEP(cnxk_bphy_cgx_rawdev_pci_driver, "vfio-pci");