/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <string.h>

#include <rte_bus_pci.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include <roc_api.h>

#include "cnxk_bphy_cgx.h"
#include "rte_pmd_bphy.h"

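/*
 * Each LMAC owned by this CGX/RPM block is exposed as a separate rawdev
 * queue; see cnxk_bphy_cgx_init_queues().
 */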
struct cnxk_bphy_cgx_queue {
	unsigned int lmac;
	/* queue holds up to one response */
	void *rsp;
};

struct cnxk_bphy_cgx {
	struct roc_bphy_cgx *rcgx;
	struct cnxk_bphy_cgx_queue queues[MAX_LMACS_PER_CGX];
	unsigned int num_queues;
};

static void
cnxk_bphy_cgx_format_name(char *name, unsigned int len,
			  struct rte_pci_device *pci_dev)
{
	snprintf(name, len, "BPHY_CGX:%02x:%02x.%x", pci_dev->addr.bus,
		 pci_dev->addr.devid, pci_dev->addr.function);
}

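/* Report the default queue configuration: every queue is one message deep. */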
static int
cnxk_bphy_cgx_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
			     rte_rawdev_obj_t queue_conf,
			     size_t queue_conf_size)
{
	unsigned int *conf;

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	if (queue_conf_size != sizeof(*conf))
		return -EINVAL;

	conf = (unsigned int *)queue_conf;
	*conf = 1;

	return 0;
}

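/*
 * Dispatch a single message to the matching roc_bphy_cgx_* call. Messages
 * that produce data (link info, supported FEC) allocate a response which is
 * stashed in the queue until the application dequeues it.
 */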
static int
cnxk_bphy_cgx_process_buf(struct cnxk_bphy_cgx *cgx, unsigned int queue,
			  struct rte_rawdev_buf *buf)
{
	struct cnxk_bphy_cgx_queue *qp = &cgx->queues[queue];
	struct cnxk_bphy_cgx_msg_set_link_state *link_state;
	struct cnxk_bphy_cgx_msg *msg = buf->buf_addr;
	struct cnxk_bphy_cgx_msg_link_mode *link_mode;
	struct cnxk_bphy_cgx_msg_link_info *link_info;
	struct roc_bphy_cgx_link_info rlink_info;
	struct roc_bphy_cgx_link_mode rlink_mode;
	enum roc_bphy_cgx_eth_link_fec *fec;
	unsigned int lmac = qp->lmac;
	void *rsp = NULL;
	int ret;

	switch (msg->type) {
	case CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO:
		memset(&rlink_info, 0, sizeof(rlink_info));
		ret = roc_bphy_cgx_get_linkinfo(cgx->rcgx, lmac, &rlink_info);
		if (ret)
			break;

		link_info = rte_zmalloc(NULL, sizeof(*link_info), 0);
		if (!link_info)
			return -ENOMEM;

		link_info->link_up = rlink_info.link_up;
		link_info->full_duplex = rlink_info.full_duplex;
		link_info->speed =
			(enum cnxk_bphy_cgx_eth_link_speed)rlink_info.speed;
		link_info->autoneg = rlink_info.an;
		link_info->fec =
			(enum cnxk_bphy_cgx_eth_link_fec)rlink_info.fec;
		link_info->mode =
			(enum cnxk_bphy_cgx_eth_link_mode)rlink_info.mode;
		rsp = link_info;
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE:
		ret = roc_bphy_cgx_intlbk_disable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE:
		ret = roc_bphy_cgx_intlbk_enable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE:
		ret = roc_bphy_cgx_ptp_rx_disable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE:
		ret = roc_bphy_cgx_ptp_rx_enable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE:
		link_mode = msg->data;
		memset(&rlink_mode, 0, sizeof(rlink_mode));
		rlink_mode.full_duplex = link_mode->full_duplex;
		rlink_mode.an = link_mode->autoneg;
		rlink_mode.speed =
			(enum roc_bphy_cgx_eth_link_speed)link_mode->speed;
		rlink_mode.mode =
			(enum roc_bphy_cgx_eth_link_mode)link_mode->mode;
		ret = roc_bphy_cgx_set_link_mode(cgx->rcgx, lmac, &rlink_mode);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE:
		link_state = msg->data;
		ret = roc_bphy_cgx_set_link_state(cgx->rcgx, lmac,
						  link_state->state);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_START_RXTX:
		ret = roc_bphy_cgx_start_rxtx(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX:
		ret = roc_bphy_cgx_stop_rxtx(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_GET_SUPPORTED_FEC:
		fec = rte_zmalloc(NULL, sizeof(*fec), 0);
		if (!fec)
			return -ENOMEM;

		ret = roc_bphy_cgx_fec_supported_get(cgx->rcgx, lmac, fec);
		if (ret) {
			/* do not hand a stale buffer to the caller */
			rte_free(fec);
			break;
		}

		rsp = fec;
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_SET_FEC:
		fec = msg->data;
		ret = roc_bphy_cgx_fec_set(cgx->rcgx, lmac, *fec);
		break;
	default:
		return -EINVAL;
	}

	/* get rid of last response if any */
	if (qp->rsp) {
		RTE_LOG(WARNING, PMD, "Previous response got overwritten\n");
		rte_free(qp->rsp);
	}
	qp->rsp = rsp;

	return ret;
}

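/*
 * Enqueue path. The queue id travels in the context argument rather than in
 * a dedicated parameter. Only the first buffer is processed, since a queue
 * can hold no more than one pending response.
 */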
static int
cnxk_bphy_cgx_enqueue_bufs(struct rte_rawdev *dev,
			   struct rte_rawdev_buf **buffers, unsigned int count,
			   rte_rawdev_obj_t context)
{
	struct cnxk_bphy_cgx *cgx = dev->dev_private;
	unsigned int queue = (size_t)context;
	int ret;

	if (queue >= cgx->num_queues)
		return -EINVAL;

	if (count == 0)
		return 0;

	ret = cnxk_bphy_cgx_process_buf(cgx, queue, buffers[0]);
	if (ret)
		return ret;

	return 1;
}

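/*
 * Dequeue path: hand over the pending response, if any. Ownership moves to
 * the caller, who is expected to rte_free() it once done.
 */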
static int
cnxk_bphy_cgx_dequeue_bufs(struct rte_rawdev *dev,
			   struct rte_rawdev_buf **buffers, unsigned int count,
			   rte_rawdev_obj_t context)
{
	struct cnxk_bphy_cgx *cgx = dev->dev_private;
	unsigned int queue = (size_t)context;
	struct cnxk_bphy_cgx_queue *qp;

	if (queue >= cgx->num_queues)
		return -EINVAL;

	if (count == 0)
		return 0;

	qp = &cgx->queues[queue];
	if (qp->rsp) {
		buffers[0]->buf_addr = qp->rsp;
		qp->rsp = NULL;

		return 1;
	}

	return 0;
}

static uint16_t
cnxk_bphy_cgx_queue_count(struct rte_rawdev *dev)
{
	struct cnxk_bphy_cgx *cgx = dev->dev_private;

	return cgx->num_queues;
}

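/*
 * Rawdev ops exposed by this PMD. cnxk_bphy_cgx_dev_selftest() is
 * implemented elsewhere in the driver (declared in cnxk_bphy_cgx.h).
 */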
static const struct rte_rawdev_ops cnxk_bphy_cgx_rawdev_ops = {
	.queue_def_conf = cnxk_bphy_cgx_queue_def_conf,
	.enqueue_bufs = cnxk_bphy_cgx_enqueue_bufs,
	.dequeue_bufs = cnxk_bphy_cgx_dequeue_bufs,
	.queue_count = cnxk_bphy_cgx_queue_count,
	.dev_selftest = cnxk_bphy_cgx_dev_selftest,
};

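/* Create one rawdev queue per LMAC present in the ROC-level LMAC bitmap. */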
static void
cnxk_bphy_cgx_init_queues(struct cnxk_bphy_cgx *cgx)
{
	struct roc_bphy_cgx *rcgx = cgx->rcgx;
	unsigned int i;

	for (i = 0; i < RTE_DIM(cgx->queues); i++) {
		if (!(rcgx->lmac_bmap & BIT_ULL(i)))
			continue;

		cgx->queues[cgx->num_queues++].lmac = i;
	}
}

static void
cnxk_bphy_cgx_fini_queues(struct cnxk_bphy_cgx *cgx)
{
	unsigned int i;

	/* drop responses that were never dequeued */
	for (i = 0; i < cgx->num_queues; i++)
		rte_free(cgx->queues[i].rsp);

	cgx->num_queues = 0;
}

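/*
 * Probe: allocate the rawdev, point the ROC layer at BAR0 and enumerate
 * LMAC-backed queues. Only the primary process drives the hardware.
 */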
static int
cnxk_bphy_cgx_rawdev_probe(struct rte_pci_driver *pci_drv,
			   struct rte_pci_device *pci_dev)
{
	char name[RTE_RAWDEV_NAME_MAX_LEN];
	struct rte_rawdev *rawdev;
	struct cnxk_bphy_cgx *cgx;
	struct roc_bphy_cgx *rcgx;
	int ret;

	RTE_SET_USED(pci_drv);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!pci_dev->mem_resource[0].addr)
		return -ENODEV;

	ret = roc_plt_init();
	if (ret)
		return ret;

	cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(*cgx), rte_socket_id());
	if (!rawdev)
		return -ENOMEM;

	rawdev->dev_ops = &cnxk_bphy_cgx_rawdev_ops;
	rawdev->device = &pci_dev->device;
	rawdev->driver_name = pci_dev->driver->driver.name;

	cgx = rawdev->dev_private;
	cgx->rcgx = rte_zmalloc(NULL, sizeof(*rcgx), 0);
	if (!cgx->rcgx) {
		ret = -ENOMEM;
		goto out_pmd_release;
	}

	rcgx = cgx->rcgx;
	rcgx->bar0_pa = pci_dev->mem_resource[0].phys_addr;
	rcgx->bar0_va = pci_dev->mem_resource[0].addr;
	ret = roc_bphy_cgx_dev_init(rcgx);
	if (ret)
		goto out_free;

	cnxk_bphy_cgx_init_queues(cgx);

	return 0;
out_free:
	rte_free(rcgx);
out_pmd_release:
	rte_rawdev_pmd_release(rawdev);

	return ret;
}

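/* Remove: tear down queues and the ROC state in reverse order of probe. */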
static int
cnxk_bphy_cgx_rawdev_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_RAWDEV_NAME_MAX_LEN];
	struct rte_rawdev *rawdev;
	struct cnxk_bphy_cgx *cgx;

	cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
	rawdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rawdev)
		return -ENODEV;

	cgx = rawdev->dev_private;
	cnxk_bphy_cgx_fini_queues(cgx);
	roc_bphy_cgx_dev_fini(cgx->rcgx);
	rte_free(cgx->rcgx);

	return rte_rawdev_pmd_release(rawdev);
}

static const struct rte_pci_id cnxk_bphy_cgx_map[] = {
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_CGX)},
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM)},
	{} /* sentinel */
};

static struct rte_pci_driver bphy_cgx_rawdev_pmd = {
	.id_table = cnxk_bphy_cgx_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = cnxk_bphy_cgx_rawdev_probe,
	.remove = cnxk_bphy_cgx_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(cnxk_bphy_cgx_rawdev_pci_driver, bphy_cgx_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(cnxk_bphy_cgx_rawdev_pci_driver, cnxk_bphy_cgx_map);
RTE_PMD_REGISTER_KMOD_DEP(cnxk_bphy_cgx_rawdev_pci_driver, "vfio-pci");
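
/*
 * Usage sketch (illustrative, not part of the driver): querying link info
 * through the generic rawdev API. The device name format comes from
 * cnxk_bphy_cgx_format_name() above; the PCI address and queue id below are
 * assumptions for the example. Queue 0 maps to the first enabled LMAC, and
 * the dequeued response must be freed by the caller with rte_free().
 *
 *	struct cnxk_bphy_cgx_msg msg = {
 *		.type = CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO,
 *	};
 *	struct rte_rawdev_buf buf = { .buf_addr = &msg };
 *	struct rte_rawdev_buf *bufs[] = { &buf };
 *	struct cnxk_bphy_cgx_msg_link_info *info;
 *	uint16_t dev_id;
 *
 *	dev_id = rte_rawdev_get_dev_id("BPHY_CGX:05:00.0");
 *	if (rte_rawdev_enqueue_buffers(dev_id, bufs, 1,
 *				       (rte_rawdev_obj_t)(size_t)0) == 1 &&
 *	    rte_rawdev_dequeue_buffers(dev_id, bufs, 1,
 *				       (rte_rawdev_obj_t)(size_t)0) == 1) {
 *		info = buf.buf_addr;
 *		printf("link %s\n", info->link_up ? "up" : "down");
 *		rte_free(info);
 *	}
 */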