/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <string.h>

#include <rte_bus_pci.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include <roc_api.h>

#include "rte_pmd_bphy.h"

struct cnxk_bphy_cgx_queue {
	unsigned int lmac;
	/* queue holds up to one response */
	void *rsp;
};

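/*
 * Per-device private data: the ROC CGX handle plus one queue per LMAC
 * enabled in the LMAC bitmap (see cnxk_bphy_cgx_init_queues()).
 */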
struct cnxk_bphy_cgx {
	struct roc_bphy_cgx *rcgx;
	struct cnxk_bphy_cgx_queue queues[MAX_LMACS_PER_CGX];
	unsigned int num_queues;
};

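/*
 * Derive the rawdev name from the PCI bus/device/function so that probe
 * and remove resolve the same device.
 */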
static void
cnxk_bphy_cgx_format_name(char *name, unsigned int len,
			  struct rte_pci_device *pci_dev)
{
	snprintf(name, len, "BPHY_CGX:%x:%02x.%x", pci_dev->addr.bus,
		 pci_dev->addr.devid, pci_dev->addr.function);
}

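/*
 * Report the default queue configuration: a depth of one, since each
 * queue holds at most a single pending response.
 */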
static int
cnxk_bphy_cgx_queue_def_conf(struct rte_rawdev *dev, uint16_t queue_id,
			     rte_rawdev_obj_t queue_conf,
			     size_t queue_conf_size)
{
	unsigned int *conf;

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	if (queue_conf_size != sizeof(*conf))
		return -EINVAL;

	conf = (unsigned int *)queue_conf;
	*conf = 1;

	return 0;
}

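/*
 * Translate a single cnxk_bphy_cgx_msg into the matching ROC CGX call.
 * GET_LINKINFO allocates a response that stays in the queue until the
 * application dequeues it; any previous unconsumed response is dropped.
 */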
static int
cnxk_bphy_cgx_process_buf(struct cnxk_bphy_cgx *cgx, unsigned int queue,
			  struct rte_rawdev_buf *buf)
{
	struct cnxk_bphy_cgx_queue *qp = &cgx->queues[queue];
	struct cnxk_bphy_cgx_msg_set_link_state *link_state;
	struct cnxk_bphy_cgx_msg *msg = buf->buf_addr;
	struct cnxk_bphy_cgx_msg_link_mode *link_mode;
	struct cnxk_bphy_cgx_msg_link_info *link_info;
	struct roc_bphy_cgx_link_info rlink_info;
	struct roc_bphy_cgx_link_mode rlink_mode;
	unsigned int lmac = qp->lmac;
	void *rsp = NULL;
	int ret;

	switch (msg->type) {
	case CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO:
		memset(&rlink_info, 0, sizeof(rlink_info));
		ret = roc_bphy_cgx_get_linkinfo(cgx->rcgx, lmac, &rlink_info);
		if (ret)
			break;

		link_info = rte_zmalloc(NULL, sizeof(*link_info), 0);
		if (!link_info)
			return -ENOMEM;

		link_info->link_up = rlink_info.link_up;
		link_info->full_duplex = rlink_info.full_duplex;
		link_info->speed =
			(enum cnxk_bphy_cgx_eth_link_speed)rlink_info.speed;
		link_info->autoneg = rlink_info.an;
		link_info->fec =
			(enum cnxk_bphy_cgx_eth_link_fec)rlink_info.fec;
		link_info->mode =
			(enum cnxk_bphy_cgx_eth_link_mode)rlink_info.mode;
		rsp = link_info;
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE:
		ret = roc_bphy_cgx_intlbk_disable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE:
		ret = roc_bphy_cgx_intlbk_enable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE:
		ret = roc_bphy_cgx_ptp_rx_disable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE:
		ret = roc_bphy_cgx_ptp_rx_enable(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE:
		link_mode = msg->data;
		memset(&rlink_mode, 0, sizeof(rlink_mode));
		rlink_mode.full_duplex = link_mode->full_duplex;
		rlink_mode.an = link_mode->autoneg;
		rlink_mode.speed =
			(enum roc_bphy_cgx_eth_link_speed)link_mode->speed;
		rlink_mode.mode =
			(enum roc_bphy_cgx_eth_link_mode)link_mode->mode;
		ret = roc_bphy_cgx_set_link_mode(cgx->rcgx, lmac, &rlink_mode);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE:
		link_state = msg->data;
		ret = roc_bphy_cgx_set_link_state(cgx->rcgx, lmac,
						  link_state->state);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_START_RXTX:
		ret = roc_bphy_cgx_start_rxtx(cgx->rcgx, lmac);
		break;
	case CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX:
		ret = roc_bphy_cgx_stop_rxtx(cgx->rcgx, lmac);
		break;
	default:
		return -EINVAL;
	}

	/* get rid of last response if any */
	if (qp->rsp) {
		RTE_LOG(WARNING, PMD, "Previous response got overwritten\n");
		rte_free(qp->rsp);
	}
	qp->rsp = rsp;

	return ret;
}

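/*
 * Process messages enqueued by the application.  The target LMAC queue is
 * passed through the opaque context argument and only the first buffer is
 * consumed per call.  A minimal caller-side sketch (dev_id and queue are
 * hypothetical placeholders):
 *
 *	struct cnxk_bphy_cgx_msg msg = {
 *		.type = CNXK_BPHY_CGX_MSG_TYPE_START_RXTX,
 *	};
 *	struct rte_rawdev_buf buf = { .buf_addr = &msg };
 *	struct rte_rawdev_buf *bufs[] = { &buf };
 *
 *	rte_rawdev_enqueue_buffers(dev_id, bufs, 1,
 *				   (rte_rawdev_obj_t)(size_t)queue);
 */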
static int
cnxk_bphy_cgx_enqueue_bufs(struct rte_rawdev *dev,
			   struct rte_rawdev_buf **buffers, unsigned int count,
			   rte_rawdev_obj_t context)
{
	struct cnxk_bphy_cgx *cgx = dev->dev_private;
	unsigned int queue = (size_t)context;
	int ret;

	if (queue >= cgx->num_queues)
		return -EINVAL;

	if (count == 0)
		return 0;

	ret = cnxk_bphy_cgx_process_buf(cgx, queue, buffers[0]);
	if (ret)
		return ret;

	return 1;
}

static uint16_t
cnxk_bphy_cgx_queue_count(struct rte_rawdev *dev)
{
	struct cnxk_bphy_cgx *cgx = dev->dev_private;

	return cgx->num_queues;
}

static const struct rte_rawdev_ops cnxk_bphy_cgx_rawdev_ops = {
	.queue_def_conf = cnxk_bphy_cgx_queue_def_conf,
	.enqueue_bufs = cnxk_bphy_cgx_enqueue_bufs,
	.queue_count = cnxk_bphy_cgx_queue_count,
};

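/* Expose one queue per LMAC present in the ROC LMAC bitmap. */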
static void
cnxk_bphy_cgx_init_queues(struct cnxk_bphy_cgx *cgx)
{
	struct roc_bphy_cgx *rcgx = cgx->rcgx;
	unsigned int i;

	for (i = 0; i < RTE_DIM(cgx->queues); i++) {
		if (!(rcgx->lmac_bmap & BIT_ULL(i)))
			continue;

		cgx->queues[cgx->num_queues++].lmac = i;
	}
}

static void
cnxk_bphy_cgx_fini_queues(struct cnxk_bphy_cgx *cgx)
{
	unsigned int i;

	for (i = 0; i < cgx->num_queues; i++) {
		if (cgx->queues[i].rsp)
			rte_free(cgx->queues[i].rsp);
	}

	cgx->num_queues = 0;
}

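/*
 * Probe runs in the primary process only: it requires a mapped BAR0,
 * initializes the ROC platform layer, allocates a rawdev named after the
 * PCI address and sets up one queue per enabled LMAC.
 */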
static int
cnxk_bphy_cgx_rawdev_probe(struct rte_pci_driver *pci_drv,
			   struct rte_pci_device *pci_dev)
{
	char name[RTE_RAWDEV_NAME_MAX_LEN];
	struct rte_rawdev *rawdev;
	struct cnxk_bphy_cgx *cgx;
	struct roc_bphy_cgx *rcgx;
	int ret;

	RTE_SET_USED(pci_drv);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!pci_dev->mem_resource[0].addr)
		return -ENODEV;

	ret = roc_plt_init();
	if (ret)
		return ret;

	cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(*cgx), rte_socket_id());
	if (!rawdev)
		return -ENOMEM;

	rawdev->dev_ops = &cnxk_bphy_cgx_rawdev_ops;
	rawdev->device = &pci_dev->device;
	rawdev->driver_name = pci_dev->driver->driver.name;

	cgx = rawdev->dev_private;
	cgx->rcgx = rte_zmalloc(NULL, sizeof(*rcgx), 0);
	if (!cgx->rcgx) {
		ret = -ENOMEM;
		goto out_pmd_release;
	}

	rcgx = cgx->rcgx;
	rcgx->bar0_pa = pci_dev->mem_resource[0].phys_addr;
	rcgx->bar0_va = pci_dev->mem_resource[0].addr;
	ret = roc_bphy_cgx_dev_init(rcgx);
	if (ret)
		goto out_free;

	cnxk_bphy_cgx_init_queues(cgx);

	return 0;
out_free:
	rte_free(rcgx);
out_pmd_release:
	rte_rawdev_pmd_release(rawdev);

	return ret;
}

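/* Undo probe: drop pending responses, tear down the ROC CGX device and
 * release the rawdev.
 */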
static int
cnxk_bphy_cgx_rawdev_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_RAWDEV_NAME_MAX_LEN];
	struct rte_rawdev *rawdev;
	struct cnxk_bphy_cgx *cgx;

	cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
	rawdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rawdev)
		return -ENODEV;

	cgx = rawdev->dev_private;
	cnxk_bphy_cgx_fini_queues(cgx);
	roc_bphy_cgx_dev_fini(cgx->rcgx);
	rte_free(cgx->rcgx);

	return rte_rawdev_pmd_release(rawdev);
}

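/* The driver binds to CN9K CGX and CN10K RPM PCI devices and depends on
 * the vfio-pci kernel module for BAR mapping.
 */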
static const struct rte_pci_id cnxk_bphy_cgx_map[] = {
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_CGX)},
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM)},
	{} /* sentinel */
};

static struct rte_pci_driver bphy_cgx_rawdev_pmd = {
	.id_table = cnxk_bphy_cgx_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = cnxk_bphy_cgx_rawdev_probe,
	.remove = cnxk_bphy_cgx_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(cnxk_bphy_cgx_rawdev_pci_driver, bphy_cgx_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(cnxk_bphy_cgx_rawdev_pci_driver, cnxk_bphy_cgx_map);
RTE_PMD_REGISTER_KMOD_DEP(cnxk_bphy_cgx_rawdev_pci_driver, "vfio-pci");