drivers/raw/cnxk_bphy/rte_pmd_bphy.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _CNXK_BPHY_H_
#define _CNXK_BPHY_H_

#include <stdbool.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_rawdev.h>

#include "cnxk_bphy_irq.h"

enum cnxk_bphy_cgx_msg_type {
        CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO,
        CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE,
        CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE,
        CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE,
        CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE,
        CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE,
        CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE,
        CNXK_BPHY_CGX_MSG_TYPE_START_RXTX,
        CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX,
        CNXK_BPHY_CGX_MSG_TYPE_GET_SUPPORTED_FEC,
        CNXK_BPHY_CGX_MSG_TYPE_SET_FEC,
};

enum cnxk_bphy_cgx_eth_link_speed {
        CNXK_BPHY_CGX_ETH_LINK_SPEED_NONE,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_10M,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_100M,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_1G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_2HG,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_5G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_10G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_20G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_25G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_40G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_50G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_80G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_100G,
        __CNXK_BPHY_CGX_ETH_LINK_SPEED_MAX
};

enum cnxk_bphy_cgx_eth_link_fec {
        CNXK_BPHY_CGX_ETH_LINK_FEC_NONE,
        CNXK_BPHY_CGX_ETH_LINK_FEC_BASE_R,
        CNXK_BPHY_CGX_ETH_LINK_FEC_RS,
        __CNXK_BPHY_CGX_ETH_LINK_FEC_MAX
};

enum cnxk_bphy_cgx_eth_link_mode {
        CNXK_BPHY_CGX_ETH_LINK_MODE_SGMII_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_1000_BASEX_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_QSGMII_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_10G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_10G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_10G_KR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_20G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_2_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_CR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_KR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40G_CR4_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40G_KR4_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40GAUI_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_4_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_CR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_KR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_80GAUI_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_100G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_100G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_100G_CR4_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_100G_KR4_BIT,
        __CNXK_BPHY_CGX_ETH_LINK_MODE_MAX
};

struct cnxk_bphy_cgx_msg_link_mode {
        bool full_duplex;
        bool autoneg;
        enum cnxk_bphy_cgx_eth_link_speed speed;
        enum cnxk_bphy_cgx_eth_link_mode mode;
};

struct cnxk_bphy_cgx_msg_link_info {
        bool link_up;
        bool full_duplex;
        enum cnxk_bphy_cgx_eth_link_speed speed;
        bool autoneg;
        enum cnxk_bphy_cgx_eth_link_fec fec;
        enum cnxk_bphy_cgx_eth_link_mode mode;
};

struct cnxk_bphy_cgx_msg_set_link_state {
        bool state; /* up or down */
};

struct cnxk_bphy_cgx_msg {
        enum cnxk_bphy_cgx_msg_type type;
        /*
         * data depends on message type and whether
         * it's a request or a response
         */
        void *data;
};

#define cnxk_bphy_mem       bphy_mem
#define CNXK_BPHY_DEF_QUEUE 0

enum cnxk_bphy_irq_msg_type {
        CNXK_BPHY_IRQ_MSG_TYPE_INIT,
        CNXK_BPHY_IRQ_MSG_TYPE_FINI,
        CNXK_BPHY_IRQ_MSG_TYPE_REGISTER,
        CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER,
        CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
        CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC,
        CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC,
};

struct cnxk_bphy_irq_msg {
        enum cnxk_bphy_irq_msg_type type;
        /*
         * The data field, depending on message type, may point to
         * - (enq) full struct cnxk_bphy_irq_info for registration request
         * - (enq) struct cnxk_bphy_irq_info with irq_num set for unregistration
         * - (deq) struct cnxk_bphy_mem for memory range request response
         * - (xxx) NULL
         */
        void *data;
};

struct cnxk_bphy_irq_info {
        int irq_num;
        cnxk_bphy_intr_handler_t handler;
        void *data;
        int cpu;
};

static __rte_always_inline int
__rte_pmd_bphy_enq_deq(uint16_t dev_id, unsigned int queue, void *req,
                       void *rsp, size_t rsp_size)
{
        struct rte_rawdev_buf *bufs[1];
        struct rte_rawdev_buf buf;
        void *q;
        int ret;

        /* The rawdev queue id is passed via the opaque context pointer. */
        q = (void *)(size_t)queue;
        buf.buf_addr = req;
        bufs[0] = &buf;

        /* Send the request message to the BPHY rawdev. */
        ret = rte_rawdev_enqueue_buffers(dev_id, bufs, RTE_DIM(bufs), q);
        if (ret < 0)
                return ret;
        if (ret != RTE_DIM(bufs))
                return -EIO;

        /* Some requests carry no response. */
        if (!rsp)
                return 0;

        /* Fetch the response buffer produced by the driver. */
        ret = rte_rawdev_dequeue_buffers(dev_id, bufs, RTE_DIM(bufs), q);
        if (ret < 0)
                return ret;
        if (ret != RTE_DIM(bufs))
                return -EIO;

        /* Copy the response out and release the driver-allocated buffer. */
        rte_memcpy(rsp, buf.buf_addr, rsp_size);
        rte_free(buf.buf_addr);

        return 0;
}

static __rte_always_inline int
rte_pmd_bphy_intr_init(uint16_t dev_id)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_INIT,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
                                      NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_intr_fini(uint16_t dev_id)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_FINI,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
                                      NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_intr_register(uint16_t dev_id, int irq_num,
                           cnxk_bphy_intr_handler_t handler, void *data,
                           int cpu)
{
        struct cnxk_bphy_irq_info info = {
                .irq_num = irq_num,
                .handler = handler,
                .data = data,
                .cpu = cpu,
        };
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_REGISTER,
                .data = &info
        };

        return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
                                      NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_intr_unregister(uint16_t dev_id, int irq_num)
{
        struct cnxk_bphy_irq_info info = {
                .irq_num = irq_num,
        };
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER,
                .data = &info
        };

        return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
                                      NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_intr_mem_get(uint16_t dev_id, struct cnxk_bphy_mem *mem)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
                                      mem, sizeof(*mem));
}
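
/*
 * Illustrative usage sketch, not part of the driver API: one possible way to
 * bring up the BPHY interrupt path with the wrappers above. The function and
 * parameter names below are hypothetical; only the rte_pmd_bphy_* calls and
 * the cnxk_bphy_intr_handler_t / struct cnxk_bphy_mem types come from the
 * driver headers.
 */
static __rte_always_inline int
cnxk_bphy_example_irq_setup(uint16_t dev_id, int irq_num,
                            cnxk_bphy_intr_handler_t handler, void *isr_data,
                            struct cnxk_bphy_mem *mem)
{
        int ret;

        /* Prepare the interrupt handling infrastructure first. */
        ret = rte_pmd_bphy_intr_init(dev_id);
        if (ret)
                return ret;

        /* Attach the handler; run it on CPU 0 in this sketch. */
        ret = rte_pmd_bphy_intr_register(dev_id, irq_num, handler, isr_data, 0);
        if (ret)
                goto out_fini;

        /* Ask the driver for the device memory ranges. */
        ret = rte_pmd_bphy_intr_mem_get(dev_id, mem);
        if (ret)
                goto out_unregister;

        return 0;

out_unregister:
        rte_pmd_bphy_intr_unregister(dev_id, irq_num);
out_fini:
        rte_pmd_bphy_intr_fini(dev_id);
        return ret;
}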

static __rte_always_inline int
rte_pmd_bphy_npa_pf_func_get(uint16_t dev_id, uint16_t *pf_func)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
                                      pf_func, sizeof(*pf_func));
}

static __rte_always_inline int
rte_pmd_bphy_sso_pf_func_get(uint16_t dev_id, uint16_t *pf_func)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
                                      pf_func, sizeof(*pf_func));
}

static __rte_always_inline int
rte_pmd_bphy_cgx_get_link_info(uint16_t dev_id, uint16_t lmac,
                               struct cnxk_bphy_cgx_msg_link_info *info)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, info, sizeof(*info));
}

static __rte_always_inline int
rte_pmd_bphy_cgx_intlbk_disable(uint16_t dev_id, uint16_t lmac)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_cgx_intlbk_enable(uint16_t dev_id, uint16_t lmac)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_cgx_ptp_rx_disable(uint16_t dev_id, uint16_t lmac)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_cgx_ptp_rx_enable(uint16_t dev_id, uint16_t lmac)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_cgx_set_link_mode(uint16_t dev_id, uint16_t lmac,
                               struct cnxk_bphy_cgx_msg_link_mode *mode)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE,
                .data = mode,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
}
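
/*
 * Illustrative sketch, not part of the driver API: requesting a fixed link
 * configuration via rte_pmd_bphy_cgx_set_link_mode(). The 10G KR settings
 * below are example choices only.
 */
static __rte_always_inline int
cnxk_bphy_example_set_10g_kr(uint16_t dev_id, uint16_t lmac)
{
        struct cnxk_bphy_cgx_msg_link_mode mode = {
                .full_duplex = true,
                .autoneg = false,
                .speed = CNXK_BPHY_CGX_ETH_LINK_SPEED_10G,
                .mode = CNXK_BPHY_CGX_ETH_LINK_MODE_10G_KR_BIT,
        };

        return rte_pmd_bphy_cgx_set_link_mode(dev_id, lmac, &mode);
}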

static __rte_always_inline int
rte_pmd_bphy_cgx_set_link_state(uint16_t dev_id, uint16_t lmac, bool up)
{
        struct cnxk_bphy_cgx_msg_set_link_state state = {
                .state = up,
        };
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE,
                .data = &state,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_cgx_start_rxtx(uint16_t dev_id, uint16_t lmac)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_START_RXTX,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_cgx_stop_rxtx(uint16_t dev_id, uint16_t lmac)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
}

static __rte_always_inline int
rte_pmd_bphy_cgx_get_supported_fec(uint16_t dev_id, uint16_t lmac,
                                   enum cnxk_bphy_cgx_eth_link_fec *fec)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_GET_SUPPORTED_FEC,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, fec, sizeof(*fec));
}

static __rte_always_inline int
rte_pmd_bphy_cgx_set_fec(uint16_t dev_id, uint16_t lmac,
                         enum cnxk_bphy_cgx_eth_link_fec fec)
{
        struct cnxk_bphy_cgx_msg msg = {
                .type = CNXK_BPHY_CGX_MSG_TYPE_SET_FEC,
                .data = &fec,
        };

        return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
}
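
/*
 * Illustrative sketch, not part of the driver API: one possible LMAC bring-up
 * sequence using the CGX wrappers above. Error handling is reduced to early
 * returns and the ordering is just an example.
 */
static __rte_always_inline int
cnxk_bphy_example_cgx_up(uint16_t dev_id, uint16_t lmac,
                         struct cnxk_bphy_cgx_msg_link_info *info)
{
        int ret;

        /* Start traffic on the LMAC and force the link up. */
        ret = rte_pmd_bphy_cgx_start_rxtx(dev_id, lmac);
        if (ret)
                return ret;
        ret = rte_pmd_bphy_cgx_set_link_state(dev_id, lmac, true);
        if (ret)
                return ret;

        /* Read back the resulting link parameters. */
        return rte_pmd_bphy_cgx_get_link_info(dev_id, lmac, info);
}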

#endif /* _CNXK_BPHY_H_ */