/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _CNXK_BPHY_H_
#define _CNXK_BPHY_H_

#include <stdbool.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_rawdev.h>

#include "cnxk_bphy_irq.h"

enum cnxk_bphy_cgx_msg_type {
        CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO,
        CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE,
        CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE,
        CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE,
        CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE,
        CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE,
        CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE,
        CNXK_BPHY_CGX_MSG_TYPE_START_RXTX,
        CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX,
        CNXK_BPHY_CGX_MSG_TYPE_GET_SUPPORTED_FEC,
        CNXK_BPHY_CGX_MSG_TYPE_SET_FEC,
};

enum cnxk_bphy_cgx_eth_link_speed {
        CNXK_BPHY_CGX_ETH_LINK_SPEED_NONE,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_10M,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_100M,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_1G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_2HG, /* 2.5 Gbps */
        CNXK_BPHY_CGX_ETH_LINK_SPEED_5G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_10G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_20G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_25G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_40G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_50G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_80G,
        CNXK_BPHY_CGX_ETH_LINK_SPEED_100G,
        __CNXK_BPHY_CGX_ETH_LINK_SPEED_MAX
};

enum cnxk_bphy_cgx_eth_link_fec {
        CNXK_BPHY_CGX_ETH_LINK_FEC_NONE,
        CNXK_BPHY_CGX_ETH_LINK_FEC_BASE_R,
        CNXK_BPHY_CGX_ETH_LINK_FEC_RS,
        __CNXK_BPHY_CGX_ETH_LINK_FEC_MAX
};

enum cnxk_bphy_cgx_eth_link_mode {
        CNXK_BPHY_CGX_ETH_LINK_MODE_SGMII_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_1000_BASEX_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_QSGMII_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_10G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_10G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_10G_KR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_20G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_2_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_CR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_25G_KR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40G_CR4_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40G_KR4_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_40GAUI_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_4_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_CR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_50G_KR_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_80GAUI_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_100G_C2C_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_100G_C2M_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_100G_CR4_BIT,
        CNXK_BPHY_CGX_ETH_LINK_MODE_100G_KR4_BIT,
        __CNXK_BPHY_CGX_ETH_LINK_MODE_MAX
};

struct cnxk_bphy_cgx_msg_link_mode {
        bool full_duplex;
        bool autoneg;
        enum cnxk_bphy_cgx_eth_link_speed speed;
        enum cnxk_bphy_cgx_eth_link_mode mode;
};

struct cnxk_bphy_cgx_msg_link_info {
        bool link_up;
        bool full_duplex;
        enum cnxk_bphy_cgx_eth_link_speed speed;
        bool autoneg;
        enum cnxk_bphy_cgx_eth_link_fec fec;
        enum cnxk_bphy_cgx_eth_link_mode mode;
};

struct cnxk_bphy_cgx_msg_set_link_state {
        bool state; /* up or down */
};

struct cnxk_bphy_cgx_msg {
        enum cnxk_bphy_cgx_msg_type type;
        /*
         * data depends on message type and whether
         * it's a request or a response
         */
        void *data;
};

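/*
 * Illustrative usage sketch, not an API defined by this header: one way an
 * application might issue a CGX request, mirroring the enqueue/dequeue
 * pattern of the helpers further below. The device id (cgx_dev_id), the
 * queue id (0) and the layout of the response are placeholders/assumptions;
 * the response contents depend on the message type, as described above.
 *
 *	struct cnxk_bphy_cgx_msg msg = {
 *		.type = CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO,
 *	};
 *	struct rte_rawdev_buf buf = { .buf_addr = &msg };
 *	struct rte_rawdev_buf *bufs[1] = { &buf };
 *	int ret;
 *
 *	ret = rte_rawdev_enqueue_buffers(cgx_dev_id, bufs, 1, 0);
 *	if (ret)
 *		... handle error ...
 *	ret = rte_rawdev_dequeue_buffers(cgx_dev_id, bufs, 1, 0);
 *	if (ret)
 *		... handle error ...
 *
 * On successful dequeue buf.buf_addr refers to the response message.
 */
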
/* struct cnxk_bphy_mem is an alias for the driver's struct bphy_mem */
#define cnxk_bphy_mem       bphy_mem
/* queue used by the IRQ helpers below */
#define CNXK_BPHY_DEF_QUEUE 0

enum cnxk_bphy_irq_msg_type {
        CNXK_BPHY_IRQ_MSG_TYPE_INIT,
        CNXK_BPHY_IRQ_MSG_TYPE_FINI,
        CNXK_BPHY_IRQ_MSG_TYPE_REGISTER,
        CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER,
        CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
        CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC,
        CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC,
};

struct cnxk_bphy_irq_msg {
        enum cnxk_bphy_irq_msg_type type;
        /*
         * The data field, depending on the message type, may point to:
         * - (enq) a filled struct cnxk_bphy_irq_info for a registration request
         * - (enq) a struct cnxk_bphy_irq_info with only irq_num set for an
         *         unregistration request
         * - (deq) a struct cnxk_bphy_mem for a memory range request response
         * - (xxx) NULL
         */
        void *data;
};

struct cnxk_bphy_irq_info {
        int irq_num;
        cnxk_bphy_intr_handler_t handler;
        void *data;
        int cpu;
};

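/*
 * Illustrative call flow (a sketch, not a mandated sequence). dev_id, irq_num,
 * cpu, my_handler and my_data are hypothetical application-provided values;
 * the handler must match cnxk_bphy_intr_handler_t from cnxk_bphy_irq.h.
 *
 *	struct cnxk_bphy_mem *mem;
 *
 *	rte_pmd_bphy_intr_init(dev_id);
 *	rte_pmd_bphy_intr_register(dev_id, irq_num, my_handler, my_data, cpu);
 *	mem = rte_pmd_bphy_intr_mem_get(dev_id);
 *	... access BPHY resources and service interrupts ...
 *	rte_pmd_bphy_intr_unregister(dev_id, irq_num);
 *	rte_pmd_bphy_intr_fini(dev_id);
 */
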
/* Set up interrupt handling on a BPHY device. */
static __rte_always_inline int
rte_pmd_bphy_intr_init(uint16_t dev_id)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_INIT,
        };
        struct rte_rawdev_buf *bufs[1];
        struct rte_rawdev_buf buf;

        buf.buf_addr = &msg;
        bufs[0] = &buf;

        return rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
}

/* Tear down interrupt handling on a BPHY device. */
static __rte_always_inline void
rte_pmd_bphy_intr_fini(uint16_t dev_id)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_FINI,
        };
        struct rte_rawdev_buf *bufs[1];
        struct rte_rawdev_buf buf;

        buf.buf_addr = &msg;
        bufs[0] = &buf;

        rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
}

/* Register a handler for the given BPHY interrupt on the given CPU. */
static __rte_always_inline int
rte_pmd_bphy_intr_register(uint16_t dev_id, int irq_num,
                           cnxk_bphy_intr_handler_t handler, void *data,
                           int cpu)
{
        struct cnxk_bphy_irq_info info = {
                .irq_num = irq_num,
                .handler = handler,
                .data = data,
                .cpu = cpu,
        };
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_REGISTER,
                .data = &info
        };
        struct rte_rawdev_buf *bufs[1];
        struct rte_rawdev_buf buf;

        buf.buf_addr = &msg;
        bufs[0] = &buf;

        return rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
}

/* Unregister the handler for the given BPHY interrupt. */
static __rte_always_inline void
rte_pmd_bphy_intr_unregister(uint16_t dev_id, int irq_num)
{
        struct cnxk_bphy_irq_info info = {
                .irq_num = irq_num,
        };
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER,
                .data = &info
        };
        struct rte_rawdev_buf *bufs[1];
        struct rte_rawdev_buf buf;

        buf.buf_addr = &msg;
        bufs[0] = &buf;

        rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
}

/* Retrieve BPHY memory resources from the PMD; returns NULL on failure. */
static __rte_always_inline struct cnxk_bphy_mem *
rte_pmd_bphy_intr_mem_get(uint16_t dev_id)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
        };
        struct rte_rawdev_buf *bufs[1];
        struct rte_rawdev_buf buf;
        int ret;

        buf.buf_addr = &msg;
        bufs[0] = &buf;

        ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
        if (ret)
                return NULL;

        ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
        if (ret)
                return NULL;

        return buf.buf_addr;
}

/* Get the NPA PF_FUNC identifier; returns 0 on failure. */
static __rte_always_inline uint16_t
rte_pmd_bphy_npa_pf_func_get(uint16_t dev_id)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC,
        };
        struct rte_rawdev_buf *bufs[1];
        struct rte_rawdev_buf buf;
        int ret;

        buf.buf_addr = &msg;
        bufs[0] = &buf;

        ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
        if (ret)
                return 0;

        ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
        if (ret)
                return 0;

        return (uint16_t)(size_t)buf.buf_addr;
}

/* Get the SSO PF_FUNC identifier; returns 0 on failure. */
static __rte_always_inline uint16_t
rte_pmd_bphy_sso_pf_func_get(uint16_t dev_id)
{
        struct cnxk_bphy_irq_msg msg = {
                .type = CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC,
        };
        struct rte_rawdev_buf *bufs[1];
        struct rte_rawdev_buf buf;
        int ret;

        buf.buf_addr = &msg;
        bufs[0] = &buf;

        ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
        if (ret)
                return 0;

        ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
        if (ret)
                return 0;

        return (uint16_t)(size_t)buf.buf_addr;
}

#endif /* _CNXK_BPHY_H_ */