1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_bus_pci.h>
8 #include <rte_common.h>
10 #include <rte_eventdev_pmd_pci.h>
11 #include <rte_kvargs.h>
12 #include <rte_mbuf_pool_ops.h>
15 #include "otx2_evdev_stats.h"
16 #include "otx2_evdev.h"
18 #include "otx2_tim_evdev.h"
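/*
 * OCTEON TX2 SSO (Schedule/Synchronize/Order) event device PMD.
 *
 * Broadly, event ports map to SSOW work-slot LFs (GWS) and event queues map
 * to SSO group LFs (GGRP); both are requested from the AF over the shared
 * mailbox (otx2_mbox). The fast-path enqueue/dequeue handlers are picked
 * from the offload-flag indexed tables in sso_fastpath_fns_set() below.
 */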
21 sso_get_msix_offsets(const struct rte_eventdev *event_dev)
23 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
24 uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
25 struct otx2_mbox *mbox = dev->mbox;
26 struct msix_offset_rsp *msix_rsp;
29 /* Get SSO and SSOW MSIX vector offsets */
30 otx2_mbox_alloc_msg_msix_offset(mbox);
31 rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
33 for (i = 0; i < nb_ports; i++)
34 dev->ssow_msixoff[i] = msix_rsp->ssow_msixoff[i];
36 for (i = 0; i < dev->nb_event_queues; i++)
37 dev->sso_msixoff[i] = msix_rsp->sso_msixoff[i];
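/* The SSOW (per work-slot) and SSO (per group) MSI-X offsets cached above
 * are consumed later by sso_register_irqs() when the device is configured.
 */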
43 sso_fastpath_fns_set(struct rte_eventdev *event_dev)
45 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
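/* Each table below is a 2^6 lookup indexed by single-bit Rx (or Tx) offload
 * flags; the R()/T() expansions of SSO_*_ADPTR_ENQ_FASTPATH_FUNC fill every
 * [f5][f4][f3][f2][f1][f0] slot with the matching specialised handler, e.g.
 * (illustratively) [0][0][0][0][0][1] being the RSS-only dequeue variant.
 */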
47 const event_dequeue_t ssogws_deq[2][2][2][2][2][2] = {
48 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
49 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name,
50 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
54 const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2] = {
55 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
56 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name,
57 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
61 const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2] = {
62 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
63 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name,
64 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
68 const event_dequeue_burst_t
69 ssogws_deq_timeout_burst[2][2][2][2][2][2] = {
70 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
71 [f5][f4][f3][f2][f1][f0] = \
72 otx2_ssogws_deq_timeout_burst_ ##name,
73 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
77 const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2] = {
78 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
79 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name,
80 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
84 const event_dequeue_burst_t ssogws_deq_seg_burst[2][2][2][2][2][2] = {
85 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
86 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_burst_ ##name,
87 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
91 const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2] = {
92 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
93 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_timeout_ ##name,
94 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
98 const event_dequeue_burst_t
99 ssogws_deq_seg_timeout_burst[2][2][2][2][2][2] = {
100 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
101 [f5][f4][f3][f2][f1][f0] = \
102 otx2_ssogws_deq_seg_timeout_burst_ ##name,
103 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
109 const event_dequeue_t ssogws_dual_deq[2][2][2][2][2][2] = {
110 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
111 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_ ##name,
112 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
116 const event_dequeue_burst_t ssogws_dual_deq_burst[2][2][2][2][2][2] = {
117 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
118 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_burst_ ##name,
119 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
123 const event_dequeue_t ssogws_dual_deq_timeout[2][2][2][2][2][2] = {
124 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
125 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_ ##name,
126 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
130 const event_dequeue_burst_t
131 ssogws_dual_deq_timeout_burst[2][2][2][2][2][2] = {
132 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
133 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_timeout_burst_ ##name,
134 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
138 const event_dequeue_t ssogws_dual_deq_seg[2][2][2][2][2][2] = {
139 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
140 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_dual_deq_seg_ ##name,
141 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
145 const event_dequeue_burst_t
146 ssogws_dual_deq_seg_burst[2][2][2][2][2][2] = {
147 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
148 [f5][f4][f3][f2][f1][f0] = \
149 otx2_ssogws_dual_deq_seg_burst_ ##name,
150 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
154 const event_dequeue_t ssogws_dual_deq_seg_timeout[2][2][2][2][2][2] = {
155 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
156 [f5][f4][f3][f2][f1][f0] = \
157 otx2_ssogws_dual_deq_seg_timeout_ ##name,
158 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
162 const event_dequeue_burst_t
163 ssogws_dual_deq_seg_timeout_burst[2][2][2][2][2][2] = {
164 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
165 [f5][f4][f3][f2][f1][f0] = \
166 otx2_ssogws_dual_deq_seg_timeout_burst_ ##name,
167 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
172 const event_tx_adapter_enqueue ssogws_tx_adptr_enq[2][2][2][2][2][2] = {
173 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
174 [f5][f4][f3][f2][f1][f0] = otx2_ssogws_tx_adptr_enq_ ## name,
175 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
179 const event_tx_adapter_enqueue
180 ssogws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
181 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
182 [f5][f4][f3][f2][f1][f0] = \
183 otx2_ssogws_tx_adptr_enq_seg_ ## name,
184 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
188 const event_tx_adapter_enqueue
189 ssogws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
190 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
191 [f5][f4][f3][f2][f1][f0] = \
192 otx2_ssogws_dual_tx_adptr_enq_ ## name,
193 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
197 const event_tx_adapter_enqueue
198 ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
199 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
200 [f5][f4][f3][f2][f1][f0] = \
201 otx2_ssogws_dual_tx_adptr_enq_seg_ ## name,
202 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
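/* Pick the handlers that match the offloads negotiated for this device:
 * each '!!(dev->rx_offloads & FLAG)' term below collapses one flag to 0/1
 * and selects the corresponding dimension of the tables defined above.
 */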
206 event_dev->enqueue = otx2_ssogws_enq;
207 event_dev->enqueue_burst = otx2_ssogws_enq_burst;
208 event_dev->enqueue_new_burst = otx2_ssogws_enq_new_burst;
209 event_dev->enqueue_forward_burst = otx2_ssogws_enq_fwd_burst;
210 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
211 event_dev->dequeue = ssogws_deq_seg
212 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
213 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
214 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
215 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
216 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
217 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
218 event_dev->dequeue_burst = ssogws_deq_seg_burst
219 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
220 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
221 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
222 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
223 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
224 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
225 if (dev->is_timeout_deq) {
226 event_dev->dequeue = ssogws_deq_seg_timeout
227 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
228 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
229 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
230 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
231 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
232 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
233 event_dev->dequeue_burst =
234 ssogws_deq_seg_timeout_burst
235 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
236 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
237 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
238 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
239 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
240 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
243 event_dev->dequeue = ssogws_deq
244 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
245 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
246 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
247 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
248 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
249 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
250 event_dev->dequeue_burst = ssogws_deq_burst
251 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
252 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
253 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
254 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
255 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
256 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
257 if (dev->is_timeout_deq) {
258 event_dev->dequeue = ssogws_deq_timeout
259 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
260 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
261 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
262 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
263 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
264 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
265 event_dev->dequeue_burst =
266 ssogws_deq_timeout_burst
267 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
268 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
269 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
270 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
271 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
272 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
276 if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
277 /* [TSO] [TSTAMP] [MBUF_NOFF] [VLAN_QINQ] [OL3_OL4_CSUM] [L3_L4_CSUM] */
278 event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg
279 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
280 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
281 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
282 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
283 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
284 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
286 event_dev->txa_enqueue = ssogws_tx_adptr_enq
287 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
288 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
289 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
290 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
291 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
292 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
296 event_dev->enqueue = otx2_ssogws_dual_enq;
297 event_dev->enqueue_burst = otx2_ssogws_dual_enq_burst;
298 event_dev->enqueue_new_burst =
299 otx2_ssogws_dual_enq_new_burst;
300 event_dev->enqueue_forward_burst =
301 otx2_ssogws_dual_enq_fwd_burst;
303 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
304 event_dev->dequeue = ssogws_dual_deq_seg
305 [!!(dev->rx_offloads &
306 NIX_RX_OFFLOAD_TSTAMP_F)]
307 [!!(dev->rx_offloads &
308 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
309 [!!(dev->rx_offloads &
310 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
311 [!!(dev->rx_offloads &
312 NIX_RX_OFFLOAD_CHECKSUM_F)]
313 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
314 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
315 event_dev->dequeue_burst = ssogws_dual_deq_seg_burst
316 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
317 [!!(dev->rx_offloads &
318 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
319 [!!(dev->rx_offloads &
320 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
321 [!!(dev->rx_offloads &
322 NIX_RX_OFFLOAD_CHECKSUM_F)]
323 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
324 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
325 if (dev->is_timeout_deq) {
327 ssogws_dual_deq_seg_timeout
328 [!!(dev->rx_offloads &
329 NIX_RX_OFFLOAD_TSTAMP_F)]
330 [!!(dev->rx_offloads &
331 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
332 [!!(dev->rx_offloads &
333 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
334 [!!(dev->rx_offloads &
335 NIX_RX_OFFLOAD_CHECKSUM_F)]
336 [!!(dev->rx_offloads &
337 NIX_RX_OFFLOAD_PTYPE_F)]
338 [!!(dev->rx_offloads &
339 NIX_RX_OFFLOAD_RSS_F)];
340 event_dev->dequeue_burst =
341 ssogws_dual_deq_seg_timeout_burst
342 [!!(dev->rx_offloads &
343 NIX_RX_OFFLOAD_TSTAMP_F)]
344 [!!(dev->rx_offloads &
345 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
346 [!!(dev->rx_offloads &
347 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
348 [!!(dev->rx_offloads &
349 NIX_RX_OFFLOAD_CHECKSUM_F)]
350 [!!(dev->rx_offloads &
351 NIX_RX_OFFLOAD_PTYPE_F)]
352 [!!(dev->rx_offloads &
353 NIX_RX_OFFLOAD_RSS_F)];
356 event_dev->dequeue = ssogws_dual_deq
357 [!!(dev->rx_offloads &
358 NIX_RX_OFFLOAD_TSTAMP_F)]
359 [!!(dev->rx_offloads &
360 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
361 [!!(dev->rx_offloads &
362 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
363 [!!(dev->rx_offloads &
364 NIX_RX_OFFLOAD_CHECKSUM_F)]
365 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
366 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
367 event_dev->dequeue_burst = ssogws_dual_deq_burst
368 [!!(dev->rx_offloads &
369 NIX_RX_OFFLOAD_TSTAMP_F)]
370 [!!(dev->rx_offloads &
371 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
372 [!!(dev->rx_offloads &
373 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
374 [!!(dev->rx_offloads &
375 NIX_RX_OFFLOAD_CHECKSUM_F)]
376 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
377 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
378 if (dev->is_timeout_deq) {
380 ssogws_dual_deq_timeout
381 [!!(dev->rx_offloads &
382 NIX_RX_OFFLOAD_TSTAMP_F)]
383 [!!(dev->rx_offloads &
384 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
385 [!!(dev->rx_offloads &
386 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
387 [!!(dev->rx_offloads &
388 NIX_RX_OFFLOAD_CHECKSUM_F)]
389 [!!(dev->rx_offloads &
390 NIX_RX_OFFLOAD_PTYPE_F)]
391 [!!(dev->rx_offloads &
392 NIX_RX_OFFLOAD_RSS_F)];
393 event_dev->dequeue_burst =
394 ssogws_dual_deq_timeout_burst
395 [!!(dev->rx_offloads &
396 NIX_RX_OFFLOAD_TSTAMP_F)]
397 [!!(dev->rx_offloads &
398 NIX_RX_OFFLOAD_MARK_UPDATE_F)]
399 [!!(dev->rx_offloads &
400 NIX_RX_OFFLOAD_VLAN_STRIP_F)]
401 [!!(dev->rx_offloads &
402 NIX_RX_OFFLOAD_CHECKSUM_F)]
403 [!!(dev->rx_offloads &
404 NIX_RX_OFFLOAD_PTYPE_F)]
405 [!!(dev->rx_offloads &
406 NIX_RX_OFFLOAD_RSS_F)];
410 if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) {
411 /* [TSO] [TSTAMP] [MBUF_NOFF] [VLAN_QINQ] [OL3_OL4_CSUM] [L3_L4_CSUM] */
412 event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg
413 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
414 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
415 [!!(dev->tx_offloads &
416 NIX_TX_OFFLOAD_MBUF_NOFF_F)]
417 [!!(dev->tx_offloads &
418 NIX_TX_OFFLOAD_VLAN_QINQ_F)]
419 [!!(dev->tx_offloads &
420 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
421 [!!(dev->tx_offloads &
422 NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
424 event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq
425 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]
426 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]
427 [!!(dev->tx_offloads &
428 NIX_TX_OFFLOAD_MBUF_NOFF_F)]
429 [!!(dev->tx_offloads &
430 NIX_TX_OFFLOAD_VLAN_QINQ_F)]
431 [!!(dev->tx_offloads &
432 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
433 [!!(dev->tx_offloads &
434 NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
441 otx2_sso_info_get(struct rte_eventdev *event_dev,
442 struct rte_event_dev_info *dev_info)
444 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
446 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
447 dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
448 dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
449 dev_info->max_event_queues = dev->max_event_queues;
450 dev_info->max_event_queue_flows = (1ULL << 20);
451 dev_info->max_event_queue_priority_levels = 8;
452 dev_info->max_event_priority_levels = 1;
453 dev_info->max_event_ports = dev->max_event_ports;
454 dev_info->max_event_port_dequeue_depth = 1;
455 dev_info->max_event_port_enqueue_depth = 1;
456 dev_info->max_num_events = dev->max_num_events;
457 dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
458 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
459 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
460 RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
461 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
462 RTE_EVENT_DEV_CAP_NONSEQ_MODE;
466 sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
468 uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
472 val |= 0ULL << 12; /* SET 0 */
473 val |= 0x8000800080000000ULL; /* Don't modify rest of the masks */
474 val |= (uint64_t)enable << 14; /* Enable/Disable Membership. */
476 otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
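/* A single GRPMSK_CHG write updates this work-slot's HWGRP membership for
 * 'queue'; otx2_sso_port_link()/otx2_sso_port_unlink()/sso_clr_links() call
 * this helper once per queue (and once per half-slot in dual workslot mode).
 */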
480 otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
481 const uint8_t queues[], const uint8_t priorities[],
484 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
488 RTE_SET_USED(priorities);
489 for (link = 0; link < nb_links; link++) {
491 struct otx2_ssogws_dual *ws = port;
494 sso_port_link_modify((struct otx2_ssogws *)
495 &ws->ws_state[0], queues[link], true);
496 sso_port_link_modify((struct otx2_ssogws *)
497 &ws->ws_state[1], queues[link], true);
499 struct otx2_ssogws *ws = port;
502 sso_port_link_modify(ws, queues[link], true);
505 sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);
507 return (int)nb_links;
511 otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
512 uint8_t queues[], uint16_t nb_unlinks)
514 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
518 for (unlink = 0; unlink < nb_unlinks; unlink++) {
520 struct otx2_ssogws_dual *ws = port;
523 sso_port_link_modify((struct otx2_ssogws *)
524 &ws->ws_state[0], queues[unlink],
526 sso_port_link_modify((struct otx2_ssogws *)
527 &ws->ws_state[1], queues[unlink],
530 struct otx2_ssogws *ws = port;
533 sso_port_link_modify(ws, queues[unlink], false);
536 sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);
538 return (int)nb_unlinks;
542 sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
543 uint16_t nb_lf, uint8_t attach)
546 struct rsrc_attach_req *req;
548 req = otx2_mbox_alloc_msg_attach_resources(mbox);
560 if (otx2_mbox_process(mbox) < 0)
563 struct rsrc_detach_req *req;
565 req = otx2_mbox_alloc_msg_detach_resources(mbox);
577 if (otx2_mbox_process(mbox) < 0)
585 sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
586 enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
595 struct sso_lf_alloc_req *req_ggrp;
596 req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
597 req_ggrp->hwgrps = nb_lf;
602 struct ssow_lf_alloc_req *req_hws;
603 req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
604 req_hws->hws = nb_lf;
614 struct sso_lf_free_req *req_ggrp;
615 req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
616 req_ggrp->hwgrps = nb_lf;
621 struct ssow_lf_free_req *req_hws;
622 req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
623 req_hws->hws = nb_lf;
631 rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
635 if (alloc && type == SSO_LF_GGRP) {
636 struct sso_lf_alloc_rsp *rsp_ggrp = rsp;
638 dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
639 dev->xae_waes = rsp_ggrp->xaq_wq_entries;
640 dev->iue = rsp_ggrp->in_unit_entries;
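/* xaq_buf_size, xae_waes and iue reported by the AF are cached here and
 * later drive the XAQ pool sizing in sso_xaq_allocate().
 */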
647 otx2_sso_port_release(void *port)
653 otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
655 RTE_SET_USED(event_dev);
656 RTE_SET_USED(queue_id);
660 sso_clr_links(const struct rte_eventdev *event_dev)
662 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
665 for (i = 0; i < dev->nb_event_ports; i++) {
667 struct otx2_ssogws_dual *ws;
669 ws = event_dev->data->ports[i];
670 for (j = 0; j < dev->nb_event_queues; j++) {
671 sso_port_link_modify((struct otx2_ssogws *)
672 &ws->ws_state[0], j, false);
673 sso_port_link_modify((struct otx2_ssogws *)
674 &ws->ws_state[1], j, false);
677 struct otx2_ssogws *ws;
679 ws = event_dev->data->ports[i];
680 for (j = 0; j < dev->nb_event_queues; j++)
681 sso_port_link_modify(ws, j, false);
687 sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
689 ws->tag_op = base + SSOW_LF_GWS_TAG;
690 ws->wqp_op = base + SSOW_LF_GWS_WQP;
691 ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK;
692 ws->swtp_op = base + SSOW_LF_GWS_SWTP;
693 ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
694 ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
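/* Each op pointer is a pre-computed MMIO address (LF base + register
 * offset) so the fast-path enqueue/dequeue code can issue loads and stores
 * without recomputing addresses per event.
 */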
698 sso_configure_dual_ports(const struct rte_eventdev *event_dev)
700 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
701 struct otx2_mbox *mbox = dev->mbox;
706 otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);
708 nb_lf = dev->nb_event_ports * 2;
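/* Dual workslot mode pairs two GWS LFs per event port (hence the '* 2'),
 * matching the 'dev->dual_ws ? 2 : 1' factor used elsewhere, e.g. in
 * sso_get_msix_offsets() and sso_lf_teardown().
 */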
709 /* Ask AF to attach required LFs. */
710 rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
712 otx2_err("Failed to attach SSO GWS LF");
716 if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
717 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
718 otx2_err("Failed to init SSO GWS LF");
722 for (i = 0; i < dev->nb_event_ports; i++) {
723 struct otx2_ssogws_dual *ws;
726 /* Free memory prior to re-allocation if needed */
727 if (event_dev->data->ports[i] != NULL) {
728 ws = event_dev->data->ports[i];
733 /* Allocate event port memory */
734 ws = rte_zmalloc_socket("otx2_sso_ws",
735 sizeof(struct otx2_ssogws_dual),
737 event_dev->data->socket_id);
739 otx2_err("Failed to alloc memory for port=%d", i);
745 base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
746 sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
749 base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
750 sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
753 event_dev->data->ports[i] = ws;
757 sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
758 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
765 sso_configure_ports(const struct rte_eventdev *event_dev)
767 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
768 struct otx2_mbox *mbox = dev->mbox;
772 otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);
774 nb_lf = dev->nb_event_ports;
775 /* Ask AF to attach required LFs. */
776 rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
778 otx2_err("Failed to attach SSO GWS LF");
782 if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
783 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
784 otx2_err("Failed to init SSO GWS LF");
788 for (i = 0; i < nb_lf; i++) {
789 struct otx2_ssogws *ws;
792 /* Free memory prior to re-allocation if needed */
793 if (event_dev->data->ports[i] != NULL) {
794 ws = event_dev->data->ports[i];
799 /* Allocate event port memory */
800 ws = rte_zmalloc_socket("otx2_sso_ws",
801 sizeof(struct otx2_ssogws),
803 event_dev->data->socket_id);
805 otx2_err("Failed to alloc memory for port=%d", i);
811 base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
812 sso_set_port_ops(ws, base);
814 event_dev->data->ports[i] = ws;
818 sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
819 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
826 sso_configure_queues(const struct rte_eventdev *event_dev)
828 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
829 struct otx2_mbox *mbox = dev->mbox;
833 otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);
835 nb_lf = dev->nb_event_queues;
836 /* Ask AF to attach required LFs. */
837 rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
839 otx2_err("Failed to attach SSO GGRP LF");
843 if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
844 sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
845 otx2_err("Failed to init SSO GGRP LF");
853 sso_xaq_allocate(struct otx2_sso_evdev *dev)
855 const struct rte_memzone *mz;
856 struct npa_aura_s *aura;
857 static int reconfig_cnt;
858 char pool_name[RTE_MEMZONE_NAMESIZE];
863 rte_mempool_free(dev->xaq_pool);
866 * Allocate memory for Add work backpressure.
868 mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
870 mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
872 sizeof(struct npa_aura_s),
874 RTE_MEMZONE_IOVA_CONTIG,
877 otx2_err("Failed to allocate mem for fcmem");
881 dev->fc_iova = mz->iova;
882 dev->fc_mem = mz->addr;
884 aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
885 memset(aura, 0, sizeof(struct npa_aura_s));
888 aura->fc_addr = dev->fc_iova;
889 aura->fc_hyst_bits = 0; /* Store count on all updates */
891 /* Taken from HRM 14.3.3(4) */
892 xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
894 xaq_cnt += dev->xae_cnt / dev->xae_waes;
895 else if (dev->adptr_xae_cnt)
896 xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
897 (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
899 xaq_cnt += (dev->iue / dev->xae_waes) +
900 (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
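/* Illustrative sizing (hypothetical numbers): with 4 event queues and
 * xae_cnt / xae_waes == 256, the xae_cnt devargs branch above gives
 * xaq_cnt = 4 * OTX2_SSO_XAQ_CACHE_CNT + 256 buffers.
 */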
902 otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
903 /* Set up the XAQ pool based on the number of event queues. */
904 snprintf(pool_name, sizeof(pool_name), "otx2_xaq_buf_pool_%d", reconfig_cnt);
905 dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
906 xaq_cnt, dev->xaq_buf_size, 0, 0,
909 if (dev->xaq_pool == NULL) {
910 otx2_err("Unable to create empty mempool.");
911 rte_memzone_free(mz);
915 rc = rte_mempool_set_ops_byname(dev->xaq_pool,
916 rte_mbuf_platform_mempool_ops(), aura);
918 otx2_err("Unable to set xaqpool ops.");
922 rc = rte_mempool_populate_default(dev->xaq_pool);
924 otx2_err("Unable to populate xaqpool.");
928 /* When SW does addwork (enqueue) it checks for space in the XAQ by
929  * comparing fc_addr above against the xaq_lmt calculated below.
930  * A minimum headroom of (OTX2_SSO_XAQ_SLACK / 2) per queue is kept so
931  * that SSO can request and cache XAQs even before enqueue is called.
933 dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
934 dev->nb_event_queues);
935 dev->nb_xaq_cfg = xaq_cnt;
939 rte_mempool_free(dev->xaq_pool);
940 rte_memzone_free(mz);
945 sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
947 struct otx2_mbox *mbox = dev->mbox;
948 struct sso_hw_setconfig *req;
950 otx2_sso_dbg("Configuring XAQ for GGRPs");
951 req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
952 req->npa_pf_func = otx2_npa_pf_func_get();
953 req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
954 req->hwgrps = dev->nb_event_queues;
956 return otx2_mbox_process(mbox);
960 sso_lf_teardown(struct otx2_sso_evdev *dev,
961 enum otx2_sso_lf_type lf_type)
967 nb_lf = dev->nb_event_queues;
970 nb_lf = dev->nb_event_ports;
971 nb_lf *= dev->dual_ws ? 2 : 1;
977 sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
978 sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
982 otx2_sso_configure(const struct rte_eventdev *event_dev)
984 struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
985 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
990 deq_tmo_ns = conf->dequeue_timeout_ns;
993 deq_tmo_ns = dev->min_dequeue_timeout_ns;
995 if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
996 deq_tmo_ns > dev->max_dequeue_timeout_ns) {
997 otx2_err("Unsupported dequeue timeout requested");
1001 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
1002 dev->is_timeout_deq = 1;
1004 dev->deq_tmo_ns = deq_tmo_ns;
1006 if (conf->nb_event_ports > dev->max_event_ports ||
1007 conf->nb_event_queues > dev->max_event_queues) {
1008 otx2_err("Unsupported event queues/ports requested");
1012 if (conf->nb_event_port_dequeue_depth > 1) {
1013 otx2_err("Unsupported event port deq depth requested");
1017 if (conf->nb_event_port_enqueue_depth > 1) {
1018 otx2_err("Unsupported event port enq depth requested");
1022 if (dev->configured)
1023 sso_unregister_irqs(event_dev);
1025 if (dev->nb_event_queues) {
1026 /* Tear down any previously configured queues. */
1027 sso_lf_teardown(dev, SSO_LF_GGRP);
1029 if (dev->nb_event_ports) {
1030 /* Tear down any previously configured ports. */
1031 sso_lf_teardown(dev, SSO_LF_GWS);
1034 dev->nb_event_queues = conf->nb_event_queues;
1035 dev->nb_event_ports = conf->nb_event_ports;
1038 rc = sso_configure_dual_ports(event_dev);
1040 rc = sso_configure_ports(event_dev);
1043 otx2_err("Failed to configure event ports");
1047 if (sso_configure_queues(event_dev) < 0) {
1048 otx2_err("Failed to configure event queues");
1053 if (sso_xaq_allocate(dev) < 0) {
1055 goto teardown_hwggrp;
1058 /* Clear any prior port-queue mapping. */
1059 sso_clr_links(event_dev);
1060 rc = sso_ggrp_alloc_xaq(dev);
1062 otx2_err("Failed to alloc xaq to ggrp %d", rc);
1063 goto teardown_hwggrp;
1066 rc = sso_get_msix_offsets(event_dev);
1068 otx2_err("Failed to get msix offsets %d", rc);
1069 goto teardown_hwggrp;
1072 rc = sso_register_irqs(event_dev);
1074 otx2_err("Failed to register irq %d", rc);
1075 goto teardown_hwggrp;
1078 dev->configured = 1;
1083 sso_lf_teardown(dev, SSO_LF_GGRP);
1085 sso_lf_teardown(dev, SSO_LF_GWS);
1086 dev->nb_event_queues = 0;
1087 dev->nb_event_ports = 0;
1088 dev->configured = 0;
1093 otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
1094 struct rte_event_queue_conf *queue_conf)
1096 RTE_SET_USED(event_dev);
1097 RTE_SET_USED(queue_id);
1099 queue_conf->nb_atomic_flows = (1ULL << 20);
1100 queue_conf->nb_atomic_order_sequences = (1ULL << 20);
1101 queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
1102 queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
1106 otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
1107 const struct rte_event_queue_conf *queue_conf)
1109 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1110 struct otx2_mbox *mbox = dev->mbox;
1111 struct sso_grp_priority *req;
1114 sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);
1116 req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
1117 req->grp = queue_id;
1119 req->affinity = 0xFF;
1120 /* Normalize <0-255> to <0-7> */
1121 req->priority = queue_conf->priority / 32;
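/* e.g. RTE_EVENT_DEV_PRIORITY_HIGHEST (0) maps to group priority 0 and
 * RTE_EVENT_DEV_PRIORITY_LOWEST (255) maps to 255 / 32 == 7.
 */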
1123 rc = otx2_mbox_process(mbox);
1125 otx2_err("Failed to set priority queue=%d", queue_id);
1133 otx2_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
1134 struct rte_event_port_conf *port_conf)
1136 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1138 RTE_SET_USED(port_id);
1139 port_conf->new_event_threshold = dev->max_num_events;
1140 port_conf->dequeue_depth = 1;
1141 port_conf->enqueue_depth = 1;
1145 otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
1146 const struct rte_event_port_conf *port_conf)
1148 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1149 uintptr_t grps_base[OTX2_SSO_MAX_VHGRP] = {0};
1153 sso_func_trace("Port=%d", port_id);
1154 RTE_SET_USED(port_conf);
1156 if (event_dev->data->ports[port_id] == NULL) {
1157 otx2_err("Invalid port Id %d", port_id);
1161 for (q = 0; q < dev->nb_event_queues; q++) {
1162 grps_base[q] = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | q << 12);
1163 if (grps_base[q] == 0) {
1164 otx2_err("Failed to get grp[%d] base addr", q);
1169 /* Set get_work timeout for HWS */
1170 val = NSEC2USEC(dev->deq_tmo_ns) - 1;
1173 struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];
1175 rte_memcpy(ws->grps_base, grps_base,
1176 sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
1177 ws->fc_mem = dev->fc_mem;
1178 ws->xaq_lmt = dev->xaq_lmt;
1179 ws->tstamp = dev->tstamp;
1180 otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
1181 ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
1182 otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
1183 ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
1185 struct otx2_ssogws *ws = event_dev->data->ports[port_id];
1186 uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
1188 rte_memcpy(ws->grps_base, grps_base,
1189 sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
1190 ws->fc_mem = dev->fc_mem;
1191 ws->xaq_lmt = dev->xaq_lmt;
1192 ws->tstamp = dev->tstamp;
1193 otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
1196 otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
1202 otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
1203 uint64_t *tmo_ticks)
1205 RTE_SET_USED(event_dev);
1206 *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());
1212 ssogws_dump(struct otx2_ssogws *ws, FILE *f)
1214 uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
1216 fprintf(f, "SSOW_LF_GWS Base addr 0x%" PRIx64 "\n", (uint64_t)base);
1217 fprintf(f, "SSOW_LF_GWS_LINKS 0x%" PRIx64 "\n",
1218 otx2_read64(base + SSOW_LF_GWS_LINKS));
1219 fprintf(f, "SSOW_LF_GWS_PENDWQP 0x%" PRIx64 "\n",
1220 otx2_read64(base + SSOW_LF_GWS_PENDWQP));
1221 fprintf(f, "SSOW_LF_GWS_PENDSTATE 0x%" PRIx64 "\n",
1222 otx2_read64(base + SSOW_LF_GWS_PENDSTATE));
1223 fprintf(f, "SSOW_LF_GWS_NW_TIM 0x%" PRIx64 "\n",
1224 otx2_read64(base + SSOW_LF_GWS_NW_TIM));
1225 fprintf(f, "SSOW_LF_GWS_TAG 0x%" PRIx64 "\n",
1226 otx2_read64(base + SSOW_LF_GWS_TAG));
1227 fprintf(f, "SSOW_LF_GWS_WQP 0x%" PRIx64 "\n",
1228 otx2_read64(base + SSOW_LF_GWS_WQP));
1229 fprintf(f, "SSOW_LF_GWS_SWTP 0x%" PRIx64 "\n",
1230 otx2_read64(base + SSOW_LF_GWS_SWTP));
1231 fprintf(f, "SSOW_LF_GWS_PENDTAG 0x%" PRIx64 "\n",
1232 otx2_read64(base + SSOW_LF_GWS_PENDTAG));
1236 ssoggrp_dump(uintptr_t base, FILE *f)
1238 fprintf(f, "SSO_LF_GGRP Base addr 0x%" PRIx64 "\n", (uint64_t)base);
1239 fprintf(f, "SSO_LF_GGRP_QCTL 0x%" PRIx64 "\n",
1240 otx2_read64(base + SSO_LF_GGRP_QCTL));
1241 fprintf(f, "SSO_LF_GGRP_XAQ_CNT 0x%" PRIx64 "\n",
1242 otx2_read64(base + SSO_LF_GGRP_XAQ_CNT));
1243 fprintf(f, "SSO_LF_GGRP_INT_THR 0x%" PRIx64 "\n",
1244 otx2_read64(base + SSO_LF_GGRP_INT_THR));
1245 fprintf(f, "SSO_LF_GGRP_INT_CNT 0x%" PRIx64 "\n",
1246 otx2_read64(base + SSO_LF_GGRP_INT_CNT));
1247 fprintf(f, "SSO_LF_GGRP_AQ_CNT 0x%" PRIx64 "\n",
1248 otx2_read64(base + SSO_LF_GGRP_AQ_CNT));
1249 fprintf(f, "SSO_LF_GGRP_AQ_THR 0x%" PRIx64 "\n",
1250 otx2_read64(base + SSO_LF_GGRP_AQ_THR));
1251 fprintf(f, "SSO_LF_GGRP_MISC_CNT 0x%" PRIx64 "\n",
1252 otx2_read64(base + SSO_LF_GGRP_MISC_CNT));
1256 otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
1258 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1262 fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
1263 "dual_ws" : "single_ws");
1264 /* Dump SSOW registers */
1265 for (port = 0; port < dev->nb_event_ports; port++) {
1267 struct otx2_ssogws_dual *ws =
1268 event_dev->data->ports[port];
1270 fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
1272 ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
1273 fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
1275 ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
1277 fprintf(f, "[%s] SSO single workslot[%d] dump\n",
1279 ssogws_dump(event_dev->data->ports[port], f);
1283 /* Dump SSO registers */
1284 for (queue = 0; queue < dev->nb_event_queues; queue++) {
1285 fprintf(f, "[%s] SSO group[%d] dump\n", __func__, queue);
1287 struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
1288 ssoggrp_dump(ws->grps_base[queue], f);
1290 struct otx2_ssogws *ws = event_dev->data->ports[0];
1291 ssoggrp_dump(ws->grps_base[queue], f);
1297 otx2_handle_event(void *arg, struct rte_event event)
1299 struct rte_eventdev *event_dev = arg;
1301 if (event_dev->dev_ops->dev_stop_flush != NULL)
1302 event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id,
1303 event, event_dev->data->dev_stop_flush_arg);
1307 sso_qos_cfg(struct rte_eventdev *event_dev)
1309 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1310 struct sso_grp_qos_cfg *req;
1313 for (i = 0; i < dev->qos_queue_cnt; i++) {
1314 uint8_t xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
1315 uint8_t iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
1316 uint8_t taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
1318 if (dev->qos_parse_data[i].queue >= dev->nb_event_queues)
1321 req = otx2_mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
1322 req->xaq_limit = (dev->nb_xaq_cfg *
1323 (xaq_prcnt ? xaq_prcnt : 100)) / 100;
1324 req->taq_thr = (SSO_HWGRP_TAQ_MAX_THR_MASK *
1325 (taq_prcnt ? taq_prcnt : 100)) / 100;
1326 req->iaq_thr = (SSO_HWGRP_IAQ_MAX_THR_MASK *
1327 (iaq_prcnt ? iaq_prcnt : 100)) / 100;
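/* A percentage of 0 for any field falls back to 100%, i.e. the full XAQ
 * limit / TAQ threshold / IAQ threshold for that group.
 */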
1330 if (dev->qos_queue_cnt)
1331 otx2_mbox_process(dev->mbox);
1335 sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
1337 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1340 for (i = 0; i < dev->nb_event_ports; i++) {
1342 struct otx2_ssogws_dual *ws;
1344 ws = event_dev->data->ports[i];
1345 ssogws_reset((struct otx2_ssogws *)&ws->ws_state[0]);
1346 ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
1349 ws->ws_state[0].cur_grp = 0;
1350 ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
1351 ws->ws_state[1].cur_grp = 0;
1352 ws->ws_state[1].cur_tt = SSO_SYNC_EMPTY;
1354 struct otx2_ssogws *ws;
1356 ws = event_dev->data->ports[i];
1360 ws->cur_tt = SSO_SYNC_EMPTY;
1366 struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
1367 struct otx2_ssogws temp_ws;
1369 memcpy(&temp_ws, &ws->ws_state[0],
1370 sizeof(struct otx2_ssogws_state));
1371 for (i = 0; i < dev->nb_event_queues; i++) {
1372 /* Consume all the events through HWS0 */
1373 ssogws_flush_events(&temp_ws, i, ws->grps_base[i],
1374 otx2_handle_event, event_dev);
1375 /* Enable/Disable SSO GGRP */
1376 otx2_write64(enable, ws->grps_base[i] +
1379 ws->ws_state[0].cur_grp = 0;
1380 ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
1382 struct otx2_ssogws *ws = event_dev->data->ports[0];
1384 for (i = 0; i < dev->nb_event_queues; i++) {
1385 /* Consume all the events through HWS0 */
1386 ssogws_flush_events(ws, i, ws->grps_base[i],
1387 otx2_handle_event, event_dev);
1388 /* Enable/Disable SSO GGRP */
1389 otx2_write64(enable, ws->grps_base[i] +
1393 ws->cur_tt = SSO_SYNC_EMPTY;
1396 /* reset SSO GWS cache */
1397 otx2_mbox_alloc_msg_sso_ws_cache_inv(dev->mbox);
1398 otx2_mbox_process(dev->mbox);
1402 sso_xae_reconfigure(struct rte_eventdev *event_dev)
1404 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1405 struct rte_mempool *prev_xaq_pool;
1408 if (event_dev->data->dev_started)
1409 sso_cleanup(event_dev, 0);
1411 prev_xaq_pool = dev->xaq_pool;
1412 dev->xaq_pool = NULL;
1413 rc = sso_xaq_allocate(dev);
1415 otx2_err("Failed to alloc xaq pool %d", rc);
1416 rte_mempool_free(prev_xaq_pool);
1419 rc = sso_ggrp_alloc_xaq(dev);
1421 otx2_err("Failed to alloc xaq to ggrp %d", rc);
1422 rte_mempool_free(prev_xaq_pool);
1426 rte_mempool_free(prev_xaq_pool);
1428 if (event_dev->data->dev_started)
1429 sso_cleanup(event_dev, 1);
1435 otx2_sso_start(struct rte_eventdev *event_dev)
1438 sso_qos_cfg(event_dev);
1439 sso_cleanup(event_dev, 1);
1440 sso_fastpath_fns_set(event_dev);
1446 otx2_sso_stop(struct rte_eventdev *event_dev)
1449 sso_cleanup(event_dev, 0);
1454 otx2_sso_close(struct rte_eventdev *event_dev)
1456 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1457 uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1460 if (!dev->configured)
1463 sso_unregister_irqs(event_dev);
1465 for (i = 0; i < dev->nb_event_queues; i++)
1468 for (i = 0; i < dev->nb_event_ports; i++)
1469 otx2_sso_port_unlink(event_dev, event_dev->data->ports[i],
1470 all_queues, dev->nb_event_queues);
1472 sso_lf_teardown(dev, SSO_LF_GGRP);
1473 sso_lf_teardown(dev, SSO_LF_GWS);
1474 dev->nb_event_ports = 0;
1475 dev->nb_event_queues = 0;
1476 rte_mempool_free(dev->xaq_pool);
1477 rte_memzone_free(rte_memzone_lookup(OTX2_SSO_FC_NAME));
1482 /* Initialize and register the event driver with the DPDK application */
1483 static struct rte_eventdev_ops otx2_sso_ops = {
1484 .dev_infos_get = otx2_sso_info_get,
1485 .dev_configure = otx2_sso_configure,
1486 .queue_def_conf = otx2_sso_queue_def_conf,
1487 .queue_setup = otx2_sso_queue_setup,
1488 .queue_release = otx2_sso_queue_release,
1489 .port_def_conf = otx2_sso_port_def_conf,
1490 .port_setup = otx2_sso_port_setup,
1491 .port_release = otx2_sso_port_release,
1492 .port_link = otx2_sso_port_link,
1493 .port_unlink = otx2_sso_port_unlink,
1494 .timeout_ticks = otx2_sso_timeout_ticks,
1496 .eth_rx_adapter_caps_get = otx2_sso_rx_adapter_caps_get,
1497 .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
1498 .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
1499 .eth_rx_adapter_start = otx2_sso_rx_adapter_start,
1500 .eth_rx_adapter_stop = otx2_sso_rx_adapter_stop,
1502 .eth_tx_adapter_caps_get = otx2_sso_tx_adapter_caps_get,
1503 .eth_tx_adapter_queue_add = otx2_sso_tx_adapter_queue_add,
1504 .eth_tx_adapter_queue_del = otx2_sso_tx_adapter_queue_del,
1506 .timer_adapter_caps_get = otx2_tim_caps_get,
1508 .xstats_get = otx2_sso_xstats_get,
1509 .xstats_reset = otx2_sso_xstats_reset,
1510 .xstats_get_names = otx2_sso_xstats_get_names,
1512 .dump = otx2_sso_dump,
1513 .dev_start = otx2_sso_start,
1514 .dev_stop = otx2_sso_stop,
1515 .dev_close = otx2_sso_close,
1516 .dev_selftest = otx2_sso_selftest,
1519 #define OTX2_SSO_XAE_CNT "xae_cnt"
1520 #define OTX2_SSO_SINGLE_WS "single_ws"
1521 #define OTX2_SSO_GGRP_QOS "qos"
1522 #define OTX2_SSO_SELFTEST "selftest"
1525 parse_queue_param(char *value, void *opaque)
1527 struct otx2_sso_qos queue_qos = {0};
1528 uint8_t *val = (uint8_t *)&queue_qos;
1529 struct otx2_sso_evdev *dev = opaque;
1530 char *tok = strtok(value, "-");
1531 struct otx2_sso_qos *old_ptr;
1536 while (tok != NULL) {
1538 tok = strtok(NULL, "-");
1542 if (val != (&queue_qos.iaq_prcnt + 1)) {
1543 otx2_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
1547 dev->qos_queue_cnt++;
1548 old_ptr = dev->qos_parse_data;
1549 dev->qos_parse_data = rte_realloc(dev->qos_parse_data,
1550 sizeof(struct otx2_sso_qos) *
1551 dev->qos_queue_cnt, 0);
1552 if (dev->qos_parse_data == NULL) {
1553 dev->qos_parse_data = old_ptr;
1554 dev->qos_queue_cnt--;
1557 dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
1561 parse_qos_list(const char *value, void *opaque)
1563 char *s = strdup(value);
1574 if (start && start < end) {
1576 parse_queue_param(start + 1, opaque);
1587 parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
1591 /* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] use '-' because ','
1592 * isn't allowed. Everything is expressed in percentages, 0 represents
1595 parse_qos_list(value, opaque);
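/* Illustrative devargs value (hypothetical queue/percentages):
 * qos=[1-50-50-50] caps queue 1 at 50% of the XAQ, TAQ and IAQ resources.
 */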
1601 sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
1603 struct rte_kvargs *kvlist;
1604 uint8_t single_ws = 0;
1606 if (devargs == NULL)
1608 kvlist = rte_kvargs_parse(devargs->args, NULL);
1612 rte_kvargs_process(kvlist, OTX2_SSO_SELFTEST, &parse_kvargs_flag,
1614 rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
1616 rte_kvargs_process(kvlist, OTX2_SSO_SINGLE_WS, &parse_kvargs_flag,
1618 rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
1621 dev->dual_ws = !single_ws;
1622 rte_kvargs_free(kvlist);
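/* Illustrative devargs string combining the options parsed above (bus
 * address and values are examples only):
 *   "0002:0e:00.0,single_ws=1,xae_cnt=8192,qos=[1-50-50-50],selftest=1"
 */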
1626 otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1628 return rte_event_pmd_pci_probe(pci_drv, pci_dev,
1629 sizeof(struct otx2_sso_evdev),
1634 otx2_sso_remove(struct rte_pci_device *pci_dev)
1636 return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
1639 static const struct rte_pci_id pci_sso_map[] = {
1641 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
1642 PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
1649 static struct rte_pci_driver pci_sso = {
1650 .id_table = pci_sso_map,
1651 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
1652 .probe = otx2_sso_probe,
1653 .remove = otx2_sso_remove,
1657 otx2_sso_init(struct rte_eventdev *event_dev)
1659 struct free_rsrcs_rsp *rsrc_cnt;
1660 struct rte_pci_device *pci_dev;
1661 struct otx2_sso_evdev *dev;
1664 event_dev->dev_ops = &otx2_sso_ops;
1665 /* For secondary processes, the primary has done all the work */
1666 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1667 sso_fastpath_fns_set(event_dev);
1671 dev = sso_pmd_priv(event_dev);
1673 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
1675 /* Initialize the base otx2_dev object */
1676 rc = otx2_dev_init(pci_dev, dev);
1678 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
1682 /* Get SSO and SSOW MSIX rsrc cnt */
1683 otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
1684 rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
1686 otx2_err("Unable to get free rsrc count");
1687 goto otx2_dev_uninit;
1689 otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
1690 rsrc_cnt->ssow, rsrc_cnt->npa);
1692 dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
1693 dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
1694 /* Grab the NPA LF if required */
1695 rc = otx2_npa_lf_init(pci_dev, dev);
1697 otx2_err("Unable to init NPA lf. It might not be provisioned");
1698 goto otx2_dev_uninit;
1701 dev->drv_inited = true;
1702 dev->is_timeout_deq = 0;
1703 dev->min_dequeue_timeout_ns = USEC2NSEC(1);
1704 dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
1705 dev->max_num_events = -1;
1706 dev->nb_event_queues = 0;
1707 dev->nb_event_ports = 0;
1709 if (!dev->max_event_ports || !dev->max_event_queues) {
1710 otx2_err("Not enough eventdev resources: queues=%d ports=%d",
1711 dev->max_event_queues, dev->max_event_ports);
1713 goto otx2_npa_lf_uninit;
1717 sso_parse_devargs(dev, pci_dev->device.devargs);
1719 otx2_sso_dbg("Using dual workslot mode");
1720 dev->max_event_ports = dev->max_event_ports / 2;
1722 otx2_sso_dbg("Using single workslot mode");
1725 otx2_sso_pf_func_set(dev->pf_func);
1726 otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
1727 event_dev->data->name, dev->max_event_queues,
1728 dev->max_event_ports);
1729 if (dev->selftest) {
1730 event_dev->dev->driver = &pci_sso.driver;
1731 event_dev->dev_ops->dev_selftest();
1734 otx2_tim_init(pci_dev, (struct otx2_dev *)dev);
1741 otx2_dev_fini(pci_dev, dev);
1747 otx2_sso_fini(struct rte_eventdev *event_dev)
1749 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
1750 struct rte_pci_device *pci_dev;
1752 /* For secondary processes, nothing to be done */
1753 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1756 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
1758 if (!dev->drv_inited)
1761 dev->drv_inited = false;
1765 if (otx2_npa_lf_active(dev)) {
1766 otx2_info("Common resource in use by other devices");
1771 otx2_dev_fini(pci_dev, dev);
1776 RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
1777 RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
1778 RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
1779 RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
1780 OTX2_SSO_SINGLE_WS "=1"
1781 OTX2_SSO_GGRP_QOS "=<string>"
1782 OTX2_SSO_SELFTEST "=1");