1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include "cn9k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
/* A dual-workslot event port pairs two HW workslots. */
9 #define CN9K_DUAL_WS_NB_WS 2
/* Map (dual-WS port id, slot index 0/1) to the underlying HW workslot id. */
10 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
/* Select a dequeue fn from a 6-dim table, one axis per Rx offload flag. */
12 #define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops) \
13 (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)] \
14 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)] \
15 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
16 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)] \
17 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)] \
18 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])
/* Same selection scheme for the Tx adapter enqueue fn, keyed on Tx flags. */
20 #define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops) \
22 enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)] \
23 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)] \
24 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)] \
25 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)] \
26 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
27 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
/* Precompute the MMIO addresses of the per-workslot GWS operations from
 * the workslot base so the fast path issues them without re-deriving
 * offsets on every call.
 */
30 cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
32 ws->tag_op = base + SSOW_LF_GWS_TAG;
33 ws->wqp_op = base + SSOW_LF_GWS_WQP;
34 ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
35 ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
36 ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
37 ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
/* Link the HWGRPs in 'map' to the given event port. For a dual-workslot
 * port both paired HW workslots are linked; OR-ing the return codes keeps
 * a failure from either call visible to the caller.
 */
41 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
43 struct cnxk_sso_evdev *dev = arg;
44 struct cn9k_sso_hws_dual *dws;
45 struct cn9k_sso_hws *ws;
50 rc = roc_sso_hws_link(&dev->sso,
51 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
53 rc |= roc_sso_hws_link(&dev->sso,
54 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
/* Single-workslot port: one link call suffices. */
58 rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
/* Inverse of cn9k_sso_hws_link(): unlink the HWGRPs in 'map' from the
 * port, handling both workslots of a dual-workslot port.
 */
65 cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
67 struct cnxk_sso_evdev *dev = arg;
68 struct cn9k_sso_hws_dual *dws;
69 struct cn9k_sso_hws *ws;
74 rc = roc_sso_hws_unlink(&dev->sso,
75 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
77 rc |= roc_sso_hws_unlink(&dev->sso,
78 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
/* Single-workslot port. */
82 rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
/* Per-port setup: copy the HWGRP base-address table into the port, wire up
 * the XAQ flow-control fields and program the get_work timeout (NW_TIM)
 * on each workslot.
 */
89 cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
91 struct cnxk_sso_evdev *dev = arg;
92 struct cn9k_sso_hws_dual *dws;
93 struct cn9k_sso_hws *ws;
96 /* Set get_work tmo for HWS */
97 val = NSEC2USEC(dev->deq_tmo_ns) - 1;
100 rte_memcpy(dws->grps_base, grps_base,
101 sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
102 dws->fc_mem = dev->fc_mem;
103 dws->xaq_lmt = dev->xaq_lmt;
/* Dual-workslot port: program the timeout on both workslot bases. */
105 plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
106 plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
109 rte_memcpy(ws->grps_base, grps_base,
110 sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
111 ws->fc_mem = dev->fc_mem;
112 ws->xaq_lmt = dev->xaq_lmt;
114 plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
/* Release a port: unlink every event queue from the port's workslot(s),
 * then zero the port structure so stale state cannot leak into a reuse.
 */
119 cn9k_sso_hws_release(void *arg, void *hws)
121 struct cnxk_sso_evdev *dev = arg;
122 struct cn9k_sso_hws_dual *dws;
123 struct cn9k_sso_hws *ws;
128 for (i = 0; i < dev->nb_event_queues; i++) {
129 roc_sso_hws_unlink(&dev->sso,
130 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
132 roc_sso_hws_unlink(&dev->sso,
133 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
136 memset(dws, 0, sizeof(*dws));
/* Single-workslot port. */
139 for (i = 0; i < dev->nb_event_queues; i++)
140 roc_sso_hws_unlink(&dev->sso, ws->hws_id,
142 memset(ws, 0, sizeof(*ws));
/* Drain all events pending on HWGRP 'queue_id': repeatedly issue grouped
 * GET_WORK requests until the admission, dequeue and conflict counters all
 * read zero, handing each flushed event to 'fn' (if given) and flushing
 * any scheduling context it still holds.
 */
147 cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
148 cnxk_handle_event_t fn, void *arg)
150 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
151 struct cn9k_sso_hws_dual *dws;
152 struct cn9k_sso_hws_state *st;
153 struct cn9k_sso_hws *ws;
154 uint64_t cq_ds_cnt = 1;
/* Stop further scheduling from this HWGRP while we drain it. */
161 plt_write64(0, base + SSO_LF_GGRP_QCTL);
163 req = queue_id; /* GGRP ID */
164 req |= BIT_ULL(18); /* Grouped */
165 req |= BIT_ULL(16); /* WAIT */
167 aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
168 ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
169 cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
/* Mask keeps only the CQ and DS count fields of INT_CNT. */
170 cq_ds_cnt &= 0x3FFF3FFF0000;
/* Dual-workslot port: drain via its first workslot. */
174 st = &dws->ws_state[0];
175 ws_base = dws->base[0];
178 st = (struct cn9k_sso_hws_state *)ws;
182 while (aq_cnt || cq_ds_cnt || ds_cnt) {
183 plt_write64(req, st->getwrk_op);
184 cn9k_sso_hws_get_work_empty(st, &ev);
185 if (fn != NULL && ev.u64 != 0)
187 if (ev.sched_type != SSO_TT_EMPTY)
188 cnxk_sso_hws_swtag_flush(st->tag_op,
/* BIT(56): poll until the switch-tag flush has retired. */
191 val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
192 } while (val & BIT_ULL(56));
193 aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
194 ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
195 cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
196 /* Extract cq and ds count */
197 cq_ds_cnt &= 0x3FFF3FFF0000;
/* Invalidate the get-work cache of the workslot used for draining. */
200 plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
204 cn9k_sso_hws_reset(void *arg, void *hws)
206 struct cnxk_sso_evdev *dev = arg;
207 struct cn9k_sso_hws_dual *dws;
208 struct cn9k_sso_hws *ws;
217 for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
218 base = dev->dual_ws ? dws->base[i] : ws->base;
219 /* Wait till getwork/swtp/waitw/desched completes. */
221 pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
222 } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
225 tag = plt_read64(base + SSOW_LF_GWS_TAG);
226 pend_tt = (tag >> 32) & 0x3;
227 if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
228 if (pend_tt == SSO_TT_ATOMIC ||
229 pend_tt == SSO_TT_ORDERED)
230 cnxk_sso_hws_swtag_untag(
231 base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
232 plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
235 /* Wait for desched to complete. */
237 pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
238 } while (pend_state & BIT_ULL(58));
243 cn9k_sso_set_rsrc(void *arg)
245 struct cnxk_sso_evdev *dev = arg;
248 dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
250 dev->max_event_ports = dev->sso.max_hws;
251 dev->max_event_queues =
252 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
253 RTE_EVENT_MAX_QUEUES_PER_DEV :
258 cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
260 struct cnxk_sso_evdev *dev = arg;
263 hws = hws * CN9K_DUAL_WS_NB_WS;
265 return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
/* Grow every port structure (via its leading cookie) so it can carry a
 * private copy of the device's Tx adapter data table, then copy the table
 * in. Ports are reallocated in place and re-published in
 * event_dev->data->ports[].
 */
269 cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
271 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
/* Nothing to propagate yet. */
274 if (dev->tx_adptr_data == NULL)
277 for (i = 0; i < dev->nb_event_ports; i++) {
279 struct cn9k_sso_hws_dual *dws =
280 event_dev->data->ports[i];
283 ws_cookie = cnxk_sso_hws_get_cookie(dws);
284 ws_cookie = rte_realloc_socket(
286 sizeof(struct cnxk_sso_hws_cookie) +
287 sizeof(struct cn9k_sso_hws_dual) +
289 (dev->max_port_id + 1) *
290 RTE_MAX_QUEUES_PER_PORT),
291 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
292 if (ws_cookie == NULL)
294 dws = RTE_PTR_ADD(ws_cookie,
295 sizeof(struct cnxk_sso_hws_cookie));
296 memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
297 sizeof(uint64_t) * (dev->max_port_id + 1) *
298 RTE_MAX_QUEUES_PER_PORT);
299 event_dev->data->ports[i] = dws;
/* Single-workslot port. */
301 struct cn9k_sso_hws *ws = event_dev->data->ports[i];
304 ws_cookie = cnxk_sso_hws_get_cookie(ws);
305 ws_cookie = rte_realloc_socket(
307 sizeof(struct cnxk_sso_hws_cookie) +
/* NOTE(review): this branch handles struct cn9k_sso_hws yet sizes the
 * realloc with sizeof(struct cn9k_sso_hws_dual) — looks like a
 * copy/paste from the dual-WS branch. Over-allocation only (not a
 * corruption risk), but verify and fix against upstream.
 */
308 sizeof(struct cn9k_sso_hws_dual) +
310 (dev->max_port_id + 1) *
311 RTE_MAX_QUEUES_PER_PORT),
312 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
313 if (ws_cookie == NULL)
315 ws = RTE_PTR_ADD(ws_cookie,
316 sizeof(struct cnxk_sso_hws_cookie));
317 memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
318 sizeof(uint64_t) * (dev->max_port_id + 1) *
319 RTE_MAX_QUEUES_PER_PORT);
320 event_dev->data->ports[i] = ws;
/* Bind the fast-path function pointers on the eventdev. Builds lookup
 * tables of template-generated dequeue/enqueue variants (one entry per
 * combination of six offload flags, populated via the NIX_RX/TX_FASTPATH_MODES
 * X-macros), then selects the entry matching the configured Rx/Tx offloads
 * with CN9K_SET_EVDEV_{DEQ,ENQ}_OP. Separate table families exist for
 * single- vs dual-workslot, timeout vs non-timeout and scalar vs
 * multi-segment paths.
 */
329 cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
331 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
332 /* Single WS modes */
333 const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
334 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
335 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
336 NIX_RX_FASTPATH_MODES
340 const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
341 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
342 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
343 NIX_RX_FASTPATH_MODES
347 const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
348 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
349 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
350 NIX_RX_FASTPATH_MODES
354 const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
355 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
356 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
357 NIX_RX_FASTPATH_MODES
361 const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
362 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
363 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
364 NIX_RX_FASTPATH_MODES
368 const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
369 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
370 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
371 NIX_RX_FASTPATH_MODES
375 const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
376 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
377 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
378 NIX_RX_FASTPATH_MODES
382 const event_dequeue_burst_t
383 sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
384 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
385 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
386 NIX_RX_FASTPATH_MODES
/* Dual-workslot dequeue variants. */
391 const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
392 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
393 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
394 NIX_RX_FASTPATH_MODES
398 const event_dequeue_burst_t sso_hws_dual_deq_burst[2][2][2][2][2][2] = {
399 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
400 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
401 NIX_RX_FASTPATH_MODES
405 const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2] = {
406 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
407 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
408 NIX_RX_FASTPATH_MODES
412 const event_dequeue_burst_t
413 sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2] = {
414 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
415 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
416 NIX_RX_FASTPATH_MODES
420 const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
421 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
422 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
423 NIX_RX_FASTPATH_MODES
427 const event_dequeue_burst_t
428 sso_hws_dual_deq_seg_burst[2][2][2][2][2][2] = {
429 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
430 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
431 NIX_RX_FASTPATH_MODES
435 const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2] = {
436 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
437 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
438 NIX_RX_FASTPATH_MODES
442 const event_dequeue_burst_t
443 sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2] = {
444 #define R(name, f5, f4, f3, f2, f1, f0, flags) \
445 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
446 NIX_RX_FASTPATH_MODES
/* Tx adapter enqueue variants, keyed on Tx offload flags. */
451 const event_tx_adapter_enqueue
452 sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
453 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
454 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
455 NIX_TX_FASTPATH_MODES
459 const event_tx_adapter_enqueue
460 sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
461 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
462 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
463 NIX_TX_FASTPATH_MODES
467 const event_tx_adapter_enqueue
468 sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
469 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
470 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
471 NIX_TX_FASTPATH_MODES
475 const event_tx_adapter_enqueue
476 sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
477 #define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
478 [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
479 NIX_TX_FASTPATH_MODES
/* Single-workslot selection: timeout variants override the plain ones
 * when timeout dequeue is configured.
 */
483 event_dev->enqueue = cn9k_sso_hws_enq;
484 event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
485 event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
486 event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
487 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
488 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
489 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
490 sso_hws_deq_seg_burst);
491 if (dev->is_timeout_deq) {
492 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
493 sso_hws_deq_tmo_seg);
494 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
495 sso_hws_deq_tmo_seg_burst);
498 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
499 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
501 if (dev->is_timeout_deq) {
502 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
504 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
505 sso_hws_deq_tmo_burst);
509 if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
510 CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
511 sso_hws_tx_adptr_enq_seg);
513 CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
514 sso_hws_tx_adptr_enq);
/* Dual-workslot selection mirrors the single-WS logic above. */
517 event_dev->enqueue = cn9k_sso_hws_dual_enq;
518 event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
519 event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
520 event_dev->enqueue_forward_burst =
521 cn9k_sso_hws_dual_enq_fwd_burst;
523 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
524 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
525 sso_hws_dual_deq_seg);
526 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
527 sso_hws_dual_deq_seg_burst);
528 if (dev->is_timeout_deq) {
529 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
530 sso_hws_dual_deq_tmo_seg);
531 CN9K_SET_EVDEV_DEQ_OP(
532 dev, event_dev->dequeue_burst,
533 sso_hws_dual_deq_tmo_seg_burst);
536 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
538 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
539 sso_hws_dual_deq_burst);
540 if (dev->is_timeout_deq) {
541 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
542 sso_hws_dual_deq_tmo);
543 CN9K_SET_EVDEV_DEQ_OP(
544 dev, event_dev->dequeue_burst,
545 sso_hws_dual_deq_tmo_burst);
549 if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
550 CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
551 sso_hws_dual_tx_adptr_enq_seg);
553 CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
554 sso_hws_dual_tx_adptr_enq);
/* Same-destination Tx enqueue uses the generic path on CN9K. */
557 event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
/* Allocate and initialize the memory behind one event port. The first
 * cache line of each allocation is reserved for the cnxk_sso_hws_cookie;
 * the returned pointer is advanced past it. In dual-WS mode both paired
 * workslot bases are resolved and their op addresses precomputed.
 */
562 cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
564 struct cnxk_sso_evdev *dev = arg;
565 struct cn9k_sso_hws_dual *dws;
566 struct cn9k_sso_hws *ws;
570 dws = rte_zmalloc("cn9k_dual_ws",
571 sizeof(struct cn9k_sso_hws_dual) +
573 RTE_CACHE_LINE_SIZE);
575 plt_err("Failed to alloc memory for port=%d", port_id);
/* Skip the leading cookie cache line. */
579 dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
580 dws->base[0] = roc_sso_hws_base_get(
581 &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
582 dws->base[1] = roc_sso_hws_base_get(
583 &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
584 cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
585 cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
586 dws->hws_id = port_id;
592 /* Allocate event port memory */
593 ws = rte_zmalloc("cn9k_ws",
594 sizeof(struct cn9k_sso_hws) +
596 RTE_CACHE_LINE_SIZE);
598 plt_err("Failed to alloc memory for port=%d", port_id);
602 /* First cache line is reserved for cookie */
603 ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
604 ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
605 cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
606 ws->hws_id = port_id;
/* eventdev info_get op: report the CN9K driver name, then delegate the
 * common capability/limit fields to the shared cnxk helper.
 */
616 cn9k_sso_info_get(struct rte_eventdev *event_dev,
617 struct rte_event_dev_info *dev_info)
619 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
621 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
622 cnxk_sso_info_get(dev, dev_info);
/* eventdev configure op: validate the requested configuration, re-init
 * the SSO resources for the new port/queue counts, allocate XAQ buffers,
 * create the event ports and restore any links that existed before the
 * reconfigure. On failure the SSO resources are torn down and the port
 * count reset.
 */
626 cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
628 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
631 rc = cnxk_sso_dev_validate(event_dev);
633 plt_err("Invalid event device configuration");
/* Drop any previous reservation before re-initializing. */
637 roc_sso_rsrc_fini(&dev->sso);
639 rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
641 plt_err("Failed to initialize SSO resources");
645 rc = cnxk_sso_xaq_allocate(dev);
649 rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
654 /* Restore any prior port-queue mapping. */
655 cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);
/* Error path: undo the resource reservation. */
662 roc_sso_rsrc_fini(&dev->sso);
663 dev->nb_event_ports = 0;
/* eventdev port_setup op: port_conf is unused here; the shared helper
 * drives the CN9K-specific cn9k_sso_hws_setup callback.
 */
668 cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
669 const struct rte_event_port_conf *port_conf)
672 RTE_SET_USED(port_conf);
673 return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
/* eventdev port_release op: tear down the port only if it was configured,
 * then zero and free the cookie allocation (which owns the port memory).
 */
677 cn9k_sso_port_release(void *port)
679 struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
680 struct cnxk_sso_evdev *dev;
685 dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
686 if (!gws_cookie->configured)
689 cn9k_sso_hws_release(dev, port);
690 memset(gws_cookie, 0, sizeof(*gws_cookie));
692 rte_free(gws_cookie);
/* eventdev port_link op: widen the uint8_t queue ids to the uint16_t
 * HWGRP ids the ROC layer expects; priorities are not supported on this
 * hardware and are ignored.
 */
696 cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
697 const uint8_t queues[], const uint8_t priorities[],
700 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
701 uint16_t hwgrp_ids[nb_links];
704 RTE_SET_USED(priorities);
705 for (link = 0; link < nb_links; link++)
706 hwgrp_ids[link] = queues[link];
707 nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
709 return (int)nb_links;
/* eventdev port_unlink op: mirror of cn9k_sso_port_link() — widen queue
 * ids and delegate to cn9k_sso_hws_unlink().
 */
713 cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
714 uint8_t queues[], uint16_t nb_unlinks)
716 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
717 uint16_t hwgrp_ids[nb_unlinks];
720 for (unlink = 0; unlink < nb_unlinks; unlink++)
721 hwgrp_ids[unlink] = queues[unlink];
722 nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
724 return (int)nb_unlinks;
/* eventdev start op: propagate Tx adapter data into the ports, run the
 * common start sequence (reset + flush per port), then rebind the
 * fast-path function pointers for the final configuration.
 */
728 cn9k_sso_start(struct rte_eventdev *event_dev)
732 rc = cn9k_sso_updt_tx_adptr_data(event_dev);
736 rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
737 cn9k_sso_hws_flush_events);
741 cn9k_sso_fp_fns_set(event_dev);
/* eventdev stop op: common stop sequence with CN9K reset/flush callbacks. */
747 cn9k_sso_stop(struct rte_eventdev *event_dev)
749 cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
/* eventdev close op: common teardown, unlinking queues via the CN9K hook. */
753 cn9k_sso_close(struct rte_eventdev *event_dev)
755 return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
/* eventdev selftest op: run the shared cnxk selftest against this PMD. */
759 cn9k_sso_selftest(void)
761 return cnxk_sso_selftest(RTE_STR(event_cn9k));
/* Rx adapter caps op: advertise internal-port capabilities only when the
 * ethdev is driven by the matching CN9K net PMD; otherwise fall back to
 * the SW-assisted adapter capability.
 */
765 cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
766 const struct rte_eth_dev *eth_dev, uint32_t *caps)
770 RTE_SET_USED(event_dev);
/* NOTE(review): length 9 covers "net_cn9k" plus its NUL, i.e. an exact
 * match, while the sibling adapter functions below use length 8 (prefix
 * match). Verify the intended semantics and make them consistent.
 */
771 rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
773 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
775 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
776 RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
777 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
/* Push the Rx lookup-mem pointer and timestamp info into every event
 * port so the fast-path dequeue can translate CQE data without touching
 * device-level state.
 */
783 cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
786 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
789 for (i = 0; i < dev->nb_event_ports; i++) {
791 struct cn9k_sso_hws_dual *dws =
792 event_dev->data->ports[i];
793 dws->lookup_mem = lookup_mem;
794 dws->tstamp = tstmp_info;
/* Single-workslot port. */
796 struct cn9k_sso_hws *ws = event_dev->data->ports[i];
797 ws->lookup_mem = lookup_mem;
798 ws->tstamp = tstmp_info;
/* Rx adapter queue_add op: after the common add succeeds, pull the
 * lookup-mem/timestamp info from the ethdev's first Rx queue, distribute
 * it to all event ports and refresh the fast-path function pointers.
 */
804 cn9k_sso_rx_adapter_queue_add(
805 const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
807 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
809 struct cn9k_eth_rxq *rxq;
/* Only the CN9K net PMD supports the internal-port adapter path. */
814 rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
818 rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
/* Queue 0 is used as the representative source of per-device Rx state. */
823 rxq = eth_dev->data->rx_queues[0];
824 lookup_mem = rxq->lookup_mem;
825 tstmp_info = rxq->tstamp;
826 cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
827 cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
/* Rx adapter queue_del op: reject non-CN9K ethdevs, then delegate to the
 * common removal helper.
 */
833 cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
834 const struct rte_eth_dev *eth_dev,
839 rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
843 return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
/* Tx adapter caps op: internal-port capability is offered only for the
 * matching CN9K net PMD.
 */
847 cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
848 const struct rte_eth_dev *eth_dev, uint32_t *caps)
853 ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
857 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
/* Adjust a Tx queue's SQB flow-control budget when it is bound to or
 * released from the Tx adapter. A negative tx_queue_id applies the change
 * to every Tx queue of the ethdev. The adjusted count is rounded to whole
 * SQBs and derated to 70% as headroom.
 */
863 cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
866 struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
867 struct cn9k_eth_txq *txq;
868 struct roc_nix_sq *sq;
/* Wildcard: recurse over all Tx queues. */
871 if (tx_queue_id < 0) {
872 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
873 cn9k_sso_txq_fc_update(eth_dev, i, ena);
877 sq = &cnxk_eth_dev->sqs[tx_queue_id];
878 txq = eth_dev->data->tx_queues[tx_queue_id];
/* When enabling, cap the limit at CNXK_SSO_SQB_LIMIT. */
880 ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
882 txq->nb_sqb_bufs_adj =
884 RTE_ALIGN_MUL_CEIL(sq_limit,
885 (1ULL << txq->sqes_per_sqb_log2)) /
886 (1ULL << txq->sqes_per_sqb_log2);
/* Keep 30% of SQBs in reserve to absorb bursts. */
887 txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
/* Tx adapter queue_add op: after the common add, enable SQB flow control
 * on the queue, propagate the Tx adapter data to the event ports and
 * refresh the fast-path function pointers.
 */
892 cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
893 const struct rte_eth_dev *eth_dev,
899 rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
902 cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
903 rc = cn9k_sso_updt_tx_adptr_data(event_dev);
906 cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
/* Tx adapter queue_del op: common removal, restore the queue's SQB
 * flow-control budget, then re-propagate the Tx adapter data.
 */
912 cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
913 const struct rte_eth_dev *eth_dev,
919 rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
922 cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
923 return cn9k_sso_updt_tx_adptr_data(event_dev);
/* eventdev ops table: CN9K-specific handlers for device/port lifecycle
 * and adapter hooks; queue-level and generic ops reuse the shared cnxk
 * implementations.
 */
926 static struct rte_eventdev_ops cn9k_sso_dev_ops = {
927 .dev_infos_get = cn9k_sso_info_get,
928 .dev_configure = cn9k_sso_dev_configure,
929 .queue_def_conf = cnxk_sso_queue_def_conf,
930 .queue_setup = cnxk_sso_queue_setup,
931 .queue_release = cnxk_sso_queue_release,
932 .port_def_conf = cnxk_sso_port_def_conf,
933 .port_setup = cn9k_sso_port_setup,
934 .port_release = cn9k_sso_port_release,
935 .port_link = cn9k_sso_port_link,
936 .port_unlink = cn9k_sso_port_unlink,
937 .timeout_ticks = cnxk_sso_timeout_ticks,
939 .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
940 .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
941 .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
942 .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
943 .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
945 .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
946 .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
947 .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,
949 .timer_adapter_caps_get = cnxk_tim_caps_get,
951 .dump = cnxk_sso_dump,
952 .dev_start = cn9k_sso_start,
953 .dev_stop = cn9k_sso_stop,
954 .dev_close = cn9k_sso_close,
955 .dev_selftest = cn9k_sso_selftest,
/* Probe-time init: reject builds not configured for CN9K's 128-byte cache
 * line, install the ops table, short-circuit secondary processes to a
 * fast-path rebind, then run the common init and compute resource limits.
 * Fails (and tears down) if the hardware exposes no usable ports/queues.
 */
959 cn9k_sso_init(struct rte_eventdev *event_dev)
961 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
964 if (RTE_CACHE_LINE_SIZE != 128) {
965 plt_err("Driver not compiled for CN9K");
971 plt_err("Failed to initialize platform model");
975 event_dev->dev_ops = &cn9k_sso_dev_ops;
976 /* For secondary processes, the primary has done all the work */
977 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
978 cn9k_sso_fp_fns_set(event_dev);
982 rc = cnxk_sso_init(event_dev);
986 cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
987 if (!dev->max_event_ports || !dev->max_event_queues) {
988 plt_err("Not enough eventdev resource queues=%d ports=%d",
989 dev->max_event_queues, dev->max_event_ports);
990 cnxk_sso_fini(event_dev);
994 plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
995 event_dev->data->name, dev->max_event_queues,
996 dev->max_event_ports);
/* PCI probe callback: allocate the device-private area and run init. */
1002 cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1004 return rte_event_pmd_pci_probe(
1005 pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
/* PCI id table, driver descriptor and PMD registration. The remove hook
 * is shared with the other cnxk eventdev PMDs.
 */
1008 static const struct rte_pci_id cn9k_pci_sso_map[] = {
1014 static struct rte_pci_driver cn9k_pci_sso = {
1015 .id_table = cn9k_pci_sso_map,
1016 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
1017 .probe = cn9k_sso_probe,
1018 .remove = cnxk_sso_remove,
1021 RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
1022 RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
1023 RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
/* Supported devargs accepted on the EAL command line for this PMD. */
1024 RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
1025 CNXK_SSO_GGRP_QOS "=<string>"
1026 CNXK_SSO_FORCE_BP "=1"
1027 CN9K_SSO_SINGLE_WS "=1"
1028 CNXK_TIM_DISABLE_NPA "=1"
1029 CNXK_TIM_CHNK_SLOTS "=<int>"
1030 CNXK_TIM_RINGS_LMT "=<int>"
1031 CNXK_TIM_STATS_ENA "=1");