/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#define CN9K_DUAL_WS_NB_WS	    2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))
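
/*
 * CN9K can pair two SSO workslots (HWS) behind a single event port ("dual
 * workslot" mode). CN9K_DUAL_WS_PAIR_ID() maps an event port id and a pair
 * index (0 or 1) to the underlying hardware workslot id.
 */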

#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
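
/*
 * For example, if dev->rx_offloads has only NIX_RX_OFFLOAD_RSS_F and
 * NIX_RX_OFFLOAD_CHECKSUM_F set, the macro selects the dequeue handler
 * generated for exactly that offload combination from a flag-indexed
 * deq_ops[] table.
 */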

#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
	(enq_op = enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]    \
			 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]      \
			 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]         \
			 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]   \
			 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]   \
			 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]\
			 [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
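
/*
 * Each "!!" collapses one Tx offload flag to 0 or 1, so the seven-dimensional
 * enq_ops table is indexed by one bit per offload: every combination of
 * security/tstamp/TSO/MBUF_NOFF/QinQ/outer-csum/inner-csum resolves to a
 * handler specialized at build time for exactly that feature set.
 */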

static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	int rc;

	if (dev->dual_ws) {
		dws = port;
		rc = roc_sso_hws_link(&dev->sso,
				      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
				      nb_link);
		rc |= roc_sso_hws_link(&dev->sso,
				       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
				       map, nb_link);
	} else {
		ws = port;
		rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
	}

	return rc;
}

static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	int rc;

	if (dev->dual_ws) {
		dws = port;
		rc = roc_sso_hws_unlink(&dev->sso,
					CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
					map, nb_link);
		rc |= roc_sso_hws_unlink(&dev->sso,
					 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
					 map, nb_link);
	} else {
		ws = port;
		rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
	}

	return rc;
}

static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t val;

	/* Set get_work tmo for HWS */
	val = dev->deq_tmo_ns ? NSEC2USEC(dev->deq_tmo_ns) - 1 : 0;
	if (dev->dual_ws) {
		dws = hws;
		dws->grp_base = grp_base;
		dws->fc_mem = (uint64_t *)dev->fc_iova;
		dws->xaq_lmt = dev->xaq_lmt;

		plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
		plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
	} else {
		ws = hws;
		ws->grp_base = grp_base;
		ws->fc_mem = (uint64_t *)dev->fc_iova;
		ws->xaq_lmt = dev->xaq_lmt;

		plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
	}
}

static void
cn9k_sso_hws_release(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint16_t i;

	if (dev->dual_ws) {
		dws = hws;
		for (i = 0; i < dev->nb_event_queues; i++) {
			roc_sso_hws_unlink(&dev->sso,
					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
					   &i, 1);
			roc_sso_hws_unlink(&dev->sso,
					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
					   &i, 1);
		}
		memset(dws, 0, sizeof(*dws));
	} else {
		ws = hws;
		for (i = 0; i < dev->nb_event_queues; i++)
			roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1);
		memset(ws, 0, sizeof(*ws));
	}
}

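/*
 * Flush events from a hardware group (GGRP): quiesce it via QCTL, then pull
 * work through a workslot until the group's AQ/MISC/INT occupancy counters
 * all drain to zero, handing each recovered event to the caller's callback.
 */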
static int
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
			  cnxk_handle_event_t fn, void *arg)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t cq_ds_cnt = 1;
	uint64_t aq_cnt = 1;
	uint64_t ds_cnt = 1;
	struct rte_event ev;
	uintptr_t ws_base;
	uint64_t val, req;

	plt_write64(0, base + SSO_LF_GGRP_QCTL);

	req = queue_id;	    /* GGRP ID */
	req |= BIT_ULL(18); /* Grouped */
	req |= BIT_ULL(16); /* WAIT */

	aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
	ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
	cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
	cq_ds_cnt &= 0x3FFF3FFF0000;

	if (dev->dual_ws) {
		dws = hws;
		ws_base = dws->base[0];
	} else {
		ws = hws;
		ws_base = ws->base;
	}

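	/*
	 * Keep requesting work on behalf of the dying group until all three
	 * occupancy counters read zero; each event pulled is passed to the
	 * callback and its tag context is released via SWTAG_FLUSH.
	 */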
	while (aq_cnt || cq_ds_cnt || ds_cnt) {
		plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
		cn9k_sso_hws_get_work_empty(ws_base, &ev);
		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
		if (ev.sched_type != SSO_TT_EMPTY)
			cnxk_sso_hws_swtag_flush(
				ws_base + SSOW_LF_GWS_TAG,
				ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
		do {
			val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
		} while (val & BIT_ULL(56));
		aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
		ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
		cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x3FFF3FFF0000;
	}

	plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);

	return 0;
}

static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t pend_state;
	uint8_t pend_tt;
	uintptr_t base;
	uint64_t tag;
	uint8_t i;

	dws = hws;
	ws = hws;

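	/*
	 * Reset each workslot backing the port (two in dual-workslot mode):
	 * wait for in-flight GET_WORK/SWTAG/desched to finish, untag or
	 * desched whatever is still held, then wait for the desched to
	 * complete so the slot is provably idle.
	 */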
	for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
		base = dev->dual_ws ? dws->base[i] : ws->base;
		/* Wait till getwork/swtp/waitw/desched completes. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
				       BIT_ULL(56)));

		tag = plt_read64(base + SSOW_LF_GWS_TAG);
		pend_tt = (tag >> 32) & 0x3;
		if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
			if (pend_tt == SSO_TT_ATOMIC ||
			    pend_tt == SSO_TT_ORDERED)
				cnxk_sso_hws_swtag_untag(
					base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
			plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
		}

		/* Wait for desched to complete. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & BIT_ULL(58));
	}
}

static void
cn9k_sso_set_rsrc(void *arg)
{
	struct cnxk_sso_evdev *dev = arg;

	if (dev->dual_ws)
		dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
	else
		dev->max_event_ports = dev->sso.max_hws;
	dev->max_event_queues =
		dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
			RTE_EVENT_MAX_QUEUES_PER_DEV :
			dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
	struct cnxk_sso_evdev *dev = arg;

	if (dev->dual_ws)
		hws = hws * CN9K_DUAL_WS_NB_WS;

	return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

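/*
 * Each port structure is preceded by a cnxk_sso_hws_cookie and followed by a
 * per-port copy of the Tx adapter data. When the adapter table grows, the
 * whole allocation (cookie + port + table) is reallocated and the table is
 * re-copied so the fastpath can read it directly from the port structure.
 */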
static int
cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	if (dev->tx_adptr_data == NULL)
		return 0;

	for (i = 0; i < dev->nb_event_ports; i++) {
		if (dev->dual_ws) {
			struct cn9k_sso_hws_dual *dws =
				event_dev->data->ports[i];
			void *ws_cookie;

			ws_cookie = cnxk_sso_hws_get_cookie(dws);
			ws_cookie = rte_realloc_socket(
				ws_cookie,
				sizeof(struct cnxk_sso_hws_cookie) +
					sizeof(struct cn9k_sso_hws_dual) +
					(sizeof(uint64_t) *
					 (dev->max_port_id + 1) *
					 RTE_MAX_QUEUES_PER_PORT),
				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
			if (ws_cookie == NULL)
				return -ENOMEM;
			dws = RTE_PTR_ADD(ws_cookie,
					  sizeof(struct cnxk_sso_hws_cookie));
			memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
			       sizeof(uint64_t) * (dev->max_port_id + 1) *
				       RTE_MAX_QUEUES_PER_PORT);
			event_dev->data->ports[i] = dws;
		} else {
			struct cn9k_sso_hws *ws = event_dev->data->ports[i];
			void *ws_cookie;

			ws_cookie = cnxk_sso_hws_get_cookie(ws);
			ws_cookie = rte_realloc_socket(
				ws_cookie,
				sizeof(struct cnxk_sso_hws_cookie) +
					sizeof(struct cn9k_sso_hws) +
					(sizeof(uint64_t) *
					 (dev->max_port_id + 1) *
					 RTE_MAX_QUEUES_PER_PORT),
				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
			if (ws_cookie == NULL)
				return -ENOMEM;
			ws = RTE_PTR_ADD(ws_cookie,
					 sizeof(struct cnxk_sso_hws_cookie));
			memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
			       sizeof(uint64_t) * (dev->max_port_id + 1) *
				       RTE_MAX_QUEUES_PER_PORT);
			event_dev->data->ports[i] = ws;
		}
	}
	rte_mb();

	return 0;
}

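/*
 * Select enqueue/dequeue fastpath handlers. The R()/T() expansions below
 * instantiate one handler per Rx/Tx offload-flag combination (the mode lists
 * come from NIX_RX_FASTPATH_MODES/NIX_TX_FASTPATH_MODES in the cn9k Rx/Tx
 * headers), and the tables are then indexed by the offloads actually
 * negotiated on the device.
 */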
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	/* Single WS modes */
	const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Dual WS modes */
	const event_dequeue_t sso_hws_dual_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Tx modes */
	const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
		NIX_TX_FASTPATH_MODES
#undef T
	};

	event_dev->enqueue = cn9k_sso_hws_enq;
	event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
	event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
	event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				      sso_hws_deq_seg_burst);
		if (dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_seg_burst);
		}
		if (dev->is_ca_internal_port) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_ca_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_ca_seg_burst);
		}
		if (dev->is_ca_internal_port && dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo_ca_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_ca_seg_burst);
		}
	} else {
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				      sso_hws_deq_burst);
		if (dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_burst);
		}
		if (dev->is_ca_internal_port) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_ca);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_ca_burst);
		}
		if (dev->is_ca_internal_port && dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo_ca);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_ca_burst);
		}
	}
	event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				      sso_hws_tx_adptr_enq_seg);
	else
		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				      sso_hws_tx_adptr_enq);

	if (dev->dual_ws) {
		event_dev->enqueue = cn9k_sso_hws_dual_enq;
		event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
		event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
		event_dev->enqueue_forward_burst =
			cn9k_sso_hws_dual_enq_fwd_burst;
		event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_dual_deq_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_dual_deq_seg_burst);
			if (dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_seg_burst);
			}
			if (dev->is_ca_internal_port) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_ca_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_ca_seg_burst);
			}
			if (dev->is_ca_internal_port && dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue,
					sso_hws_dual_deq_tmo_ca_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_ca_seg_burst);
			}
		} else {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_dual_deq);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_dual_deq_burst);
			if (dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_burst);
			}
			if (dev->is_ca_internal_port) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_ca);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_ca_burst);
			}
			if (dev->is_ca_internal_port && dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo_ca);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_ca_burst);
			}
		}

		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
			CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
					      sso_hws_dual_tx_adptr_enq_seg);
		else
			CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
					      sso_hws_dual_tx_adptr_enq);
	}

	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
	rte_mb();
}

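/*
 * Port memory layout: the first cache line of every allocation holds a
 * cnxk_sso_hws_cookie (device back-pointer and state). The pointer handed
 * out as the event port is advanced past the cookie, and
 * cnxk_sso_hws_get_cookie() walks back to recover it.
 */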
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	void *data;

	if (dev->dual_ws) {
		dws = rte_zmalloc("cn9k_dual_ws",
				  sizeof(struct cn9k_sso_hws_dual) +
					  RTE_CACHE_LINE_SIZE,
				  RTE_CACHE_LINE_SIZE);
		if (dws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
		dws->base[0] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
		dws->base[1] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
		dws->hws_id = port_id;

		data = dws;
	} else {
		/* Allocate event port memory */
		ws = rte_zmalloc("cn9k_ws",
				 sizeof(struct cn9k_sso_hws) +
					 RTE_CACHE_LINE_SIZE,
				 RTE_CACHE_LINE_SIZE);
		if (ws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		/* First cache line is reserved for cookie */
		ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
		ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
		ws->hws_id = port_id;

		data = ws;
	}

	return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
		  struct rte_event_dev_info *dev_info)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
	cnxk_sso_info_get(dev, dev_info);
}

static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
				    cn9k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

	dev->configured = 1;
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		    const struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(port_conf);
	return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
	struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
	struct cnxk_sso_evdev *dev;

	if (port == NULL)
		return;

	dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
	if (!gws_cookie->configured)
		goto free;

	cn9k_sso_hws_release(dev, port);
	memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
	rte_free(gws_cookie);
}

static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
		   const uint8_t queues[], const uint8_t priorities[],
		   uint16_t nb_links)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_links];
	uint16_t link;

	RTE_SET_USED(priorities);
	for (link = 0; link < nb_links; link++)
		hwgrp_ids[link] = queues[link];
	nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

	return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		     uint8_t queues[], uint16_t nb_unlinks)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_unlinks];
	uint16_t unlink;

	for (unlink = 0; unlink < nb_unlinks; unlink++)
		hwgrp_ids[unlink] = queues[unlink];
	nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

	return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
	int rc;

	rc = cn9k_sso_updt_tx_adptr_data(event_dev);
	if (rc < 0)
		return rc;

	rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
			    cn9k_sso_hws_flush_events);
	if (rc < 0)
		return rc;

	cn9k_sso_fp_fns_set(event_dev);

	return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
	cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
	return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
	return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static int
cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int rc;

	RTE_SET_USED(event_dev);
	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
	if (rc)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;

	return 0;
}

static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
		      void *tstmp_info)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		if (dev->dual_ws) {
			struct cn9k_sso_hws_dual *dws =
				event_dev->data->ports[i];
			dws->lookup_mem = lookup_mem;
			dws->tstamp = tstmp_info;
		} else {
			struct cn9k_sso_hws *ws = event_dev->data->ports[i];
			ws->lookup_mem = lookup_mem;
			ws->tstamp = tstmp_info;
		}
	}
}

static int
cn9k_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct cn9k_eth_rxq *rxq;
	void *lookup_mem;
	void *tstmp_info;
	int rc;

	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
	if (rc)
		return -EINVAL;

	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
					   queue_conf);
	if (rc)
		return -EINVAL;

	rxq = eth_dev->data->rx_queues[0];
	lookup_mem = rxq->lookup_mem;
	tstmp_info = rxq->tstamp;
	cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

static int
cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t rx_queue_id)
{
	int rc;

	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
	if (rc)
		return -EINVAL;

	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
		       bool ena)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cn9k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int i;

	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cn9k_sso_txq_fc_update(eth_dev, i, ena);
	} else {
		uint16_t sq_limit;

		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sq_limit =
			ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
			      sq->nb_sqb_bufs;
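
		/*
		 * Keep SQB headroom: e.g. with sq_limit = 512 buffers and
		 * 32 SQEs per SQB (sqes_per_sqb_log2 = 5), one SQB-worth is
		 * held back (512 - 512/32 = 496) and the result is further
		 * derated to 70% (347) so event-driven Tx cannot exhaust the
		 * SQB aura.
		 */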
		txq->nb_sqb_bufs_adj =
			sq_limit -
			RTE_ALIGN_MUL_CEIL(sq_limit,
					   (1ULL << txq->sqes_per_sqb_log2)) /
				(1ULL << txq->sqes_per_sqb_log2);
		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
	}
}

static int
cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;
	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
	rc = cn9k_sso_updt_tx_adptr_data(event_dev);
	if (rc < 0)
		return rc;
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

static int
cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev,
			      int32_t tx_queue_id)
{
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;
	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
	return cn9k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
			     const struct rte_cryptodev *cdev, uint32_t *caps)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

	return 0;
}

static int
cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
			   const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id, const struct rte_event *event)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(event);

	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
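
	/*
	 * Mark the crypto adapter as using an internal port and re-resolve
	 * the fastpath handlers so the _ca_ dequeue variants (which complete
	 * crypto ops as part of dequeue) get picked up.
	 */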
	dev->is_ca_internal_port = 1;
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
			   const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn9k_sso_dev_ops = {
	.dev_infos_get = cn9k_sso_info_get,
	.dev_configure = cn9k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn9k_sso_port_setup,
	.port_release = cn9k_sso_port_release,
	.port_link = cn9k_sso_port_link,
	.port_unlink = cn9k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,

	.eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

	.eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

	.timer_adapter_caps_get = cnxk_tim_caps_get,

	.crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

	.dump = cnxk_sso_dump,
	.dev_start = cn9k_sso_start,
	.dev_stop = cn9k_sso_stop,
	.dev_close = cn9k_sso_close,
	.dev_selftest = cn9k_sso_selftest,
};

static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc < 0) {
		plt_err("Failed to initialize platform model");
		return rc;
	}

	event_dev->dev_ops = &cn9k_sso_dev_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		cn9k_sso_fp_fns_set(event_dev);
		return 0;
	}

	rc = cnxk_sso_init(event_dev);
	if (rc < 0)
		return rc;

	cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
	if (!dev->max_event_ports || !dev->max_event_queues) {
		plt_err("Not enough eventdev resource queues=%d ports=%d",
			dev->max_event_queues, dev->max_event_ports);
		cnxk_sso_fini(event_dev);
		return -ENODEV;
	}

	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		    event_dev->data->name, dev->max_event_queues,
		    dev->max_event_ports);

	return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(
		pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn9k_pci_sso = {
	.id_table = cn9k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn9k_sso_probe,
	.remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
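/* Runtime devargs accepted by event_cn9k; see the cnxk eventdev guide. */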
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CNXK_SSO_FORCE_BP "=1"
			      CN9K_SSO_SINGLE_WS "=1"
			      CNXK_TIM_DISABLE_NPA "=1"
			      CNXK_TIM_CHNK_SLOTS "=<int>"
			      CNXK_TIM_RINGS_LMT "=<int>"
			      CNXK_TIM_STATS_ENA "=1");