/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]

#define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                           \
	enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]

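/* Compose the wdata programmed into the GWS GET_WORK request for the
 * configured getwork mode. The bit assignments (16 for timed getwork,
 * 19/20 for the prefetch variants) follow the SSOW usage elsewhere in
 * this driver; the HRM holds the authoritative field definitions.
 */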
static uint32_t
cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
{
	uint32_t wdata = 1;

	if (dev->deq_tmo_ns)
		wdata |= BIT(16);

	switch (dev->gw_mode) {
	case CN10K_GW_MODE_NONE:
	default:
		break;
	case CN10K_GW_MODE_PREF:
		wdata |= BIT(19);
		break;
	case CN10K_GW_MODE_PREF_WFE:
		wdata |= BIT(20) | BIT(19);
		break;
	}

	return wdata;
}

static void *
cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws;

	/* Allocate event port memory */
	ws = rte_zmalloc("cn10k_ws",
			 sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
			 RTE_CACHE_LINE_SIZE);
	if (ws == NULL) {
		plt_err("Failed to alloc memory for port=%d", port_id);
		return NULL;
	}

	/* First cache line is reserved for cookie */
	ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
	ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
	ws->hws_id = port_id;
	ws->swtag_req = 0;
	ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
	ws->lmt_base = dev->sso.lmt_base;

	return ws;
}

static int
cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = port;

	return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
}

static int
cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = port;

	return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
}

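/* Per-port setup: publish the group base, XAQ flow-control context and
 * getwork timeout to the HWS. NW_TIM takes the timeout in a coarser
 * unit than nanoseconds, hence the NSEC2USEC() conversion below.
 */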
static void
cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = hws;
	uint64_t val;

	ws->grp_base = grp_base;
	ws->fc_mem = (uint64_t *)dev->fc_iova;
	ws->xaq_lmt = dev->xaq_lmt;

	/* Set get_work timeout for HWS */
	val = NSEC2USEC(dev->deq_tmo_ns);
	val = val ? val - 1 : 0;
	plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
}

static void
cn10k_sso_hws_release(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = hws;
	uint16_t i;

	for (i = 0; i < dev->nb_event_queues; i++)
		roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1);
	memset(ws, 0, sizeof(*ws));
}

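/* Drain a group (GGRP) before teardown: keep issuing GETWORK requests
 * and pass each received event to the caller's handler until the
 * counts read back from the GGRP AQ_CNT, MISC_CNT and INT_CNT
 * registers all drop to zero.
 */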
static void
cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
			   cnxk_handle_event_t fn, void *arg)
{
	struct cn10k_sso_hws *ws = hws;
	uint64_t cq_ds_cnt = 1;
	uint64_t aq_cnt = 1;
	uint64_t ds_cnt = 1;
	struct rte_event ev;
	uint64_t val, req;

	plt_write64(0, base + SSO_LF_GGRP_QCTL);

	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
	req = queue_id;	    /* GGRP ID */
	req |= BIT_ULL(18); /* Grouped */
	req |= BIT_ULL(16); /* WAIT */

	aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
	ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
	cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
	cq_ds_cnt &= 0x3FFF3FFF0000;

	while (aq_cnt || cq_ds_cnt || ds_cnt) {
		plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
		cn10k_sso_hws_get_work_empty(ws, &ev);
		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
		if (ev.sched_type != SSO_TT_EMPTY)
			cnxk_sso_hws_swtag_flush(
				ws->base + SSOW_LF_GWS_WQE0,
				ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
		do {
			val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
		} while (val & BIT_ULL(56));
		aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
		ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
		cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x3FFF3FFF0000;
	}

	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
	rte_mb();
}

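/* Quiesce a HWS: wait for in-flight getwork/swtag/desched operations
 * to retire, untag and desched any pending work, then drain whatever
 * the prefetch buffer still holds for the configured getwork mode.
 */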
static void
cn10k_sso_hws_reset(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = hws;
	uintptr_t base = ws->base;
	uint64_t pend_state;
	union {
		__uint128_t wdata;
		uint64_t u64[2];
	} gw;
	uint8_t pend_tt;

	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
	/* Wait till getwork/swtp/waitw/desched completes. */
	do {
		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
	} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
			       BIT_ULL(56) | BIT_ULL(54)));
	pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
	if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
		if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
			cnxk_sso_hws_swtag_untag(base +
						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
		plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
	}

	/* Wait for desched to complete. */
	do {
		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
	} while (pend_state & BIT_ULL(58));

	switch (dev->gw_mode) {
	case CN10K_GW_MODE_PREF:
		while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
			;
		break;
	case CN10K_GW_MODE_PREF_WFE:
		while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
		       SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
			;
		plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
		break;
	case CN10K_GW_MODE_NONE:
	default:
		break;
	}

	if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
	    SSO_TT_EMPTY) {
		plt_write64(BIT_ULL(16) | 1,
			    ws->base + SSOW_LF_GWS_OP_GET_WORK0);
		do {
			roc_load_pair(gw.u64[0], gw.u64[1],
				      ws->base + SSOW_LF_GWS_WQE0);
		} while (gw.u64[0] & BIT_ULL(63));
		pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
		if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
			if (pend_tt == SSO_TT_ATOMIC ||
			    pend_tt == SSO_TT_ORDERED)
				cnxk_sso_hws_swtag_untag(
					base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
			plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
		}
	}

	plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
	rte_mb();
}

static void
cn10k_sso_set_rsrc(void *arg)
{
	struct cnxk_sso_evdev *dev = arg;

	dev->max_event_ports = dev->sso.max_hws;
	dev->max_event_queues =
		dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
			RTE_EVENT_MAX_QUEUES_PER_DEV :
			dev->sso.max_hwgrp;
}

static int
cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
	struct cnxk_sso_evdev *dev = arg;

	return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

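/* Tx adapter data lives inline after each port's cn10k_sso_hws, one
 * cookie into the allocation, so every port block has to be regrown
 * via rte_realloc_socket() whenever that data changes size.
 */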
static int
cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	if (dev->tx_adptr_data == NULL)
		return 0;

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
		void *ws_cookie;

		ws_cookie = cnxk_sso_hws_get_cookie(ws);
		ws_cookie = rte_realloc_socket(
			ws_cookie,
			sizeof(struct cnxk_sso_hws_cookie) +
				sizeof(struct cn10k_sso_hws) +
				dev->tx_adptr_data_sz,
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (ws_cookie == NULL)
			return -ENOMEM;
		ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
		memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
		       dev->tx_adptr_data_sz);
		event_dev->data->ports[i] = ws;
	}

	return 0;
}

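/* Select fastpath handlers. Each table below is generated from
 * NIX_RX_FASTPATH_MODES/NIX_TX_FASTPATH_MODES and indexed by the
 * offload flag bitmask, letting CN10K_SET_EVDEV_DEQ_OP()/ENQ_OP()
 * pick the variant specialized for the offloads in use.
 */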
static void
cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_tx_adapter_enqueue_t
		sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	const event_tx_adapter_enqueue_t
		sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	event_dev->enqueue = cn10k_sso_hws_enq;
	event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
	event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
	event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
				       sso_hws_deq_seg);
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				       sso_hws_deq_seg_burst);
		if (dev->is_timeout_deq) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo_seg);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_seg_burst);
		}
		if (dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_ca_seg);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_ca_seg_burst);
		}
		if (dev->is_timeout_deq && dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo_ca_seg);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_ca_seg_burst);
		}
	} else {
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				       sso_hws_deq_burst);
		if (dev->is_timeout_deq) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_burst);
		}
		if (dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_ca);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_ca_burst);
		}
		if (dev->is_timeout_deq && dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo_ca);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_ca_burst);
		}
	}
	event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;

	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				       sso_hws_tx_adptr_enq_seg);
	else
		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				       sso_hws_tx_adptr_enq);

	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
}

static void
cn10k_sso_info_get(struct rte_eventdev *event_dev,
		   struct rte_event_dev_info *dev_info)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
	cnxk_sso_info_get(dev, dev_info);
}

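/* Configure: validate the application's config, initialize SSO
 * HWS/HWGRP resources, allocate XAQ buffers and set up event port
 * memory; previously established queue-port links are restored so
 * reconfiguration does not drop them.
 */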
static int
cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
				 dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
				    cn10k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn10k_sso_hws_link);

	dev->configured = 1;
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}

static int
cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(port_conf);
	return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
}

static void
cn10k_sso_port_release(void *port)
{
	struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
	struct cnxk_sso_evdev *dev;

	if (port == NULL)
		return;

	dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
	if (!gws_cookie->configured)
		goto free;

	cn10k_sso_hws_release(dev, port);
	memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
	rte_free(gws_cookie);
}

static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_links];
	uint16_t link;

	RTE_SET_USED(priorities);
	for (link = 0; link < nb_links; link++)
		hwgrp_ids[link] = queues[link];
	nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

	return (int)nb_links;
}

static int
cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_unlinks];
	uint16_t unlink;

	for (unlink = 0; unlink < nb_unlinks; unlink++)
		hwgrp_ids[unlink] = queues[unlink];
	nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

	return (int)nb_unlinks;
}

static int
cn10k_sso_start(struct rte_eventdev *event_dev)
{
	int rc;

	rc = cn10k_sso_updt_tx_adptr_data(event_dev);
	if (rc < 0)
		return rc;

	rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
			    cn10k_sso_hws_flush_events);
	if (rc < 0)
		return rc;
	cn10k_sso_fp_fns_set(event_dev);

	return rc;
}

static void
cn10k_sso_stop(struct rte_eventdev *event_dev)
{
	cnxk_sso_stop(event_dev, cn10k_sso_hws_reset,
		      cn10k_sso_hws_flush_events);
}

static int
cn10k_sso_close(struct rte_eventdev *event_dev)
{
	return cnxk_sso_close(event_dev, cn10k_sso_hws_unlink);
}

static int
cn10k_sso_selftest(void)
{
	return cnxk_sso_selftest(RTE_STR(event_cn10k));
}

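/* Internal-port Rx adapter capabilities are advertised only for
 * cn10k ethdevs; any other driver falls back to the SW adapter
 * capability set.
 */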
static int
cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int rc;

	RTE_SET_USED(event_dev);
	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
	if (rc)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR;

	return 0;
}

static void
cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
		       struct cnxk_timesync_info *tstmp_info)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
		ws->lookup_mem = lookup_mem;
		ws->tstamp = tstmp_info;
	}
}

static int
cn10k_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct cn10k_eth_rxq *rxq;
	struct cnxk_timesync_info *tstmp_info;
	void *lookup_mem;
	int rc;

	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
	if (rc)
		return -EINVAL;

	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
					   queue_conf);
	if (rc)
		return -EINVAL;

	rxq = eth_dev->data->rx_queues[0];
	lookup_mem = rxq->lookup_mem;
	tstmp_info = rxq->tstamp;
	cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

static int
cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
			       const struct rte_eth_dev *eth_dev,
			       int32_t rx_queue_id)
{
	int rc;

	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
	if (rc)
		return -EINVAL;

	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn10k_sso_rx_adapter_vector_limits(
	const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
	struct rte_event_eth_rx_adapter_vector_limits *limits)
{
	struct cnxk_eth_dev *cnxk_eth_dev;
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
	if (ret)
		return -ENOTSUP;

	cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
	limits->log2_sz = true;
	limits->min_sz = 1 << ROC_NIX_VWQE_MIN_SIZE_LOG2;
	limits->max_sz = 1 << ROC_NIX_VWQE_MAX_SIZE_LOG2;
	limits->min_timeout_ns =
		(roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100;
	limits->max_timeout_ns = BITMASK_ULL(8, 0) * limits->min_timeout_ns;

	return 0;
}

static int
cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
			      const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT |
			RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;

	return 0;
}

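/* Recompute the SQB budget used for Tx adapter flow control. Part of
 * the budget is set aside for security (outbound) descriptors, and
 * only 70% of the adjusted count is advertised to the event path,
 * presumably as headroom for SQEs still in flight.
 */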
static void
cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cn10k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int i;

	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cn10k_sso_txq_fc_update(eth_dev, i);
	} else {
		uint16_t sqes_per_sqb;

		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
		sq->nb_sqb_bufs_adj =
			sq->nb_sqb_bufs -
			RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
				sqes_per_sqb;
		if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
						sqes_per_sqb);
		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
	}
}

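/* Adding a Tx queue re-evaluates the device-level Tx offload flags:
 * timestamping can stay enabled only if every port added so far has
 * it enabled, so the flags are reconciled against the accumulated
 * dev->tx_offloads before the fastpath handlers are re-selected.
 */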
static int
cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
			       const struct rte_eth_dev *eth_dev,
			       int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint64_t tx_offloads;
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;

	/* Can't enable tstamp if all the ports don't have it enabled. */
	tx_offloads = cnxk_eth_dev->tx_offload_flags;
	if (dev->tx_adptr_configured) {
		uint8_t tstmp_req = !!(tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
		uint8_t tstmp_ena =
			!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);

		if (tstmp_ena && !tstmp_req)
			dev->tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
		else if (!tstmp_ena && tstmp_req)
			tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
	}

	dev->tx_offloads |= tx_offloads;
	cn10k_sso_txq_fc_update(eth_dev, tx_queue_id);
	rc = cn10k_sso_updt_tx_adptr_data(event_dev);
	if (rc < 0)
		return rc;
	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
	dev->tx_adptr_configured = 1;

	return 0;
}

static int
cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
			       const struct rte_eth_dev *eth_dev,
			       int32_t tx_queue_id)
{
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;

	return cn10k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
			      const struct rte_cryptodev *cdev, uint32_t *caps)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

	return 0;
}

static int
cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id,
			    const struct rte_event *event)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(event);

	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	dev->is_ca_internal_port = 1;
	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn10k_sso_dev_ops = {
	.dev_infos_get = cn10k_sso_info_get,
	.dev_configure = cn10k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn10k_sso_port_setup,
	.port_release = cn10k_sso_port_release,
	.port_link = cn10k_sso_port_link,
	.port_unlink = cn10k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,

	.eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

	.eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,

	.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,

	.timer_adapter_caps_get = cnxk_tim_caps_get,

	.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
	.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,

	.dump = cnxk_sso_dump,
	.dev_start = cn10k_sso_start,
	.dev_stop = cn10k_sso_stop,
	.dev_close = cn10k_sso_close,
	.dev_selftest = cn10k_sso_selftest,
};

static int
cn10k_sso_init(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	if (RTE_CACHE_LINE_SIZE != 64) {
		plt_err("Driver not compiled for CN10K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc < 0) {
		plt_err("Failed to initialize platform model");
		return rc;
	}

	event_dev->dev_ops = &cn10k_sso_dev_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		cn10k_sso_fp_fns_set(event_dev);
		return 0;
	}

	rc = cnxk_sso_init(event_dev);
	if (rc < 0)
		return rc;

	cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
	if (!dev->max_event_ports || !dev->max_event_queues) {
		plt_err("Not enough eventdev resource queues=%d ports=%d",
			dev->max_event_queues, dev->max_event_ports);
		cnxk_sso_fini(event_dev);
		return -ENODEV;
	}

	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		    event_dev->data->name, dev->max_event_queues,
		    dev->max_event_ports);

	return 0;
}

static int
cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
				       sizeof(struct cnxk_sso_evdev),
				       cn10k_sso_init);
}

static const struct rte_pci_id cn10k_pci_sso_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn10k_pci_sso = {
	.id_table = cn10k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn10k_sso_probe,
	.remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CNXK_SSO_FORCE_BP "=1"
			      CN10K_SSO_GW_MODE "=<int>"
			      CNXK_TIM_DISABLE_NPA "=1"
			      CNXK_TIM_CHNK_SLOTS "=<int>"
			      CNXK_TIM_RINGS_LMT "=<int>"
			      CNXK_TIM_STATS_ENA "=1"
			      CNXK_TIM_EXT_CLK "=<string>");