1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
 */
5 #include "cn10k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
/* Pick the Rx-offload-specific event dequeue handler out of @deq_ops,
 * indexed by the device's Rx offload flags masked to the table size.
 */
9 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops) \
10 deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
/* Same idea for Tx: pick the Tx-offload-specific enqueue handler. */
12 #define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops) \
13 enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]
/*
 * Build the GET_WORK request word (wdata) for the configured getwork mode.
 * Base value sets BIT(16) | 1; PREF_WFE additionally sets BIT(20) | BIT(19).
 * NOTE(review): the embedded line numbering (16..28 with gaps) shows the
 * return type, braces, per-case break statements and the final return are
 * missing from this view — confirm against the full file. The exact bit
 * semantics of BIT(16)/BIT(19)/BIT(20) are hardware-defined; not derivable
 * from this view alone.
 */
16 cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
18 uint32_t wdata = BIT(16) | 1;
20 switch (dev->gw_mode) {
21 case CN10K_GW_MODE_NONE:
24 case CN10K_GW_MODE_PREF:
27 case CN10K_GW_MODE_PREF_WFE:
28 wdata |= BIT(20) | BIT(19);
/*
 * Allocate and minimally initialize one HWS (hardware work slot, i.e. event
 * port) structure for @port_id. An extra cache line is allocated in front of
 * the struct and reserved for a cookie; the working pointer is advanced past
 * it before field initialization.
 * NOTE(review): the allocation-failure return path, alignment argument to
 * rte_zmalloc and the final return are not visible in this view.
 */
36 cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
38 struct cnxk_sso_evdev *dev = arg;
39 struct cn10k_sso_hws *ws;
41 /* Allocate event port memory */
42 ws = rte_zmalloc("cn10k_ws",
43 sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
46 plt_err("Failed to alloc memory for port=%d", port_id);
50 /* First cache line is reserved for cookie */
51 ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
52 ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
/* Tx base shares the HWS register base. */
53 ws->tx_base = ws->base;
56 ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
57 ws->lmt_base = dev->sso.lmt_base;
/* Link @nb_link HWGRPs (queues) listed in @map to the HWS behind @port.
 * Thin wrapper over the ROC layer; returns its result directly.
 */
63 cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
65 struct cnxk_sso_evdev *dev = arg;
66 struct cn10k_sso_hws *ws = port;
68 return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
/* Inverse of cn10k_sso_hws_link(): unlink the listed HWGRPs. */
72 cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
74 struct cnxk_sso_evdev *dev = arg;
75 struct cn10k_sso_hws *ws = port;
77 return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
/*
 * Per-port setup: record the group register base, flow-control memory and
 * XAQ limit on the HWS, then program the getwork timeout.
 * NOTE(review): the declaration of `val` is not visible in this view —
 * presumably uint64_t; confirm against the full file.
 */
81 cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
83 struct cnxk_sso_evdev *dev = arg;
84 struct cn10k_sso_hws *ws = hws;
87 ws->grp_base = grp_base;
88 ws->fc_mem = (uint64_t *)dev->fc_iova;
89 ws->xaq_lmt = dev->xaq_lmt;
91 /* Set get_work timeout for HWS */
92 val = NSEC2USEC(dev->deq_tmo_ns) - 1;
93 plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
/*
 * Release a port: unlink it from every event queue one-by-one, then wipe
 * the HWS structure.
 * NOTE(review): `i` is declared outside this view; the (uint16_t *)&i cast
 * is only safe if `i` really is uint16_t — confirm its declaration.
 */
97 cn10k_sso_hws_release(void *arg, void *hws)
99 struct cnxk_sso_evdev *dev = arg;
100 struct cn10k_sso_hws *ws = hws;
103 for (i = 0; i < dev->nb_event_queues; i++)
104 roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
105 memset(ws, 0, sizeof(*ws));
/*
 * Drain all pending work from the HWGRP @queue_id via the HWS @hws.
 * Stops further queueing (QCTL=0), invalidates the getwork cache, then
 * repeatedly issues targeted GET_WORK requests (grouped + wait bits set)
 * until the AQ, DS and CQ/DS interrupt counters all read zero. Each
 * dequeued event is passed to @fn (if non-NULL) and its tag flushed.
 * NOTE(review): declarations of req/aq_cnt/ds_cnt/ev/val, the fn(...) call
 * body, and the loop braces are missing from this view; the surrounding
 * do/while structure must be confirmed against the full file.
 */
109 cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
110 cnxk_handle_event_t fn, void *arg)
112 struct cn10k_sso_hws *ws = hws;
113 uint64_t cq_ds_cnt = 1;
/* Stop the group from accepting new work while we drain. */
119 plt_write64(0, base + SSO_LF_GGRP_QCTL);
121 plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
122 req = queue_id; /* GGRP ID */
123 req |= BIT_ULL(18); /* Grouped */
124 req |= BIT_ULL(16); /* WAIT */
126 aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
127 ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
128 cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
/* Mask extracts the CQ and DS count fields from the INT_CNT register. */
129 cq_ds_cnt &= 0x3FFF3FFF0000;
131 while (aq_cnt || cq_ds_cnt || ds_cnt) {
132 plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
133 cn10k_sso_hws_get_work_empty(ws, &ev);
134 if (fn != NULL && ev.u64 != 0)
136 if (ev.sched_type != SSO_TT_EMPTY)
137 cnxk_sso_hws_swtag_flush(
138 ws->base + SSOW_LF_GWS_WQE0,
139 ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
/* Spin until the swtag flush completes (PENDSTATE bit 56 clears). */
141 val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
142 } while (val & BIT_ULL(56));
143 aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
144 ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
145 cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
146 /* Extract cq and ds count */
147 cq_ds_cnt &= 0x3FFF3FFF0000;
150 plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
/*
 * Reset an HWS to a clean state: wait for outstanding getwork/swtag/
 * desched operations to finish, untag and deschedule any held work, then
 * drain whatever the getwork prefetcher (per gw_mode) may still deliver.
 * NOTE(review): local declarations (pend_state, pend_tt, gw), several
 * braces, break statements and the do/while openers are missing from this
 * view; the PENDSTATE/PRF_WQE0 bit positions are hardware-defined and
 * cannot be validated from here.
 */
157 cn10k_sso_hws_reset(void *arg, void *hws)
158 struct cnxk_sso_evdev *dev = arg;
159 struct cn10k_sso_hws *ws = hws;
160 uintptr_t base = ws->base;
167 plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
168 /* Wait till getwork/swtp/waitw/desched completes. */
170 pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
171 } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
172 BIT_ULL(56) | BIT_ULL(54)));
173 pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
174 if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
175 if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
176 cnxk_sso_hws_swtag_untag(base +
177 SSOW_LF_GWS_OP_SWTAG_UNTAG);
178 plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
181 /* Wait for desched to complete. */
183 pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
184 } while (pend_state & BIT_ULL(58));
/* Prefetch handling is gw_mode specific. */
186 switch (dev->gw_mode) {
187 case CN10K_GW_MODE_PREF:
188 while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
191 case CN10K_GW_MODE_PREF_WFE:
192 while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
193 SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
195 plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
197 case CN10K_GW_MODE_NONE:
/* If the prefetch buffer still holds real work, pull it in with an
 * explicit GET_WORK, then untag/deschedule it like above.
 */
202 if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
204 plt_write64(BIT_ULL(16) | 1,
205 ws->base + SSOW_LF_GWS_OP_GET_WORK0);
207 roc_load_pair(gw.u64[0], gw.u64[1],
208 ws->base + SSOW_LF_GWS_WQE0);
209 } while (gw.u64[0] & BIT_ULL(63));
210 pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
211 if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
212 if (pend_tt == SSO_TT_ATOMIC ||
213 pend_tt == SSO_TT_ORDERED)
214 cnxk_sso_hws_swtag_untag(
215 base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
216 plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
220 plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
/* Derive eventdev resource limits from the underlying SSO: ports come
 * straight from max_hws; queues are capped at RTE_EVENT_MAX_QUEUES_PER_DEV.
 * NOTE(review): the else-branch value of the capped expression (line 233)
 * is missing from this view.
 */
225 cn10k_sso_set_rsrc(void *arg)
227 struct cnxk_sso_evdev *dev = arg;
229 dev->max_event_ports = dev->sso.max_hws;
230 dev->max_event_queues =
231 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
232 RTE_EVENT_MAX_QUEUES_PER_DEV :
/* Reserve @hws work slots and @hwgrp work groups in the ROC layer. */
237 cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
239 struct cnxk_sso_evdev *dev = arg;
241 return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
/*
 * Propagate the Tx adapter lookup table (dev->tx_adptr_data) into every
 * event port. Each port's allocation (cookie + HWS + per-port/queue
 * uint64_t table) is grown via realloc, the table is copied in at the
 * tx_adptr_data offset, and the (possibly moved) port pointer is stored
 * back into event_dev->data->ports[].
 * NOTE(review): the first argument to rte_realloc_socket (line 259), the
 * NULL-check return value, and loop braces are missing from this view.
 */
245 cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
247 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
/* Nothing to propagate until a Tx adapter queue has been added. */
250 if (dev->tx_adptr_data == NULL)
253 for (i = 0; i < dev->nb_event_ports; i++) {
254 struct cn10k_sso_hws *ws = event_dev->data->ports[i];
257 ws_cookie = cnxk_sso_hws_get_cookie(ws);
258 ws_cookie = rte_realloc_socket(
260 sizeof(struct cnxk_sso_hws_cookie) +
261 sizeof(struct cn10k_sso_hws) +
262 (sizeof(uint64_t) * (dev->max_port_id + 1) *
263 RTE_MAX_QUEUES_PER_PORT),
264 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
265 if (ws_cookie == NULL)
/* realloc may move the block: recompute ws past the cookie. */
267 ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
268 memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
269 sizeof(uint64_t) * (dev->max_port_id + 1) *
270 RTE_MAX_QUEUES_PER_PORT);
271 event_dev->data->ports[i] = ws;
/*
 * Install the fastpath entry points on @event_dev. Dequeue handler tables
 * are generated from NIX_RX_FASTPATH_MODES (one specialized function per
 * Rx-offload-flag combination) in every variant: plain, timeout (tmo),
 * crypto-adapter (ca), and their combinations, each with single-segment
 * and multi-segment (seg) plus burst forms. The right table entry is then
 * selected by the CN10K_SET_EVDEV_DEQ_OP/ENQ_OP macros based on the
 * device's configured offloads and flags.
 * NOTE(review): the #undef R/#undef T lines, table-closing braces, the
 * else-branch structure and several argument continuation lines are
 * missing from this view (embedded numbering has gaps).
 */
278 cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
280 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
281 const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
282 #define R(name, flags)[flags] = cn10k_sso_hws_deq_##name,
283 NIX_RX_FASTPATH_MODES
287 const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
288 #define R(name, flags)[flags] = cn10k_sso_hws_deq_burst_##name,
289 NIX_RX_FASTPATH_MODES
293 const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
294 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_##name,
295 NIX_RX_FASTPATH_MODES
299 const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
300 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_burst_##name,
301 NIX_RX_FASTPATH_MODES
305 const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
306 #define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_##name,
307 NIX_RX_FASTPATH_MODES
311 const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
312 #define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_burst_##name,
313 NIX_RX_FASTPATH_MODES
317 const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
318 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_##name,
319 NIX_RX_FASTPATH_MODES
323 const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
324 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_burst_##name,
325 NIX_RX_FASTPATH_MODES
329 const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
330 #define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_##name,
332 NIX_RX_FASTPATH_MODES
336 const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
337 #define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_burst_##name,
338 NIX_RX_FASTPATH_MODES
342 const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
343 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_##name,
344 NIX_RX_FASTPATH_MODES
348 const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
349 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
350 NIX_RX_FASTPATH_MODES
354 const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
355 #define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_##name,
356 NIX_RX_FASTPATH_MODES
360 const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
361 #define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_burst_##name,
362 NIX_RX_FASTPATH_MODES
366 const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
367 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_##name,
368 NIX_RX_FASTPATH_MODES
372 const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
373 #define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_burst_##name,
374 NIX_RX_FASTPATH_MODES
/* Tx adapter enqueue tables, generated from NIX_TX_FASTPATH_MODES. */
379 const event_tx_adapter_enqueue_t
380 sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
381 #define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_##name,
382 NIX_TX_FASTPATH_MODES
386 const event_tx_adapter_enqueue_t
387 sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
388 #define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
389 NIX_TX_FASTPATH_MODES
/* Enqueue entry points are fixed (not offload-specialized). */
393 event_dev->enqueue = cn10k_sso_hws_enq;
394 event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
395 event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
396 event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
/* Dequeue selection: the seg tables when multi-seg Rx is on, plain
 * tables otherwise; tmo/ca variants override when those modes are set.
 */
397 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
398 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
400 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
401 sso_hws_deq_seg_burst);
402 if (dev->is_timeout_deq) {
403 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
404 sso_hws_deq_tmo_seg);
405 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
406 sso_hws_deq_tmo_seg_burst);
408 if (dev->is_ca_internal_port) {
409 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
411 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
412 sso_hws_deq_ca_seg_burst);
414 if (dev->is_timeout_deq && dev->is_ca_internal_port) {
415 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
416 sso_hws_deq_tmo_ca_seg);
417 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
418 sso_hws_deq_tmo_ca_seg_burst);
421 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
422 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
424 if (dev->is_timeout_deq) {
425 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
427 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
428 sso_hws_deq_tmo_burst);
430 if (dev->is_ca_internal_port) {
431 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
433 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
434 sso_hws_deq_ca_burst);
436 if (dev->is_timeout_deq && dev->is_ca_internal_port) {
437 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
439 CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
440 sso_hws_deq_tmo_ca_burst);
443 event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;
/* Tx adapter enqueue: seg table iff multi-seg Tx offload is enabled. */
445 if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
446 CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
447 sso_hws_tx_adptr_enq_seg);
449 CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
450 sso_hws_tx_adptr_enq);
452 event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
/* eventdev dev_infos_get(): set the driver name, delegate the rest to the
 * common cnxk info helper.
 */
456 cn10k_sso_info_get(struct rte_eventdev *event_dev,
457 struct rte_event_dev_info *dev_info)
459 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
461 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
462 cnxk_sso_info_get(dev, dev_info);
/*
 * eventdev dev_configure(): validate config, reserve SSO HWS/HWGRP
 * resources, allocate XAQ pool, create the event ports and restore any
 * port-queue links that existed before a reconfigure. On the error path
 * (label not visible here) the SSO resources are released and the port
 * count zeroed.
 * NOTE(review): the rc checks after each step, goto labels and the final
 * return are missing from this view.
 */
466 cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
468 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
471 rc = cnxk_sso_dev_validate(event_dev);
473 plt_err("Invalid event device configuration");
477 rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
478 dev->nb_event_queues);
480 plt_err("Failed to initialize SSO resources");
484 rc = cnxk_sso_xaq_allocate(dev);
488 rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
489 cn10k_sso_hws_setup);
493 /* Restore any prior port-queue mapping. */
494 cnxk_sso_restore_links(event_dev, cn10k_sso_hws_link);
/* Error unwind: release SSO resources, forget the ports. */
501 roc_sso_rsrc_fini(&dev->sso);
502 dev->nb_event_ports = 0;
/* eventdev port_setup(): port_conf is unused; the common helper does the
 * work with the cn10k-specific HWS setup callback.
 */
507 cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
508 const struct rte_event_port_conf *port_conf)
511 RTE_SET_USED(port_conf);
512 return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
/*
 * eventdev port_release(): recover the cookie in front of the port memory,
 * release the HWS if it was configured, then free the whole allocation.
 * NOTE(review): the NULL-cookie guard and returns are not visible here.
 */
516 cn10k_sso_port_release(void *port)
518 struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
519 struct cnxk_sso_evdev *dev;
524 dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
525 if (!gws_cookie->configured)
528 cn10k_sso_hws_release(dev, port);
529 memset(gws_cookie, 0, sizeof(*gws_cookie));
531 rte_free(gws_cookie);
/*
 * eventdev port_link(): widen the uint8_t queue ids into a uint16_t HWGRP
 * id array (the ROC API takes uint16_t), then link. Priorities are not
 * supported and ignored. Returns the number of links made.
 */
535 cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
536 const uint8_t queues[], const uint8_t priorities[],
539 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
540 uint16_t hwgrp_ids[nb_links];
543 RTE_SET_USED(priorities);
544 for (link = 0; link < nb_links; link++)
545 hwgrp_ids[link] = queues[link];
546 nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
548 return (int)nb_links;
/* eventdev port_unlink(): mirror of port_link() using the unlink helper. */
552 cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
553 uint8_t queues[], uint16_t nb_unlinks)
555 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
556 uint16_t hwgrp_ids[nb_unlinks];
559 for (unlink = 0; unlink < nb_unlinks; unlink++)
560 hwgrp_ids[unlink] = queues[unlink];
561 nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
563 return (int)nb_unlinks;
/*
 * eventdev dev_start(): refresh the Tx adapter data in every port, run the
 * common start sequence (with cn10k reset/flush callbacks), then install
 * the fastpath function pointers.
 * NOTE(review): rc error checks and returns are not visible in this view.
 */
567 cn10k_sso_start(struct rte_eventdev *event_dev)
571 rc = cn10k_sso_updt_tx_adptr_data(event_dev);
575 rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
576 cn10k_sso_hws_flush_events);
579 cn10k_sso_fp_fns_set(event_dev);
/* eventdev dev_stop(): common stop with cn10k reset/flush callbacks. */
585 cn10k_sso_stop(struct rte_eventdev *event_dev)
587 cnxk_sso_stop(event_dev, cn10k_sso_hws_reset,
588 cn10k_sso_hws_flush_events);
/* eventdev dev_close(): common close, unlinking ports via cn10k helper. */
592 cn10k_sso_close(struct rte_eventdev *event_dev)
594 return cnxk_sso_close(event_dev, cn10k_sso_hws_unlink);
/* eventdev dev_selftest(): run the common selftest under this PMD's name. */
598 cn10k_sso_selftest(void)
600 return cnxk_sso_selftest(RTE_STR(event_cn10k));
/*
 * Rx adapter caps: internal-port capabilities only when the ethdev is a
 * cn10k net device; otherwise fall back to SW capabilities.
 * NOTE(review): this comparison uses length 9 ("net_cn10k" in full) while
 * the other driver-name checks in this file use 8 — verify whether the
 * inconsistency is intentional.
 */
604 cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
605 const struct rte_eth_dev *eth_dev, uint32_t *caps)
609 RTE_SET_USED(event_dev);
610 rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
612 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
614 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
615 RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
616 RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID |
617 RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR;
/* Store the Rx lookup memory and timestamp info on every event port so
 * the fastpath dequeue handlers can reach them.
 */
623 cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
626 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
629 for (i = 0; i < dev->nb_event_ports; i++) {
630 struct cn10k_sso_hws *ws = event_dev->data->ports[i];
631 ws->lookup_mem = lookup_mem;
632 ws->tstamp = tstmp_info;
/*
 * Rx adapter queue add: only for cn10k net devices (prefix check, length
 * 8). After the common add, grab lookup_mem/tstamp from Rx queue 0 (all
 * queues of the port share them — assumption, confirm), push them into the
 * event ports and re-resolve fastpath functions.
 * NOTE(review): rc error-check lines and returns are missing in this view.
 */
637 cn10k_sso_rx_adapter_queue_add(
638 const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
640 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
642 struct cn10k_eth_rxq *rxq;
647 rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
651 rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
655 rxq = eth_dev->data->rx_queues[0];
656 lookup_mem = rxq->lookup_mem;
657 tstmp_info = rxq->tstamp;
658 cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
659 cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
/* Rx adapter queue delete: same driver-name gate, then common delete. */
665 cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
666 const struct rte_eth_dev *eth_dev,
671 rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
675 return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
/*
 * Report event-vector limits for the Rx adapter: power-of-two sizes within
 * the NIX VWQE bounds; timeouts derived from the NIX VWQE interval (the
 * +1 and *100 scaling are hardware-interval conventions from the ROC
 * layer — confirm units there).
 * NOTE(review): the ret error-check line and the final return are missing
 * from this view.
 */
679 cn10k_sso_rx_adapter_vector_limits(
680 const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
681 struct rte_event_eth_rx_adapter_vector_limits *limits)
683 struct cnxk_eth_dev *cnxk_eth_dev;
687 ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
691 cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
692 limits->log2_sz = true;
693 limits->min_sz = 1 << ROC_NIX_VWQE_MIN_SIZE_LOG2;
694 limits->max_sz = 1 << ROC_NIX_VWQE_MAX_SIZE_LOG2;
695 limits->min_timeout_ns =
696 (roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100;
697 limits->max_timeout_ns = BITMASK_ULL(8, 0) * limits->min_timeout_ns;
/* Tx adapter caps: internal port + event vector for cn10k net devices.
 * NOTE(review): the non-cn10k branch and return are missing in this view.
 */
703 cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
704 const struct rte_eth_dev *eth_dev, uint32_t *caps)
709 ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
713 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT |
714 RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
/* Tx adapter queue add: common add, refresh per-port Tx adapter data,
 * then re-resolve fastpath functions. @id is unused here as visible.
 */
720 cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
721 const struct rte_eth_dev *eth_dev,
727 rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
730 rc = cn10k_sso_updt_tx_adptr_data(event_dev);
733 cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
/* Tx adapter queue delete: common delete, then refresh Tx adapter data. */
739 cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
740 const struct rte_eth_dev *eth_dev,
746 rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
749 return cn10k_sso_updt_tx_adptr_data(event_dev);
/* Crypto adapter caps: both devices must be cn10k (event + crypto);
 * advertise forward-mode internal port and session private data.
 */
753 cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
754 const struct rte_cryptodev *cdev, uint32_t *caps)
756 CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
757 CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
759 *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
760 RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
/*
 * Crypto adapter queue-pair add: mark the device as using the crypto
 * adapter internal port (which switches the dequeue fastpath to the _ca
 * variants via cn10k_sso_fp_fns_set), then delegate to the common helper.
 * The @event parameter is not used in the visible lines.
 */
766 cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
767 const struct rte_cryptodev *cdev,
768 int32_t queue_pair_id,
769 const struct rte_event *event)
771 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
775 CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
776 CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
778 dev->is_ca_internal_port = 1;
779 cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
781 return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
/* Crypto adapter queue-pair delete: validate both devices, delegate. */
785 cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
786 const struct rte_cryptodev *cdev,
787 int32_t queue_pair_id)
789 CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
790 CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
792 return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
/* eventdev ops table: cn10k-specific handlers where the hardware differs,
 * shared cnxk_* handlers for the rest.
 */
795 static struct eventdev_ops cn10k_sso_dev_ops = {
796 .dev_infos_get = cn10k_sso_info_get,
797 .dev_configure = cn10k_sso_dev_configure,
798 .queue_def_conf = cnxk_sso_queue_def_conf,
799 .queue_setup = cnxk_sso_queue_setup,
800 .queue_release = cnxk_sso_queue_release,
801 .port_def_conf = cnxk_sso_port_def_conf,
802 .port_setup = cn10k_sso_port_setup,
803 .port_release = cn10k_sso_port_release,
804 .port_link = cn10k_sso_port_link,
805 .port_unlink = cn10k_sso_port_unlink,
806 .timeout_ticks = cnxk_sso_timeout_ticks,
808 .eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
809 .eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
810 .eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
811 .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
812 .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
814 .eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,
816 .eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
817 .eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
818 .eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,
820 .timer_adapter_caps_get = cnxk_tim_caps_get,
822 .crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
823 .crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
824 .crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
826 .dump = cnxk_sso_dump,
827 .dev_start = cn10k_sso_start,
828 .dev_stop = cn10k_sso_stop,
829 .dev_close = cn10k_sso_close,
830 .dev_selftest = cn10k_sso_selftest,
/*
 * Device init (called from the PCI probe path): sanity-check the cache
 * line size the driver was compiled for, initialize the platform model,
 * install the ops table, run common init and derive resource limits.
 * Secondary processes only re-resolve the fastpath pointers.
 * NOTE(review): the model-init call, several rc checks, returns and braces
 * are missing from this view.
 */
834 cn10k_sso_init(struct rte_eventdev *event_dev)
836 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
839 if (RTE_CACHE_LINE_SIZE != 64) {
840 plt_err("Driver not compiled for CN10K");
846 plt_err("Failed to initialize platform model");
850 event_dev->dev_ops = &cn10k_sso_dev_ops;
851 /* For secondary processes, the primary has done all the work */
852 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
853 cn10k_sso_fp_fns_set(event_dev);
857 rc = cnxk_sso_init(event_dev);
861 cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
862 if (!dev->max_event_ports || !dev->max_event_queues) {
863 plt_err("Not enough eventdev resource queues=%d ports=%d",
864 dev->max_event_queues, dev->max_event_ports);
865 cnxk_sso_fini(event_dev);
869 plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
870 event_dev->data->name, dev->max_event_queues,
871 dev->max_event_ports);
/* PCI probe: register the eventdev with private data sized for the cnxk
 * evdev state. NOTE(review): the init-callback argument (presumably
 * cn10k_sso_init) is on a line not visible in this view.
 */
877 cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
879 return rte_event_pmd_pci_probe(pci_drv, pci_dev,
880 sizeof(struct cnxk_sso_evdev),
/* PCI ID table: SSO/TIM PF and VF functions across the CN10K subsystem
 * variants (CN10KA, CN10KAS, CNF10KA).
 */
884 static const struct rte_pci_id cn10k_pci_sso_map[] = {
885 CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
886 CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
887 CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
888 CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
889 CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
890 CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
/* PCI driver: needs BAR mapping and IOVA-as-VA; remove is shared cnxk. */
896 static struct rte_pci_driver cn10k_pci_sso = {
897 .id_table = cn10k_pci_sso_map,
898 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
899 .probe = cn10k_sso_probe,
900 .remove = cnxk_sso_remove,
/* PMD registration: driver, PCI table, kernel module dependency and the
 * devargs accepted by this PMD (shared SSO/TIM options plus the cn10k
 * getwork-mode knob).
 */
903 RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
904 RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
905 RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
906 RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
907 CNXK_SSO_GGRP_QOS "=<string>"
908 CNXK_SSO_FORCE_BP "=1"
909 CN10K_SSO_GW_MODE "=<int>"
910 CNXK_TIM_DISABLE_NPA "=1"
911 CNXK_TIM_CHNK_SLOTS "=<int>"
912 CNXK_TIM_RINGS_LMT "=<int>"
913 CNXK_TIM_STATS_ENA "=1"
914 CNXK_TIM_EXT_CLK "=<string>");