/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

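/*
 * Select the dequeue fast-path handler from a 6-dimensional table indexed by
 * the presence (0/1) of each Rx offload flag. The tables themselves are built
 * in cn10k_sso_fp_fns_set() from the NIX_RX_FASTPATH_MODES list, so every
 * negotiated offload combination resolves to a specialized handler.
 */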
#define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
	(deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])

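/* Tx counterpart: pick the Tx adapter enqueue handler by Tx offload flags. */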
#define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                           \
	(enq_op =                                                              \
		 enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])

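/*
 * Compose the GETWORK command word for the configured getwork mode.
 * Note: the exact bit semantics are hardware-defined; BIT(19)/BIT(20) are
 * presumably the prefetch and wait-for-event controls selected by the
 * CN10K_GW_MODE_PREF/CN10K_GW_MODE_PREF_WFE modes.
 */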
static uint32_t
cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
{
	uint32_t wdata = BIT(16) | 1;

	switch (dev->gw_mode) {
	case CN10K_GW_MODE_NONE:
	default:
		break;
	case CN10K_GW_MODE_PREF:
		wdata |= BIT(19);
		break;
	case CN10K_GW_MODE_PREF_WFE:
		wdata |= BIT(20) | BIT(19);
		break;
	}

	return wdata;
}

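/*
 * Per-port (HWS) state setup: carve off the cookie cache line, then record
 * the SSO base addresses and getwork command word used by the fast path.
 */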
static void *
cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws;

	/* Allocate event port memory */
	ws = rte_zmalloc("cn10k_ws",
			 sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
			 RTE_CACHE_LINE_SIZE);
	if (ws == NULL) {
		plt_err("Failed to alloc memory for port=%d", port_id);
		return NULL;
	}

	/* First cache line is reserved for cookie */
	ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
	ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
	ws->tx_base = ws->base;
	ws->hws_id = port_id;
	ws->swtag_req = 0;
	ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
	ws->lmt_base = dev->sso.lmt_base;

	return ws;
}

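/* Map/unmap event queues (HWGRPs) to this event port (HWS) via the ROC layer. */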
static int
cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = port;

	return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
}

static int
cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = port;

	return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
}

static void
cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = hws;
	uint64_t val;

	rte_memcpy(ws->grps_base, grps_base,
		   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
	ws->fc_mem = dev->fc_mem;
	ws->xaq_lmt = dev->xaq_lmt;

	/* Set get_work timeout for HWS */
	val = NSEC2USEC(dev->deq_tmo_ns) - 1;
	plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
}

static void
cn10k_sso_hws_release(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = hws;
	int i;

	for (i = 0; i < dev->nb_event_queues; i++)
		roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
	memset(ws, 0, sizeof(*ws));
}

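/*
 * Drain a HWGRP before teardown: repeatedly issue GETWORK until the
 * admission, conflict and deschedule queues are all empty, handing each
 * flushed event to the caller's callback when one is supplied.
 */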
static void
cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
			   cnxk_handle_event_t fn, void *arg)
{
	struct cn10k_sso_hws *ws = hws;
	uint64_t cq_ds_cnt = 1;
	uint64_t aq_cnt = 1;
	uint64_t ds_cnt = 1;
	struct rte_event ev;
	uint64_t val, req;

	plt_write64(0, base + SSO_LF_GGRP_QCTL);

	req = queue_id;	    /* GGRP ID */
	req |= BIT_ULL(18); /* Grouped */
	req |= BIT_ULL(16); /* WAIT */

	aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
	ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
	cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
	cq_ds_cnt &= 0x3FFF3FFF0000;

	while (aq_cnt || cq_ds_cnt || ds_cnt) {
		plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
		cn10k_sso_hws_get_work_empty(ws, &ev);
		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
		if (ev.sched_type != SSO_TT_EMPTY)
			cnxk_sso_hws_swtag_flush(
				ws->base + SSOW_LF_GWS_WQE0,
				ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
		do {
			val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
		} while (val & BIT_ULL(56));
		aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
		ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
		cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x3FFF3FFF0000;
	}

	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
	rte_mb();
}

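/*
 * Quiesce a HWS: wait for any outstanding getwork/switch-tag/deschedule
 * operation, untag and deschedule held work, then drain whatever the
 * getwork prefetch logic may still return for the configured GW mode.
 */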
static void
cn10k_sso_hws_reset(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = hws;
	uintptr_t base = ws->base;
	uint64_t pend_state;
	union {
		__uint128_t wdata;
		uint64_t u64[2];
	} gw;
	uint8_t pend_tt;

	/* Wait till getwork/swtp/waitw/desched completes. */
	do {
		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
	} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
			       BIT_ULL(56) | BIT_ULL(54)));
	pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
	if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
		if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
			cnxk_sso_hws_swtag_untag(base +
						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
		plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
	}

	/* Wait for desched to complete. */
	do {
		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
	} while (pend_state & BIT_ULL(58));

	switch (dev->gw_mode) {
	case CN10K_GW_MODE_PREF:
		while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
			;
		break;
	case CN10K_GW_MODE_PREF_WFE:
		while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
		       SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
			;
		plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
		break;
	case CN10K_GW_MODE_NONE:
	default:
		break;
	}

	if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
	    SSO_TT_EMPTY) {
		plt_write64(BIT_ULL(16) | 1,
			    ws->base + SSOW_LF_GWS_OP_GET_WORK0);
		do {
			roc_load_pair(gw.u64[0], gw.u64[1],
				      ws->base + SSOW_LF_GWS_WQE0);
		} while (gw.u64[0] & BIT_ULL(63));
		pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
		if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
			if (pend_tt == SSO_TT_ATOMIC ||
			    pend_tt == SSO_TT_ORDERED)
				cnxk_sso_hws_swtag_untag(
					base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
			plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
		}
	}

	plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
	rte_mb();
}

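/* Expose HW limits, clamping HWGRPs to what the eventdev API can represent. */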
static void
cn10k_sso_set_rsrc(void *arg)
{
	struct cnxk_sso_evdev *dev = arg;

	dev->max_event_ports = dev->sso.max_hws;
	dev->max_event_queues =
		dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
			RTE_EVENT_MAX_QUEUES_PER_DEV :
			dev->sso.max_hwgrp;
}

static int
cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
	struct cnxk_sso_evdev *dev = arg;

	return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

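/*
 * Grow each port's memory so the Tx adapter's per-(port, queue) data can be
 * copied in-line behind the HWS state, keeping the fast path to a single
 * structure. The cookie preceding the HWS is preserved across the realloc.
 */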
static int
cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	if (dev->tx_adptr_data == NULL)
		return 0;

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
		void *ws_cookie;

		ws_cookie = cnxk_sso_hws_get_cookie(ws);
		ws_cookie = rte_realloc_socket(
			ws_cookie,
			sizeof(struct cnxk_sso_hws_cookie) +
				sizeof(struct cn10k_sso_hws) +
				(sizeof(uint64_t) * (dev->max_port_id + 1) *
				 RTE_MAX_QUEUES_PER_PORT),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (ws_cookie == NULL)
			return -ENOMEM;
		ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
		memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
		       sizeof(uint64_t) * (dev->max_port_id + 1) *
			       RTE_MAX_QUEUES_PER_PORT);
		event_dev->data->ports[i] = ws;
	}

	return 0;
}

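/*
 * Populate the eventdev fast-path function pointers. Each legal offload
 * combination has a handler generated from the NIX_RX_FASTPATH_MODES /
 * NIX_TX_FASTPATH_MODES lists; the CN10K_SET_EVDEV_* macros index these
 * tables with the offload flags actually negotiated.
 */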
static void
cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_ca_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
		};

	const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_ca_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
		};

	/* Tx modes */
	const event_tx_adapter_enqueue
		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	const event_tx_adapter_enqueue
		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	event_dev->enqueue = cn10k_sso_hws_enq;
	event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
	event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
	event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
				       sso_hws_deq_seg);
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				       sso_hws_deq_seg_burst);
		if (dev->is_timeout_deq) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo_seg);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_seg_burst);
		}
		if (dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_ca_seg);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_ca_seg_burst);
		}
	} else {
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				       sso_hws_deq_burst);
		if (dev->is_timeout_deq) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_burst);
		}
		if (dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_ca);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_ca_burst);
		}
	}
	event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;

	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				       sso_hws_tx_adptr_enq_seg);
	else
		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				       sso_hws_tx_adptr_enq);

	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
}

static void
cn10k_sso_info_get(struct rte_eventdev *event_dev,
		   struct rte_event_dev_info *dev_info)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
	cnxk_sso_info_get(dev, dev_info);
}

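/*
 * Configure: validate the requested configuration, (re)initialize the SSO
 * hardware resources, allocate XAQ memory, create the event ports and
 * restore any queue links that existed before a reconfigure.
 */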
static int
cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	roc_sso_rsrc_fini(&dev->sso);

	rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
				 dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
				    cn10k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn10k_sso_hws_link);

	dev->configured = 1;
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}

static int
cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(port_conf);
	return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
}

static void
cn10k_sso_port_release(void *port)
{
	struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
	struct cnxk_sso_evdev *dev;

	if (port == NULL)
		return;

	dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
	if (!gws_cookie->configured)
		goto free;

	cn10k_sso_hws_release(dev, port);
	memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
	rte_free(gws_cookie);
}

static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_links];
	uint16_t link;

	RTE_SET_USED(priorities);
	for (link = 0; link < nb_links; link++)
		hwgrp_ids[link] = queues[link];
	nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

	return (int)nb_links;
}

static int
cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_unlinks];
	uint16_t unlink;

	for (unlink = 0; unlink < nb_unlinks; unlink++)
		hwgrp_ids[unlink] = queues[unlink];
	nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

	return (int)nb_unlinks;
}

static int
cn10k_sso_start(struct rte_eventdev *event_dev)
{
	int rc;

	rc = cn10k_sso_updt_tx_adptr_data(event_dev);
	if (rc < 0)
		return rc;

	rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
			    cn10k_sso_hws_flush_events);
	if (rc < 0)
		return rc;
	cn10k_sso_fp_fns_set(event_dev);

	return rc;
}

static void
cn10k_sso_stop(struct rte_eventdev *event_dev)
{
	cnxk_sso_stop(event_dev, cn10k_sso_hws_reset,
		      cn10k_sso_hws_flush_events);
}

static int
cn10k_sso_close(struct rte_eventdev *event_dev)
{
	return cnxk_sso_close(event_dev, cn10k_sso_hws_unlink);
}

static int
cn10k_sso_selftest(void)
{
	return cnxk_sso_selftest(RTE_STR(event_cn10k));
}

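/*
 * Internal-port Rx adapter capabilities apply only when the ethdev is also
 * a cn10k device; any other driver falls back to the SW Rx adapter path.
 */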
static int
cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int rc;

	RTE_SET_USED(event_dev);
	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
	if (rc)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID |
			RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR;

	return 0;
}

static void
cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
		       void *tstmp_info)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
		ws->lookup_mem = lookup_mem;
		ws->tstamp = tstmp_info;
	}
}

static int
cn10k_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct cn10k_eth_rxq *rxq;
	void *lookup_mem;
	void *tstmp_info;
	int rc;

	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
	if (rc)
		return -EINVAL;

	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
					   queue_conf);
	if (rc)
		return -EINVAL;

	rxq = eth_dev->data->rx_queues[0];
	lookup_mem = rxq->lookup_mem;
	tstmp_info = rxq->tstamp;
	cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

static int
cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
			       const struct rte_eth_dev *eth_dev,
			       int32_t rx_queue_id)
{
	int rc;

	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
	if (rc)
		return -EINVAL;

	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn10k_sso_rx_adapter_vector_limits(
	const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
	struct rte_event_eth_rx_adapter_vector_limits *limits)
{
	struct cnxk_eth_dev *cnxk_eth_dev;
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
	if (ret)
		return -ENOTSUP;

	cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
	limits->log2_sz = true;
	limits->min_sz = 1 << ROC_NIX_VWQE_MIN_SIZE_LOG2;
	limits->max_sz = 1 << ROC_NIX_VWQE_MAX_SIZE_LOG2;
	limits->min_timeout_ns =
		(roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100;
	limits->max_timeout_ns = BITMASK_ULL(8, 0) * limits->min_timeout_ns;

	return 0;
}

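/*
 * Enable vector WQE (event vectorization) on an Rx queue: the tag mask
 * encodes the ethdev port into the event tag and marks the events as
 * RTE_EVENT_TYPE_ETHDEV_VECTOR; the wait timeout is converted from ns into
 * units derived from roc_nix_get_vwqe_interval().
 */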
static int
cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
				uint16_t port_id, uint16_t rq_id, uint16_t sz,
				uint64_t tmo_ns, struct rte_mempool *vmp)
{
	struct roc_nix_rq *rq;

	rq = &cnxk_eth_dev->rqs[rq_id];

	if (!rq->sso_ena)
		return -EINVAL;
	if (rq->flow_tag_width == 0)
		return -EINVAL;

	rq->vwqe_ena = 1;
	rq->vwqe_first_skip = 0;
	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
	rq->vwqe_wait_tmo =
		tmo_ns /
		((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
	rq->tag_mask = (port_id & 0xF) << 20;
	rq->tag_mask |=
		(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
		<< 24;

	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}

static int
cn10k_sso_rx_adapter_vector_config(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_event_vector_config *config)
{
	struct cnxk_eth_dev *cnxk_eth_dev;
	struct cnxk_sso_evdev *dev;
	int i, rc;

	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
	if (rc)
		return -EINVAL;

	dev = cnxk_sso_pmd_priv(event_dev);
	cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
	if (rx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
			cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
					      RTE_EVENT_TYPE_ETHDEV_VECTOR);
			rc = cnxk_sso_xae_reconfigure(
				(struct rte_eventdev *)(uintptr_t)event_dev);
			rc = cnxk_sso_rx_adapter_vwqe_enable(
				cnxk_eth_dev, eth_dev->data->port_id, i,
				config->vector_sz, config->vector_timeout_ns,
				config->vector_mp);
			if (rc)
				return -EINVAL;
		}
	} else {
		cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
				      RTE_EVENT_TYPE_ETHDEV_VECTOR);
		rc = cnxk_sso_xae_reconfigure(
			(struct rte_eventdev *)(uintptr_t)event_dev);
		rc = cnxk_sso_rx_adapter_vwqe_enable(
			cnxk_eth_dev, eth_dev->data->port_id, rx_queue_id,
			config->vector_sz, config->vector_timeout_ns,
			config->vector_mp);
		if (rc)
			return -EINVAL;
	}

	return 0;
}

static int
cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
			      const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT |
			RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;

	return 0;
}

static int
cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
			       const struct rte_eth_dev *eth_dev,
			       int32_t tx_queue_id)
{
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;
	rc = cn10k_sso_updt_tx_adptr_data(event_dev);
	if (rc < 0)
		return rc;
	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return rc;
}

static int
cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
			       const struct rte_eth_dev *eth_dev,
			       int32_t tx_queue_id)
{
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;
	return cn10k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
			      const struct rte_cryptodev *cdev, uint32_t *caps)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

	return 0;
}

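/*
 * Switching to crypto adapter internal-port mode changes the dequeue
 * handlers (the _ca_ variants), so re-resolve the fast-path functions here.
 */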
static int
cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id,
			    const struct rte_event *event)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(event);

	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	dev->is_ca_internal_port = 1;
	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct rte_eventdev_ops cn10k_sso_dev_ops = {
	.dev_infos_get = cn10k_sso_info_get,
	.dev_configure = cn10k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn10k_sso_port_setup,
	.port_release = cn10k_sso_port_release,
	.port_link = cn10k_sso_port_link,
	.port_unlink = cn10k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,

	.eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

	.eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,
	.eth_rx_adapter_event_vector_config =
		cn10k_sso_rx_adapter_vector_config,

	.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,

	.timer_adapter_caps_get = cnxk_tim_caps_get,

	.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
	.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,

	.dump = cnxk_sso_dump,
	.dev_start = cn10k_sso_start,
	.dev_stop = cn10k_sso_stop,
	.dev_close = cn10k_sso_close,
	.dev_selftest = cn10k_sso_selftest,
};

static int
cn10k_sso_init(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	if (RTE_CACHE_LINE_SIZE != 64) {
		plt_err("Driver not compiled for CN10K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc < 0) {
		plt_err("Failed to initialize platform model");
		return rc;
	}

	event_dev->dev_ops = &cn10k_sso_dev_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		cn10k_sso_fp_fns_set(event_dev);
		return 0;
	}

	rc = cnxk_sso_init(event_dev);
	if (rc < 0)
		return rc;

	cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
	if (!dev->max_event_ports || !dev->max_event_queues) {
		plt_err("Not enough eventdev resource queues=%d ports=%d",
			dev->max_event_queues, dev->max_event_ports);
		cnxk_sso_fini(event_dev);
		return -ENODEV;
	}

	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		    event_dev->data->name, dev->max_event_queues,
		    dev->max_event_ports);

	return 0;
}

static int
cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
				       sizeof(struct cnxk_sso_evdev),
				       cn10k_sso_init);
}

static const struct rte_pci_id cn10k_pci_sso_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cn10k_pci_sso = {
	.id_table = cn10k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn10k_sso_probe,
	.remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CNXK_SSO_FORCE_BP "=1"
			      CN10K_SSO_GW_MODE "=<int>"
			      CNXK_TIM_DISABLE_NPA "=1"
			      CNXK_TIM_CHNK_SLOTS "=<int>"
			      CNXK_TIM_RINGS_LMT "=<int>"
			      CNXK_TIM_STATS_ENA "=1");