net/cnxk: add cn10k template Tx functions to build
drivers/event/cnxk/cn10k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

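/*
 * Fast-path handlers are template-generated, one per combination of Rx/Tx
 * offload flags. These helpers index the handler tables (built below from
 * NIX_RX_FASTPATH_MODES/NIX_TX_FASTPATH_MODES) with the device's offload
 * flag bitmask.
 */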
#define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
        deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]

#define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                           \
        enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]

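/*
 * Compose the GET_WORK request word for the configured getwork mode.
 * BIT(16) is the WAIT bit (also used in the flush path below); BIT(19)
 * and BIT(20) appear to select the prefetch and prefetch-with-WFE
 * variants respectively.
 */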
static uint32_t
cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
{
        uint32_t wdata = BIT(16) | 1;

        switch (dev->gw_mode) {
        case CN10K_GW_MODE_NONE:
        default:
                break;
        case CN10K_GW_MODE_PREF:
                wdata |= BIT(19);
                break;
        case CN10K_GW_MODE_PREF_WFE:
                wdata |= BIT(20) | BIT(19);
                break;
        }

        return wdata;
}

static void *
cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws;

        /* Allocate event port memory */
        ws = rte_zmalloc("cn10k_ws",
                         sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
                         RTE_CACHE_LINE_SIZE);
        if (ws == NULL) {
                plt_err("Failed to alloc memory for port=%d", port_id);
                return NULL;
        }

        /* First cache line is reserved for cookie */
        ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
        ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
        ws->tx_base = ws->base;
        ws->hws_id = port_id;
        ws->swtag_req = 0;
        ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
        ws->lmt_base = dev->sso.lmt_base;

        return ws;
}

static int
cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = port;

        return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
}

static int
cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = port;

        return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
}

static void
cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        uint64_t val;

        ws->grp_base = grp_base;
        ws->fc_mem = (uint64_t *)dev->fc_iova;
        ws->xaq_lmt = dev->xaq_lmt;

        /* Set get_work timeout for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;
        plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
}

static void
cn10k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        int i;

        for (i = 0; i < dev->nb_event_queues; i++)
                roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
        memset(ws, 0, sizeof(*ws));
}

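/*
 * Drain all events pending on a HWGRP: quiesce the group via QCTL, then
 * issue grouped GET_WORK requests until its AQ, CQ and desched counters
 * all drop to zero, handing each valid event received to fn.
 */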
static void
cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                           cnxk_handle_event_t fn, void *arg)
{
        struct cn10k_sso_hws *ws = hws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
                cn10k_sso_hws_get_work_empty(ws, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(
                                ws->base + SSOW_LF_GWS_WQE0,
                                ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
                do {
                        val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        rte_mb();
}

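/*
 * Return a HWS to a clean state: wait for any in-flight getwork/swtag/
 * desched to finish, untag and desched any work still held, and, in the
 * prefetch modes, consume whatever GET_WORK the port already prefetched.
 */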
static void
cn10k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        uintptr_t base = ws->base;
        uint64_t pend_state;
        union {
                __uint128_t wdata;
                uint64_t u64[2];
        } gw;
        uint8_t pend_tt;

        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        /* Wait till getwork/swtp/waitw/desched completes. */
        do {
                pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
        } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                               BIT_ULL(56) | BIT_ULL(54)));
        pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
        if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
                        cnxk_sso_hws_swtag_untag(base +
                                                 SSOW_LF_GWS_OP_SWTAG_UNTAG);
                plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
        }

        /* Wait for desched to complete. */
        do {
                pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
        } while (pend_state & BIT_ULL(58));

        switch (dev->gw_mode) {
        case CN10K_GW_MODE_PREF:
                while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
                        ;
                break;
        case CN10K_GW_MODE_PREF_WFE:
                while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
                       SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
                        continue;
                plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
                break;
        case CN10K_GW_MODE_NONE:
        default:
                break;
        }

        if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
            SSO_TT_EMPTY) {
                plt_write64(BIT_ULL(16) | 1,
                            ws->base + SSOW_LF_GWS_OP_GET_WORK0);
                do {
                        roc_load_pair(gw.u64[0], gw.u64[1],
                                      ws->base + SSOW_LF_GWS_WQE0);
                } while (gw.u64[0] & BIT_ULL(63));
                pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }
        }

        plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
        rte_mb();
}

static void
cn10k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                              RTE_EVENT_MAX_QUEUES_PER_DEV :
                              dev->sso.max_hwgrp;
}

static int
cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

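/*
 * The Tx adapter data table lives inside each port's cookie allocation
 * so the fast path can reach it from the HWS pointer alone. Grow every
 * port's allocation to hold one 64-bit entry per (ethdev, queue) pair
 * and copy the current table in.
 */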
static int
cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        if (dev->tx_adptr_data == NULL)
                return 0;

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cn10k_sso_hws *ws = event_dev->data->ports[i];
                void *ws_cookie;

                ws_cookie = cnxk_sso_hws_get_cookie(ws);
                ws_cookie = rte_realloc_socket(
                        ws_cookie,
                        sizeof(struct cnxk_sso_hws_cookie) +
                                sizeof(struct cn10k_sso_hws) +
                                (sizeof(uint64_t) * (dev->max_port_id + 1) *
                                 RTE_MAX_QUEUES_PER_PORT),
                        RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                if (ws_cookie == NULL)
                        return -ENOMEM;
                ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
                memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
                       sizeof(uint64_t) * (dev->max_port_id + 1) *
                               RTE_MAX_QUEUES_PER_PORT);
                event_dev->data->ports[i] = ws;
        }

        return 0;
}

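/*
 * Select the fast-path functions. Each table below is expanded from
 * NIX_RX_FASTPATH_MODES/NIX_TX_FASTPATH_MODES, so indexing it with the
 * negotiated offload flags yields the matching specialized handler;
 * timeout dequeue, crypto-adapter and multi-segment support each pick a
 * differently specialized variant.
 */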
static void
cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue_t
                sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

        const event_tx_adapter_enqueue_t
                sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

        event_dev->enqueue = cn10k_sso_hws_enq;
        event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                       sso_hws_deq_seg);
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                       sso_hws_deq_seg_burst);
                if (dev->is_timeout_deq) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_tmo_seg);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_tmo_seg_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_ca_seg);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_ca_seg_burst);
                }
                if (dev->is_timeout_deq && dev->is_ca_internal_port) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_tmo_ca_seg);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_tmo_ca_seg_burst);
                }
        } else {
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                       sso_hws_deq_burst);
                if (dev->is_timeout_deq) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_tmo);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_tmo_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_ca);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_ca_burst);
                }
                if (dev->is_timeout_deq && dev->is_ca_internal_port) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_tmo_ca);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_tmo_ca_burst);
                }
        }
        event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                       sso_hws_tx_adptr_enq_seg);
        else
                CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                       sso_hws_tx_adptr_enq);

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
}

static void
cn10k_sso_info_get(struct rte_eventdev *event_dev,
                   struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

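/*
 * Configure order: validate the requested config, size the SSO LFs,
 * allocate XAQ buffers, set up the event ports, then restore any
 * queue-to-port links that existed before a reconfigure.
 */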
static int
cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
                                 dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
                                    cn10k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn10k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                     const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
}

static void
cn10k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn10k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

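/*
 * Event queues map 1:1 to SSO HWGRPs; widen the 8-bit queue ids to the
 * 16-bit ids the ROC layer expects. Priorities are unused here.
 */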
static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                    const uint8_t queues[], const uint8_t priorities[],
                    uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                      uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn10k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cn10k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;

        rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
                            cn10k_sso_hws_flush_events);
        if (rc < 0)
                return rc;
        cn10k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn10k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn10k_sso_hws_reset,
                      cn10k_sso_hws_flush_events);
}

static int
cn10k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn10k_sso_hws_unlink);
}

static int
cn10k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn10k));
}

static int
cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR;

        return 0;
}

static void
cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
                       void *tstmp_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cn10k_sso_hws *ws = event_dev->data->ports[i];
                ws->lookup_mem = lookup_mem;
                ws->tstamp = tstmp_info;
        }
}

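/*
 * After binding an Rx queue, propagate the ethdev's lookup_mem and
 * timestamp info to every event port (the device's Rx queues evidently
 * share them, so rx_queues[0] is representative) and re-select the
 * fast-path functions for the now-known Rx offloads.
 */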
static int
cn10k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn10k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;
        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev,
                               int32_t rx_queue_id)
{
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
        if (rc)
                return -EINVAL;

        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn10k_sso_rx_adapter_vector_limits(
        const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
        struct rte_event_eth_rx_adapter_vector_limits *limits)
{
        struct cnxk_eth_dev *cnxk_eth_dev;
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
        if (ret)
                return -ENOTSUP;

        cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
        limits->log2_sz = true;
        limits->min_sz = 1 << ROC_NIX_VWQE_MIN_SIZE_LOG2;
        limits->max_sz = 1 << ROC_NIX_VWQE_MAX_SIZE_LOG2;
        limits->min_timeout_ns =
                (roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100;
        limits->max_timeout_ns = BITMASK_ULL(8, 0) * limits->min_timeout_ns;

        return 0;
}

static int
cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;

        return 0;
}

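/*
 * Adding or removing a Tx adapter queue changes the Tx adapter data
 * table, so refresh each port's copy and re-select the fast-path
 * functions to match the updated Tx offloads.
 */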
static int
cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev,
                               int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        rc = cn10k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev,
                               int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        return cn10k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
                              const struct rte_cryptodev *cdev, uint32_t *caps)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

        *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
                RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

        return 0;
}

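/*
 * The crypto adapter uses the SSO internal port; flag it so the
 * CA-aware dequeue variants get selected before delegating the queue
 * pair setup to the common cnxk layer.
 */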
static int
cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                            const struct rte_cryptodev *cdev,
                            int32_t queue_pair_id,
                            const struct rte_event *event)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(event);

        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

        dev->is_ca_internal_port = 1;
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
                            const struct rte_cryptodev *cdev,
                            int32_t queue_pair_id)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

        return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn10k_sso_dev_ops = {
        .dev_infos_get = cn10k_sso_info_get,
        .dev_configure = cn10k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn10k_sso_port_setup,
        .port_release = cn10k_sso_port_release,
        .port_link = cn10k_sso_port_link,
        .port_unlink = cn10k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        .eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,

        .eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
        .crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
        .crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,

        .dump = cnxk_sso_dump,
        .dev_start = cn10k_sso_start,
        .dev_stop = cn10k_sso_stop,
        .dev_close = cn10k_sso_close,
        .dev_selftest = cn10k_sso_selftest,
};

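/*
 * CN10K is a 64-byte cache line part, so a build with a different
 * RTE_CACHE_LINE_SIZE cannot drive it. Secondary processes only need
 * the fast-path function pointers; the primary owns all device setup.
 */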
static int
cn10k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 64) {
                plt_err("Driver not compiled for CN10K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn10k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn10k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(pci_drv, pci_dev,
                                       sizeof(struct cnxk_sso_evdev),
                                       cn10k_sso_init);
}

static const struct rte_pci_id cn10k_pci_sso_map[] = {
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn10k_pci_sso = {
        .id_table = cn10k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn10k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN10K_SSO_GW_MODE "=<int>"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1"
                              CNXK_TIM_EXT_CLK "=<string>");