dpdk.git: drivers/event/cnxk/cn9k_eventdev.c (shown at commit "net/cnxk: avoid command copy from Tx queue")
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "cn9k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
8
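/*
 * CN9K can run each event port in dual-workslot mode, where one logical
 * port is backed by a pair of SSO hardware work slots (HWS).
 * CN9K_DUAL_WS_PAIR_ID() maps a logical port id and a pair index (0/1)
 * to the underlying HWS id.
 */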
9 #define CN9K_DUAL_WS_NB_WS          2
10 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
11
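/*
 * Fast-path dequeue/enqueue handlers are picked from lookup tables indexed
 * by the enabled Rx/Tx offload flag bits; the tables themselves are
 * generated below from NIX_RX_FASTPATH_MODES / NIX_TX_FASTPATH_MODES.
 */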
12 #define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
13         deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
14
15 #define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
16         enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]
17
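/* Link the given event queues (SSO HW groups) to an event port. In
 * dual-workslot mode both HWS of the pair receive the same links.
 */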
18 static int
19 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
20 {
21         struct cnxk_sso_evdev *dev = arg;
22         struct cn9k_sso_hws_dual *dws;
23         struct cn9k_sso_hws *ws;
24         int rc;
25
26         if (dev->dual_ws) {
27                 dws = port;
28                 rc = roc_sso_hws_link(&dev->sso,
29                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
30                                       nb_link);
31                 rc |= roc_sso_hws_link(&dev->sso,
32                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
33                                        map, nb_link);
34         } else {
35                 ws = port;
36                 rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
37         }
38
39         return rc;
40 }
41
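/* Remove the given queue links from an event port (from both HWS when in
 * dual-workslot mode).
 */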
42 static int
43 cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
44 {
45         struct cnxk_sso_evdev *dev = arg;
46         struct cn9k_sso_hws_dual *dws;
47         struct cn9k_sso_hws *ws;
48         int rc;
49
50         if (dev->dual_ws) {
51                 dws = port;
52                 rc = roc_sso_hws_unlink(&dev->sso,
53                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
54                                         map, nb_link);
55                 rc |= roc_sso_hws_unlink(&dev->sso,
56                                          CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
57                                          map, nb_link);
58         } else {
59                 ws = port;
60                 rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
61         }
62
63         return rc;
64 }
65
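/* Per-port setup: record the group base address, XAQ flow-control memory
 * and limit, and program the get_work timeout (SSOW_LF_GWS_NW_TIM) derived
 * from the configured dequeue timeout.
 */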
66 static void
67 cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
68 {
69         struct cnxk_sso_evdev *dev = arg;
70         struct cn9k_sso_hws_dual *dws;
71         struct cn9k_sso_hws *ws;
72         uint64_t val;
73
74         /* Set get_work tmo for HWS */
75         val = dev->deq_tmo_ns ? NSEC2USEC(dev->deq_tmo_ns) - 1 : 0;
76         if (dev->dual_ws) {
77                 dws = hws;
78                 dws->grp_base = grp_base;
79                 dws->fc_mem = (uint64_t *)dev->fc_iova;
80                 dws->xaq_lmt = dev->xaq_lmt;
81
82                 plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
83                 plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
84         } else {
85                 ws = hws;
86                 ws->grp_base = grp_base;
87                 ws->fc_mem = (uint64_t *)dev->fc_iova;
88                 ws->xaq_lmt = dev->xaq_lmt;
89
90                 plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
91         }
92 }
93
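/* Unlink every event queue from the port and clear its software state. */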
94 static void
95 cn9k_sso_hws_release(void *arg, void *hws)
96 {
97         struct cnxk_sso_evdev *dev = arg;
98         struct cn9k_sso_hws_dual *dws;
99         struct cn9k_sso_hws *ws;
100         int i;
101
102         if (dev->dual_ws) {
103                 dws = hws;
104                 for (i = 0; i < dev->nb_event_queues; i++) {
105                         roc_sso_hws_unlink(&dev->sso,
106                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
107                                            (uint16_t *)&i, 1);
108                         roc_sso_hws_unlink(&dev->sso,
109                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
110                                            (uint16_t *)&i, 1);
111                 }
112                 memset(dws, 0, sizeof(*dws));
113         } else {
114                 ws = hws;
115                 for (i = 0; i < dev->nb_event_queues; i++)
116                         roc_sso_hws_unlink(&dev->sso, ws->hws_id,
117                                            (uint16_t *)&i, 1);
118                 memset(ws, 0, sizeof(*ws));
119         }
120 }
121
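/* Drain any events still held by a queue (GGRP): stop further scheduling
 * into the group, then repeatedly issue GET_WORK on the port and pass each
 * returned event to the supplied callback until the group's pending-work
 * counters drop to zero.
 */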
122 static void
123 cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
124                           cnxk_handle_event_t fn, void *arg)
125 {
126         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
127         struct cn9k_sso_hws_dual *dws;
128         struct cn9k_sso_hws *ws;
129         uint64_t cq_ds_cnt = 1;
130         uint64_t aq_cnt = 1;
131         uint64_t ds_cnt = 1;
132         struct rte_event ev;
133         uintptr_t ws_base;
134         uint64_t val, req;
135
136         plt_write64(0, base + SSO_LF_GGRP_QCTL);
137
138         req = queue_id;     /* GGRP ID */
139         req |= BIT_ULL(18); /* Grouped */
140         req |= BIT_ULL(16); /* WAIT */
141
142         aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
143         ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
144         cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
145         cq_ds_cnt &= 0x3FFF3FFF0000;
146
147         if (dev->dual_ws) {
148                 dws = hws;
149                 ws_base = dws->base[0];
150         } else {
151                 ws = hws;
152                 ws_base = ws->base;
153         }
154
155         while (aq_cnt || cq_ds_cnt || ds_cnt) {
156                 plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
157                 cn9k_sso_hws_get_work_empty(ws_base, &ev);
158                 if (fn != NULL && ev.u64 != 0)
159                         fn(arg, ev);
160                 if (ev.sched_type != SSO_TT_EMPTY)
161                         cnxk_sso_hws_swtag_flush(
162                                 ws_base + SSOW_LF_GWS_TAG,
163                                 ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
164                 do {
165                         val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
166                 } while (val & BIT_ULL(56));
167                 aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
168                 ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
169                 cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
170                 /* Extract cq and ds count */
171                 cq_ds_cnt &= 0x3FFF3FFF0000;
172         }
173
174         plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
175 }
176
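/* Quiesce an event port: wait for in-flight GET_WORK/SWTAG/desched
 * operations to finish, untag or desched whatever work the port still
 * holds, then wait for the desched to complete.
 */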
177 static void
178 cn9k_sso_hws_reset(void *arg, void *hws)
179 {
180         struct cnxk_sso_evdev *dev = arg;
181         struct cn9k_sso_hws_dual *dws;
182         struct cn9k_sso_hws *ws;
183         uint64_t pend_state;
184         uint8_t pend_tt;
185         uintptr_t base;
186         uint64_t tag;
187         uint8_t i;
188
189         dws = hws;
190         ws = hws;
191         for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
192                 base = dev->dual_ws ? dws->base[i] : ws->base;
193                 /* Wait till getwork/swtp/waitw/desched completes. */
194                 do {
195                         pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
196                 } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
197                                        BIT_ULL(56)));
198
199                 tag = plt_read64(base + SSOW_LF_GWS_TAG);
200                 pend_tt = (tag >> 32) & 0x3;
201                 if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
202                         if (pend_tt == SSO_TT_ATOMIC ||
203                             pend_tt == SSO_TT_ORDERED)
204                                 cnxk_sso_hws_swtag_untag(
205                                         base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
206                         plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
207                 }
208
209                 /* Wait for desched to complete. */
210                 do {
211                         pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
212                 } while (pend_state & BIT_ULL(58));
213         }
214 }
215
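/* Derive the advertised number of event ports and queues from the SSO
 * resources; dual-workslot mode consumes two HWS per event port.
 */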
216 void
217 cn9k_sso_set_rsrc(void *arg)
218 {
219         struct cnxk_sso_evdev *dev = arg;
220
221         if (dev->dual_ws)
222                 dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
223         else
224                 dev->max_event_ports = dev->sso.max_hws;
225         dev->max_event_queues =
226                 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
227                               RTE_EVENT_MAX_QUEUES_PER_DEV :
228                               dev->sso.max_hwgrp;
229 }
230
231 static int
232 cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
233 {
234         struct cnxk_sso_evdev *dev = arg;
235
236         if (dev->dual_ws)
237                 hws = hws * CN9K_DUAL_WS_NB_WS;
238
239         return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
240 }
241
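/* Copy the Tx adapter data into each event port's private area. The
 * per-port allocation (reached via the HWS cookie) is grown with
 * rte_realloc_socket() so the adapter data sits right after the port
 * structure.
 */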
242 static int
243 cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
244 {
245         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
246         int i;
247
248         if (dev->tx_adptr_data == NULL)
249                 return 0;
250
251         for (i = 0; i < dev->nb_event_ports; i++) {
252                 if (dev->dual_ws) {
253                         struct cn9k_sso_hws_dual *dws =
254                                 event_dev->data->ports[i];
255                         void *ws_cookie;
256
257                         ws_cookie = cnxk_sso_hws_get_cookie(dws);
258                         ws_cookie = rte_realloc_socket(
259                                 ws_cookie,
260                                 sizeof(struct cnxk_sso_hws_cookie) +
261                                         sizeof(struct cn9k_sso_hws_dual) +
262                                         dev->tx_adptr_data_sz,
263                                 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
264                         if (ws_cookie == NULL)
265                                 return -ENOMEM;
266                         dws = RTE_PTR_ADD(ws_cookie,
267                                           sizeof(struct cnxk_sso_hws_cookie));
268                         memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
269                                dev->tx_adptr_data_sz);
270                         event_dev->data->ports[i] = dws;
271                 } else {
272                         struct cn9k_sso_hws *ws = event_dev->data->ports[i];
273                         void *ws_cookie;
274
275                         ws_cookie = cnxk_sso_hws_get_cookie(ws);
276                         ws_cookie = rte_realloc_socket(
277                                 ws_cookie,
278                                 sizeof(struct cnxk_sso_hws_cookie) +
279                                         sizeof(struct cn9k_sso_hws_dual) +
280                                         dev->tx_adptr_data_sz,
281                                 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
282                         if (ws_cookie == NULL)
283                                 return -ENOMEM;
284                         ws = RTE_PTR_ADD(ws_cookie,
285                                          sizeof(struct cnxk_sso_hws_cookie));
286                         memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
287                                dev->tx_adptr_data_sz);
288                         event_dev->data->ports[i] = ws;
289                 }
290         }
291         rte_mb();
292
293         return 0;
294 }
295
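/* Populate the eventdev fast-path function pointers. The dequeue/enqueue
 * and Tx adapter handlers are chosen from the per-offload tables below
 * based on the negotiated Rx/Tx offload flags, timeout dequeue, crypto
 * adapter internal-port usage and single vs dual workslot mode.
 */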
296 static void
297 cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
298 {
299         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
300         /* Single WS modes */
301         const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
302 #define R(name, flags)[flags] = cn9k_sso_hws_deq_##name,
303                 NIX_RX_FASTPATH_MODES
304 #undef R
305         };
306
307         const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
308 #define R(name, flags)[flags] = cn9k_sso_hws_deq_burst_##name,
309                 NIX_RX_FASTPATH_MODES
310 #undef R
311         };
312
313         const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
314 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_##name,
315                 NIX_RX_FASTPATH_MODES
316 #undef R
317         };
318
319         const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
320 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_burst_##name,
321                 NIX_RX_FASTPATH_MODES
322 #undef R
323         };
324
325         const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
326 #define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_##name,
327                 NIX_RX_FASTPATH_MODES
328 #undef R
329         };
330
331         const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
332 #define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_burst_##name,
333                 NIX_RX_FASTPATH_MODES
334 #undef R
335         };
336
337         const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
338 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_##name,
339                 NIX_RX_FASTPATH_MODES
340 #undef R
341         };
342
343         const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
344 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_burst_##name,
345                 NIX_RX_FASTPATH_MODES
346 #undef R
347         };
348
349         const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
350 #define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_##name,
351                 NIX_RX_FASTPATH_MODES
352 #undef R
353         };
354
355         const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
356 #define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_burst_##name,
357                 NIX_RX_FASTPATH_MODES
358 #undef R
359         };
360
361         const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
362 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_##name,
363                 NIX_RX_FASTPATH_MODES
364 #undef R
365         };
366
367         const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
368 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
369                 NIX_RX_FASTPATH_MODES
370 #undef R
371         };
372
373         const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
374 #define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_##name,
375                 NIX_RX_FASTPATH_MODES
376 #undef R
377         };
378
379         const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
380 #define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_burst_##name,
381                 NIX_RX_FASTPATH_MODES
382 #undef R
383         };
384
385         const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
386 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_##name,
387                 NIX_RX_FASTPATH_MODES
388 #undef R
389         };
390
391         const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
392 #define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_burst_##name,
393                 NIX_RX_FASTPATH_MODES
394 #undef R
395         };
396
397         /* Dual WS modes */
398         const event_dequeue_t sso_hws_dual_deq[NIX_RX_OFFLOAD_MAX] = {
399 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_##name,
400                 NIX_RX_FASTPATH_MODES
401 #undef R
402         };
403
404         const event_dequeue_burst_t sso_hws_dual_deq_burst[NIX_RX_OFFLOAD_MAX] = {
405 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_burst_##name,
406                 NIX_RX_FASTPATH_MODES
407 #undef R
408         };
409
410         const event_dequeue_t sso_hws_dual_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
411 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_##name,
412                 NIX_RX_FASTPATH_MODES
413 #undef R
414         };
415
416         const event_dequeue_burst_t sso_hws_dual_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
417 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
418                 NIX_RX_FASTPATH_MODES
419 #undef R
420         };
421
422         const event_dequeue_t sso_hws_dual_deq_ca[NIX_RX_OFFLOAD_MAX] = {
423 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_##name,
424                 NIX_RX_FASTPATH_MODES
425 #undef R
426         };
427
428         const event_dequeue_burst_t sso_hws_dual_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
429 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_burst_##name,
430                 NIX_RX_FASTPATH_MODES
431 #undef R
432         };
433
434         const event_dequeue_t sso_hws_dual_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
435 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_##name,
436                 NIX_RX_FASTPATH_MODES
437 #undef R
438         };
439
440         const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
441 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_burst_##name,
442                 NIX_RX_FASTPATH_MODES
443 #undef R
444         };
445
446         const event_dequeue_t sso_hws_dual_deq_seg[NIX_RX_OFFLOAD_MAX] = {
447 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_##name,
448                 NIX_RX_FASTPATH_MODES
449 #undef R
450         };
451
452         const event_dequeue_burst_t sso_hws_dual_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
453 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_burst_##name,
454                 NIX_RX_FASTPATH_MODES
455 #undef R
456         };
457
458         const event_dequeue_t sso_hws_dual_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
459 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
460                 NIX_RX_FASTPATH_MODES
461 #undef R
462         };
463
464         const event_dequeue_burst_t sso_hws_dual_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
465 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
466                 NIX_RX_FASTPATH_MODES
467 #undef R
468         };
469
470         const event_dequeue_t sso_hws_dual_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
471 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_##name,
472                 NIX_RX_FASTPATH_MODES
473 #undef R
474         };
475
476         const event_dequeue_burst_t sso_hws_dual_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
477 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
478                 NIX_RX_FASTPATH_MODES
479 #undef R
480         };
481
482         const event_dequeue_t sso_hws_dual_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
483 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_##name,
484                 NIX_RX_FASTPATH_MODES
485 #undef R
486         };
487
488         const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
489 #define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_burst_##name,
490                 NIX_RX_FASTPATH_MODES
491 #undef R
492         };
493
494         /* Tx modes */
495         const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
496 #define T(name, sz, flags)[flags] = cn9k_sso_hws_tx_adptr_enq_##name,
497                 NIX_TX_FASTPATH_MODES
498 #undef T
499         };
500
501         const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
502 #define T(name, sz, flags)[flags] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
503                 NIX_TX_FASTPATH_MODES
504 #undef T
505         };
506
507         const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
508 #define T(name, sz, flags)[flags] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
509                 NIX_TX_FASTPATH_MODES
510 #undef T
511         };
512
513         const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
514 #define T(name, sz, flags)[flags] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
515                 NIX_TX_FASTPATH_MODES
516 #undef T
517         };
518
519         event_dev->enqueue = cn9k_sso_hws_enq;
520         event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
521         event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
522         event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
523         if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
524                 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
525                 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
526                                       sso_hws_deq_seg_burst);
527                 if (dev->is_timeout_deq) {
528                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
529                                               sso_hws_deq_tmo_seg);
530                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
531                                               sso_hws_deq_tmo_seg_burst);
532                 }
533                 if (dev->is_ca_internal_port) {
534                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
535                                               sso_hws_deq_ca_seg);
536                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
537                                               sso_hws_deq_ca_seg_burst);
538                 }
539
540                 if (dev->is_ca_internal_port && dev->is_timeout_deq) {
541                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
542                                               sso_hws_deq_tmo_ca_seg);
543                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
544                                               sso_hws_deq_tmo_ca_seg_burst);
545                 }
546         } else {
547                 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
548                 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
549                                       sso_hws_deq_burst);
550                 if (dev->is_timeout_deq) {
551                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
552                                               sso_hws_deq_tmo);
553                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
554                                               sso_hws_deq_tmo_burst);
555                 }
556                 if (dev->is_ca_internal_port) {
557                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
558                                               sso_hws_deq_ca);
559                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
560                                               sso_hws_deq_ca_burst);
561                 }
562
563                 if (dev->is_ca_internal_port && dev->is_timeout_deq) {
564                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
565                                               sso_hws_deq_tmo_ca);
566                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
567                                               sso_hws_deq_tmo_ca_burst);
568                 }
569         }
570         event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;
571
572         if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
573                 CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
574                                       sso_hws_tx_adptr_enq_seg);
575         else
576                 CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
577                                       sso_hws_tx_adptr_enq);
578
579         if (dev->dual_ws) {
580                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
581                 event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
582                 event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
583                 event_dev->enqueue_forward_burst =
584                         cn9k_sso_hws_dual_enq_fwd_burst;
585                 event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;
586
587                 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
588                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
589                                               sso_hws_dual_deq_seg);
590                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
591                                               sso_hws_dual_deq_seg_burst);
592                         if (dev->is_timeout_deq) {
593                                 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
594                                                       sso_hws_dual_deq_tmo_seg);
595                                 CN9K_SET_EVDEV_DEQ_OP(
596                                         dev, event_dev->dequeue_burst,
597                                         sso_hws_dual_deq_tmo_seg_burst);
598                         }
599                         if (dev->is_ca_internal_port) {
600                                 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
601                                                       sso_hws_dual_deq_ca_seg);
602                                 CN9K_SET_EVDEV_DEQ_OP(
603                                         dev, event_dev->dequeue_burst,
604                                         sso_hws_dual_deq_ca_seg_burst);
605                         }
606                         if (dev->is_ca_internal_port && dev->is_timeout_deq) {
607                                 CN9K_SET_EVDEV_DEQ_OP(
608                                         dev, event_dev->dequeue,
609                                         sso_hws_dual_deq_tmo_ca_seg);
610                                 CN9K_SET_EVDEV_DEQ_OP(
611                                         dev, event_dev->dequeue_burst,
612                                         sso_hws_dual_deq_tmo_ca_seg_burst);
613                         }
614                 } else {
615                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
616                                               sso_hws_dual_deq);
617                         CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
618                                               sso_hws_dual_deq_burst);
619                         if (dev->is_timeout_deq) {
620                                 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
621                                                       sso_hws_dual_deq_tmo);
622                                 CN9K_SET_EVDEV_DEQ_OP(
623                                         dev, event_dev->dequeue_burst,
624                                         sso_hws_dual_deq_tmo_burst);
625                         }
626                         if (dev->is_ca_internal_port) {
627                                 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
628                                                       sso_hws_dual_deq_ca);
629                                 CN9K_SET_EVDEV_DEQ_OP(
630                                         dev, event_dev->dequeue_burst,
631                                         sso_hws_dual_deq_ca_burst);
632                         }
633                         if (dev->is_ca_internal_port && dev->is_timeout_deq) {
634                                 CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
635                                                       sso_hws_dual_deq_tmo_ca);
636                                 CN9K_SET_EVDEV_DEQ_OP(
637                                         dev, event_dev->dequeue_burst,
638                                         sso_hws_dual_deq_tmo_ca_burst);
639                         }
640                 }
641
642                 if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
643                         CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
644                                               sso_hws_dual_tx_adptr_enq_seg);
645                 else
646                         CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
647                                               sso_hws_dual_tx_adptr_enq);
648         }
649
650         event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
651         rte_mb();
652 }
653
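/* Allocate and initialise the per-port HWS (or dual-HWS) structure. An
 * extra cache line is allocated at the front for the cnxk SSO cookie and
 * the returned pointer is advanced past it.
 */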
654 static void *
655 cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
656 {
657         struct cnxk_sso_evdev *dev = arg;
658         struct cn9k_sso_hws_dual *dws;
659         struct cn9k_sso_hws *ws;
660         void *data;
661
662         if (dev->dual_ws) {
663                 dws = rte_zmalloc("cn9k_dual_ws",
664                                   sizeof(struct cn9k_sso_hws_dual) +
665                                           RTE_CACHE_LINE_SIZE,
666                                   RTE_CACHE_LINE_SIZE);
667                 if (dws == NULL) {
668                         plt_err("Failed to alloc memory for port=%d", port_id);
669                         return NULL;
670                 }
671
672                 dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
673                 dws->base[0] = roc_sso_hws_base_get(
674                         &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
675                 dws->base[1] = roc_sso_hws_base_get(
676                         &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
677                 dws->hws_id = port_id;
678                 dws->swtag_req = 0;
679                 dws->vws = 0;
680
681                 data = dws;
682         } else {
683                 /* Allocate event port memory */
684                 ws = rte_zmalloc("cn9k_ws",
685                                  sizeof(struct cn9k_sso_hws) +
686                                          RTE_CACHE_LINE_SIZE,
687                                  RTE_CACHE_LINE_SIZE);
688                 if (ws == NULL) {
689                         plt_err("Failed to alloc memory for port=%d", port_id);
690                         return NULL;
691                 }
692
693                 /* First cache line is reserved for cookie */
694                 ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
695                 ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
696                 ws->hws_id = port_id;
697                 ws->swtag_req = 0;
698
699                 data = ws;
700         }
701
702         return data;
703 }
704
705 static void
706 cn9k_sso_info_get(struct rte_eventdev *event_dev,
707                   struct rte_event_dev_info *dev_info)
708 {
709         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
710
711         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
712         cnxk_sso_info_get(dev, dev_info);
713 }
714
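/* eventdev configure: validate the requested configuration, initialise the
 * SSO HWS/HWGRP resources, allocate XAQ buffers, set up the event ports
 * and restore any port-queue links that existed before a reconfigure.
 */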
715 static int
716 cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
717 {
718         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
719         int rc;
720
721         rc = cnxk_sso_dev_validate(event_dev);
722         if (rc < 0) {
723                 plt_err("Invalid event device configuration");
724                 return -EINVAL;
725         }
726
727         rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
728         if (rc < 0) {
729                 plt_err("Failed to initialize SSO resources");
730                 return -ENODEV;
731         }
732
733         rc = cnxk_sso_xaq_allocate(dev);
734         if (rc < 0)
735                 goto cnxk_rsrc_fini;
736
737         rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
738                                     cn9k_sso_hws_setup);
739         if (rc < 0)
740                 goto cnxk_rsrc_fini;
741
742         /* Restore any prior port-queue mapping. */
743         cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);
744
745         dev->configured = 1;
746         rte_mb();
747
748         return 0;
749 cnxk_rsrc_fini:
750         roc_sso_rsrc_fini(&dev->sso);
751         dev->nb_event_ports = 0;
752         return rc;
753 }
754
755 static int
756 cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
757                     const struct rte_event_port_conf *port_conf)
758 {
759
760         RTE_SET_USED(port_conf);
761         return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
762 }
763
764 static void
765 cn9k_sso_port_release(void *port)
766 {
767         struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
768         struct cnxk_sso_evdev *dev;
769
770         if (port == NULL)
771                 return;
772
773         dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
774         if (!gws_cookie->configured)
775                 goto free;
776
777         cn9k_sso_hws_release(dev, port);
778         memset(gws_cookie, 0, sizeof(*gws_cookie));
779 free:
780         rte_free(gws_cookie);
781 }
782
783 static int
784 cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
785                    const uint8_t queues[], const uint8_t priorities[],
786                    uint16_t nb_links)
787 {
788         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
789         uint16_t hwgrp_ids[nb_links];
790         uint16_t link;
791
792         RTE_SET_USED(priorities);
793         for (link = 0; link < nb_links; link++)
794                 hwgrp_ids[link] = queues[link];
795         nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
796
797         return (int)nb_links;
798 }
799
800 static int
801 cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
802                      uint8_t queues[], uint16_t nb_unlinks)
803 {
804         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
805         uint16_t hwgrp_ids[nb_unlinks];
806         uint16_t unlink;
807
808         for (unlink = 0; unlink < nb_unlinks; unlink++)
809                 hwgrp_ids[unlink] = queues[unlink];
810         nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
811
812         return (int)nb_unlinks;
813 }
814
815 static int
816 cn9k_sso_start(struct rte_eventdev *event_dev)
817 {
818         int rc;
819
820         rc = cn9k_sso_updt_tx_adptr_data(event_dev);
821         if (rc < 0)
822                 return rc;
823
824         rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
825                             cn9k_sso_hws_flush_events);
826         if (rc < 0)
827                 return rc;
828
829         cn9k_sso_fp_fns_set(event_dev);
830
831         return rc;
832 }
833
834 static void
835 cn9k_sso_stop(struct rte_eventdev *event_dev)
836 {
837         cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
838 }
839
840 static int
841 cn9k_sso_close(struct rte_eventdev *event_dev)
842 {
843         return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
844 }
845
846 static int
847 cn9k_sso_selftest(void)
848 {
849         return cnxk_sso_selftest(RTE_STR(event_cn9k));
850 }
851
852 static int
853 cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
854                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
855 {
856         int rc;
857
858         RTE_SET_USED(event_dev);
859         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
860         if (rc)
861                 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
862         else
863                 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
864                         RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
865                         RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
866
867         return 0;
868 }
869
870 static void
871 cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
872                       void *tstmp_info)
873 {
874         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
875         int i;
876
877         for (i = 0; i < dev->nb_event_ports; i++) {
878                 if (dev->dual_ws) {
879                         struct cn9k_sso_hws_dual *dws =
880                                 event_dev->data->ports[i];
881                         dws->lookup_mem = lookup_mem;
882                         dws->tstamp = tstmp_info;
883                 } else {
884                         struct cn9k_sso_hws *ws = event_dev->data->ports[i];
885                         ws->lookup_mem = lookup_mem;
886                         ws->tstamp = tstmp_info;
887                 }
888         }
889 }
890
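/* Register an Rx queue with the Rx adapter, then share the queue's lookup
 * memory and timestamp info with every event port and refresh the
 * fast-path functions.
 */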
891 static int
892 cn9k_sso_rx_adapter_queue_add(
893         const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
894         int32_t rx_queue_id,
895         const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
896 {
897         struct cn9k_eth_rxq *rxq;
898         void *lookup_mem;
899         void *tstmp_info;
900         int rc;
901
902         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
903         if (rc)
904                 return -EINVAL;
905
906         rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
907                                            queue_conf);
908         if (rc)
909                 return -EINVAL;
910
911         rxq = eth_dev->data->rx_queues[0];
912         lookup_mem = rxq->lookup_mem;
913         tstmp_info = rxq->tstamp;
914         cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
915         cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
916
917         return 0;
918 }
919
920 static int
921 cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
922                               const struct rte_eth_dev *eth_dev,
923                               int32_t rx_queue_id)
924 {
925         int rc;
926
927         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
928         if (rc)
929                 return -EINVAL;
930
931         return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
932 }
933
934 static int
935 cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
936                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
937 {
938         int ret;
939
940         RTE_SET_USED(dev);
941         ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
942         if (ret)
943                 *caps = 0;
944         else
945                 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
946
947         return 0;
948 }
949
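/* Adjust the Tx queue SQB limit used for flow control when the queue is
 * bound to (ena) or released from (!ena) the Tx adapter. A negative
 * tx_queue_id applies the update to every Tx queue of the device.
 */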
950 static void
951 cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
952                        bool ena)
953 {
954         struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
955         struct cn9k_eth_txq *txq;
956         struct roc_nix_sq *sq;
957         int i;
958
959         if (tx_queue_id < 0) {
960                 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
961                         cn9k_sso_txq_fc_update(eth_dev, i, ena);
962         } else {
963                 uint16_t sq_limit;
964
965                 sq = &cnxk_eth_dev->sqs[tx_queue_id];
966                 txq = eth_dev->data->tx_queues[tx_queue_id];
967                 sq_limit =
968                         ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
969                                     sq->nb_sqb_bufs;
970                 txq->nb_sqb_bufs_adj =
971                         sq_limit -
972                         RTE_ALIGN_MUL_CEIL(sq_limit,
973                                            (1ULL << txq->sqes_per_sqb_log2)) /
974                                 (1ULL << txq->sqes_per_sqb_log2);
975                 txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
976         }
977 }
978
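/* Bind a Tx queue to the Tx adapter: register it with the common cnxk
 * layer, reconcile the timestamp offload across all ports, update the SQB
 * flow-control limit and refresh the per-port Tx adapter data and
 * fast-path functions.
 */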
979 static int
980 cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
981                               const struct rte_eth_dev *eth_dev,
982                               int32_t tx_queue_id)
983 {
984         struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
985         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
986         uint64_t tx_offloads;
987         int rc;
988
989         RTE_SET_USED(id);
990         rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
991         if (rc < 0)
992                 return rc;
993
994         /* Tx timestamp offload can stay enabled only if every port has it enabled. */
995         tx_offloads = cnxk_eth_dev->tx_offload_flags;
996         if (dev->tx_adptr_configured) {
997                 uint8_t tstmp_req = !!(tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
998                 uint8_t tstmp_ena =
999                         !!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
1000
1001                 if (tstmp_ena && !tstmp_req)
1002                         dev->tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
1003                 else if (!tstmp_ena && tstmp_req)
1004                         tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
1005         }
1006
1007         dev->tx_offloads |= tx_offloads;
1008         cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
1009         rc = cn9k_sso_updt_tx_adptr_data(event_dev);
1010         if (rc < 0)
1011                 return rc;
1012         cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
1013         dev->tx_adptr_configured = 1;
1014
1015         return 0;
1016 }
1017
1018 static int
1019 cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
1020                               const struct rte_eth_dev *eth_dev,
1021                               int32_t tx_queue_id)
1022 {
1023         int rc;
1024
1025         RTE_SET_USED(id);
1026         rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
1027         if (rc < 0)
1028                 return rc;
1029         cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
1030         return cn9k_sso_updt_tx_adptr_data(event_dev);
1031 }
1032
1033 static int
1034 cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
1035                              const struct rte_cryptodev *cdev, uint32_t *caps)
1036 {
1037         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
1038         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
1039
1040         *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
1041                 RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
1042
1043         return 0;
1044 }
1045
1046 static int
1047 cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
1048                            const struct rte_cryptodev *cdev,
1049                            int32_t queue_pair_id, const struct rte_event *event)
1050 {
1051         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
1052
1053         RTE_SET_USED(event);
1054
1055         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
1056         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
1057
1058         dev->is_ca_internal_port = 1;
1059         cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
1060
1061         return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
1062 }
1063
1064 static int
1065 cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
1066                            const struct rte_cryptodev *cdev,
1067                            int32_t queue_pair_id)
1068 {
1069         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
1070         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
1071
1072         return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
1073 }
1074
1075 static struct eventdev_ops cn9k_sso_dev_ops = {
1076         .dev_infos_get = cn9k_sso_info_get,
1077         .dev_configure = cn9k_sso_dev_configure,
1078         .queue_def_conf = cnxk_sso_queue_def_conf,
1079         .queue_setup = cnxk_sso_queue_setup,
1080         .queue_release = cnxk_sso_queue_release,
1081         .port_def_conf = cnxk_sso_port_def_conf,
1082         .port_setup = cn9k_sso_port_setup,
1083         .port_release = cn9k_sso_port_release,
1084         .port_link = cn9k_sso_port_link,
1085         .port_unlink = cn9k_sso_port_unlink,
1086         .timeout_ticks = cnxk_sso_timeout_ticks,
1087
1088         .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
1089         .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
1090         .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
1091         .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
1092         .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
1093
1094         .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
1095         .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
1096         .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,
1097
1098         .timer_adapter_caps_get = cnxk_tim_caps_get,
1099
1100         .crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
1101         .crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
1102         .crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,
1103
1104         .dump = cnxk_sso_dump,
1105         .dev_start = cn9k_sso_start,
1106         .dev_stop = cn9k_sso_stop,
1107         .dev_close = cn9k_sso_close,
1108         .dev_selftest = cn9k_sso_selftest,
1109 };
1110
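/* Device init: CN9K has a 128 byte cache line, so refuse to run a build
 * compiled for 64 byte lines. The primary process probes the SSO
 * resources; secondary processes only attach the fast-path functions.
 */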
1111 static int
1112 cn9k_sso_init(struct rte_eventdev *event_dev)
1113 {
1114         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
1115         int rc;
1116
1117         if (RTE_CACHE_LINE_SIZE != 128) {
1118                 plt_err("Driver not compiled for CN9K");
1119                 return -EFAULT;
1120         }
1121
1122         rc = roc_plt_init();
1123         if (rc < 0) {
1124                 plt_err("Failed to initialize platform model");
1125                 return rc;
1126         }
1127
1128         event_dev->dev_ops = &cn9k_sso_dev_ops;
1129         /* For secondary processes, the primary has done all the work */
1130         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1131                 cn9k_sso_fp_fns_set(event_dev);
1132                 return 0;
1133         }
1134
1135         rc = cnxk_sso_init(event_dev);
1136         if (rc < 0)
1137                 return rc;
1138
1139         cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
1140         if (!dev->max_event_ports || !dev->max_event_queues) {
1141                 plt_err("Not enough eventdev resources: queues=%d ports=%d",
1142                         dev->max_event_queues, dev->max_event_ports);
1143                 cnxk_sso_fini(event_dev);
1144                 return -ENODEV;
1145         }
1146
1147         plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
1148                     event_dev->data->name, dev->max_event_queues,
1149                     dev->max_event_ports);
1150
1151         return 0;
1152 }
1153
1154 static int
1155 cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1156 {
1157         return rte_event_pmd_pci_probe(
1158                 pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
1159 }
1160
1161 static const struct rte_pci_id cn9k_pci_sso_map[] = {
1162         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
1163         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
1164         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
1165         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
1166         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
1167         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
1168         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
1169         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
1170         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
1171         CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
1172         {
1173                 .vendor_id = 0,
1174         },
1175 };
1176
1177 static struct rte_pci_driver cn9k_pci_sso = {
1178         .id_table = cn9k_pci_sso_map,
1179         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
1180         .probe = cn9k_sso_probe,
1181         .remove = cnxk_sso_remove,
1182 };
1183
1184 RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
1185 RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
1186 RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
1187 RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
1188                               CNXK_SSO_GGRP_QOS "=<string>"
1189                               CNXK_SSO_FORCE_BP "=1"
1190                               CN9K_SSO_SINGLE_WS "=1"
1191                               CNXK_TIM_DISABLE_NPA "=1"
1192                               CNXK_TIM_CHNK_SLOTS "=<int>"
1193                               CNXK_TIM_RINGS_LMT "=<int>"
1194                               CNXK_TIM_STATS_ENA "=1");
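
/*
 * Illustrative usage (a sketch, not part of the driver): the devargs
 * registered above can be appended to the PCI allow-list entry on the EAL
 * command line. The PCI address and values below are placeholders:
 *
 *   dpdk-test-eventdev -a 0002:0e:00.0,single_ws=1,xae_cnt=8192 -- \
 *       --test=order_queue --plcores=1 --wlcores=2,3
 *
 * Here "single_ws" corresponds to CN9K_SSO_SINGLE_WS and "xae_cnt" to
 * CNXK_SSO_XAE_CNT; see the cnxk eventdev guide for the full list.
 */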