net/cnxk: add cn9k template Tx functions to build
drivers/event/cnxk/cn9k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

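/*
 * In dual workslot mode each event port is backed by a pair of hardware
 * workslots: port x maps to HWS (2 * x) and (2 * x + 1), e.g. port 3
 * drives HWS 6 and 7.
 */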
#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)

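/*
 * Fastpath handlers are selected by using the enabled Rx/Tx offload flag
 * bits as a direct index into the template-function tables built in
 * cn9k_sso_fp_fns_set() below.
 */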
#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
        deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]

#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
        enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]

static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_link(&dev->sso,
                                      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
                                      nb_link);
                rc |= roc_sso_hws_link(&dev->sso,
                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                       map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_unlink(&dev->sso,
                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                        map, nb_link);
                rc |= roc_sso_hws_unlink(&dev->sso,
                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                         map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

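/*
 * Cache the XAQ flow-control parameters in the workslot and program the
 * get-work timeout. SSOW_LF_GWS_NW_TIM appears to take a microsecond
 * count minus one, hence the NSEC2USEC() - 1 conversion of the
 * configured dequeue timeout.
 */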
static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t val;

        /* Set get_work tmo for HWS */
        val = dev->deq_tmo_ns ? NSEC2USEC(dev->deq_tmo_ns) - 1 : 0;
        if (dev->dual_ws) {
                dws = hws;
                dws->grp_base = grp_base;
                dws->fc_mem = (uint64_t *)dev->fc_iova;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
                plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
        } else {
                ws = hws;
                ws->grp_base = grp_base;
                ws->fc_mem = (uint64_t *)dev->fc_iova;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
        }
}

static void
cn9k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int i;

        if (dev->dual_ws) {
                dws = hws;
                for (i = 0; i < dev->nb_event_queues; i++) {
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                           (uint16_t *)&i, 1);
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                           (uint16_t *)&i, 1);
                }
                memset(dws, 0, sizeof(*dws));
        } else {
                ws = hws;
                for (i = 0; i < dev->nb_event_queues; i++)
                        roc_sso_hws_unlink(&dev->sso, ws->hws_id,
                                           (uint16_t *)&i, 1);
                memset(ws, 0, sizeof(*ws));
        }
}

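/*
 * Drain all events held by a HWGRP: stop further scheduling via
 * SSO_LF_GGRP_QCTL, then keep issuing grouped GET_WORK requests
 * (bit 18 = grouped, bit 16 = wait) and flushing the returned tags until
 * the admission, conflict and in-flight counters all drain to zero. The
 * 0x3FFF3FFF0000 mask keeps only the CQ and DS count fields of
 * SSO_LF_GGRP_INT_CNT.
 */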
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                          cnxk_handle_event_t fn, void *arg)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uintptr_t ws_base;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        if (dev->dual_ws) {
                dws = hws;
                ws_base = dws->base[0];
        } else {
                ws = hws;
                ws_base = ws->base;
        }

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
                cn9k_sso_hws_get_work_empty(ws_base, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(
                                ws_base + SSOW_LF_GWS_TAG,
                                ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
                do {
                        val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}

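/*
 * Quiesce a workslot: wait for any in-flight getwork, switch-tag or
 * deschedule to finish, then untag and deschedule whatever work the slot
 * still holds so it can be reconfigured from a clean state.
 */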
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t pend_state;
        uint8_t pend_tt;
        uintptr_t base;
        uint64_t tag;
        uint8_t i;

        dws = hws;
        ws = hws;
        for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
                base = dev->dual_ws ? dws->base[i] : ws->base;
                /* Wait till getwork/swtp/waitw/desched completes. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                                       BIT_ULL(56)));

                tag = plt_read64(base + SSOW_LF_GWS_TAG);
                pend_tt = (tag >> 32) & 0x3;
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }

                /* Wait for desched to complete. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & BIT_ULL(58));
        }
}

void
cn9k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
        else
                dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                              RTE_EVENT_MAX_QUEUES_PER_DEV :
                              dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                hws = hws * CN9K_DUAL_WS_NB_WS;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

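/*
 * Grow each port's private area so that the Tx adapter table (one
 * uint64_t per port/queue pair) sits directly behind the HWS structure.
 * The cookie returned by cnxk_sso_hws_get_cookie() is the true start of
 * the allocation, so it is what gets passed to rte_realloc_socket().
 */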
static int
cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        if (dev->tx_adptr_data == NULL)
                return 0;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(dws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        dws = RTE_PTR_ADD(ws_cookie,
                                          sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = dws;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(ws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        ws = RTE_PTR_ADD(ws_cookie,
                                         sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = ws;
                }
        }
        rte_mb();

        return 0;
}

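/*
 * Build lookup tables of template fastpath functions indexed by offload
 * flags. Each R()/T() entry from NIX_RX_FASTPATH_MODES and
 * NIX_TX_FASTPATH_MODES expands to a designated initializer,
 * conceptually:
 *
 *   [flags] = cn9k_sso_hws_deq_<name>,
 *
 * so a port's offload flag combination indexes straight to its
 * specialized handler.
 */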
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        /* Single WS modes */
        const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Dual WS modes */
        const event_dequeue_t sso_hws_dual_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn9k_sso_hws_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_seg_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_seg_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_seg_burst);
                }

                if (dev->is_ca_internal_port && dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_ca_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_ca_seg_burst);
                }
        } else {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_burst);
                }

                if (dev->is_ca_internal_port && dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_ca);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_ca_burst);
                }
        }
        event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq_seg);
        else
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq);

        if (dev->dual_ws) {
                event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                        cn9k_sso_hws_dual_enq_fwd_burst;
                event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_seg_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_seg_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_seg_burst);
                        }
                        if (dev->is_ca_internal_port && dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue,
                                        sso_hws_dual_deq_tmo_ca_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_ca_seg_burst);
                        }
                } else {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_burst);
                        }
                        if (dev->is_ca_internal_port && dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo_ca);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_ca_burst);
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq_seg);
                else
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq);
        }

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
        rte_mb();
}

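/*
 * Event port memory layout: one cache line of cnxk_sso_hws_cookie
 * followed by the HWS structure itself. The pointer handed back to the
 * eventdev layer points past the cookie; cnxk_sso_hws_get_cookie() walks
 * back to it on release.
 */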
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        void *data;

        if (dev->dual_ws) {
                dws = rte_zmalloc("cn9k_dual_ws",
                                  sizeof(struct cn9k_sso_hws_dual) +
                                          RTE_CACHE_LINE_SIZE,
                                  RTE_CACHE_LINE_SIZE);
                if (dws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
                dws->base[0] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
                dws->base[1] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
                dws->hws_id = port_id;
                dws->swtag_req = 0;
                dws->vws = 0;

                data = dws;
        } else {
                /* Allocate event port memory */
                ws = rte_zmalloc("cn9k_ws",
                                 sizeof(struct cn9k_sso_hws) +
                                         RTE_CACHE_LINE_SIZE,
                                 RTE_CACHE_LINE_SIZE);
                if (ws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
                ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
                ws->hws_id = port_id;
                ws->swtag_req = 0;

                data = ws;
        }

        return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
                                    cn9k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn9k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

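/*
 * The eventdev API passes queue ids as uint8_t but the ROC layer links
 * HWGRPs by uint16_t id, so the ids are widened into a local array
 * before calling cn9k_sso_hws_link()/cn9k_sso_hws_unlink().
 */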
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;

        rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
                            cn9k_sso_hws_flush_events);
        if (rc < 0)
                return rc;

        cn9k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static int
cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;

        return 0;
}

static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
                      void *tstmp_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        dws->lookup_mem = lookup_mem;
                        dws->tstamp = tstmp_info;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        ws->lookup_mem = lookup_mem;
                        ws->tstamp = tstmp_info;
                }
        }
}

static int
cn9k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn9k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;

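        /*
         * lookup_mem and the timestamp info are shared across the port's
         * Rx queues, so querying queue 0 is sufficient here.
         */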
        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}

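/*
 * Derive the SQ buffer budget used for Tx flow control. The
 * RTE_ALIGN_MUL_CEIL() term works out to ceil(sq_limit / sqes_per_sqb),
 * i.e. roughly one SQE per SQB is set aside (presumably for the
 * next-buffer pointer), and only 70% of the remainder is kept as
 * headroom.
 */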
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
                       bool ena)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        int i;

        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        cn9k_sso_txq_fc_update(eth_dev, i, ena);
        } else {
                uint16_t sq_limit;

                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq_limit =
                        ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
                                    sq->nb_sqb_bufs;
                txq->nb_sqb_bufs_adj =
                        sq_limit -
                        RTE_ALIGN_MUL_CEIL(sq_limit,
                                           (1ULL << txq->sqes_per_sqb_log2)) /
                                (1ULL << txq->sqes_per_sqb_log2);
                txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
        }
}

static int
cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
        return cn9k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_cryptodev *cdev, uint32_t *caps)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
                RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

        return 0;
}

static int
cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id, const struct rte_event *event)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(event);

        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        dev->is_ca_internal_port = 1;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
        .crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
        .crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

        .dump = cnxk_sso_dump,
        .dev_start = cn9k_sso_start,
        .dev_stop = cn9k_sso_stop,
        .dev_close = cn9k_sso_close,
        .dev_selftest = cn9k_sso_selftest,
};

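/*
 * CN9K cores use a 128-byte cache line; a build with any other
 * RTE_CACHE_LINE_SIZE cannot drive this device. Secondary processes only
 * need the fastpath function pointers since the primary has already
 * configured the hardware.
 */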
static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN9K_SSO_SINGLE_WS "=1"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");