event/cnxk: rework enqueue path
drivers/event/cnxk/cn9k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

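/* CN9K SSO can pair two hardware workslots (GWS) per event port; in
 * dual-workslot mode, port x uses hardware workslots 2x and 2x + 1.
 */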
#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)

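/* Pick the fast-path function matching the enabled Rx/Tx offloads: each
 * offload flag contributes one index into a lookup table of specialized
 * implementations generated from NIX_RX/TX_FASTPATH_MODES.
 */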
#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
        (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])

#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
        (enq_op = enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])

static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_link(&dev->sso,
                                      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
                                      nb_link);
                rc |= roc_sso_hws_link(&dev->sso,
                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                       map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_unlink(&dev->sso,
                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                        map, nb_link);
                rc |= roc_sso_hws_unlink(&dev->sso,
                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                         map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t val;

        /* Set get_work tmo for HWS */
        val = dev->deq_tmo_ns ? NSEC2USEC(dev->deq_tmo_ns) - 1 : 0;
        if (dev->dual_ws) {
                dws = hws;
                dws->grp_base = grp_base;
                dws->fc_mem = (uint64_t *)dev->fc_iova;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
                plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
        } else {
                ws = hws;
                ws->grp_base = grp_base;
                ws->fc_mem = (uint64_t *)dev->fc_iova;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
        }
}

static void
cn9k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int i;

        if (dev->dual_ws) {
                dws = hws;
                for (i = 0; i < dev->nb_event_queues; i++) {
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                           (uint16_t *)&i, 1);
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                           (uint16_t *)&i, 1);
                }
                memset(dws, 0, sizeof(*dws));
        } else {
                ws = hws;
                for (i = 0; i < dev->nb_event_queues; i++)
                        roc_sso_hws_unlink(&dev->sso, ws->hws_id,
                                           (uint16_t *)&i, 1);
                memset(ws, 0, sizeof(*ws));
        }
}

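/* Drain all events held by a hardware group (GGRP): quiesce the group via
 * QCTL, then issue grouped GET_WORK requests until its pending-work
 * counters drop to zero, handing each flushed event to the caller's
 * callback and flushing any tag the workslot still holds.
 */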
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                          cnxk_handle_event_t fn, void *arg)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uintptr_t ws_base;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        if (dev->dual_ws) {
                dws = hws;
                ws_base = dws->base[0];
        } else {
                ws = hws;
                ws_base = ws->base;
        }

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
                cn9k_sso_hws_get_work_empty(ws_base, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(
                                ws_base + SSOW_LF_GWS_TAG,
                                ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
                do {
                        val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}

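/* Quiesce a workslot before reconfiguration: wait for in-flight
 * getwork/swtag/desched operations to complete, untag and deschedule any
 * work still held, then wait for the final desched to finish.
 */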
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t pend_state;
        uint8_t pend_tt;
        uintptr_t base;
        uint64_t tag;
        uint8_t i;

        dws = hws;
        ws = hws;
        for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
                base = dev->dual_ws ? dws->base[i] : ws->base;
                /* Wait till getwork/swtp/waitw/desched completes. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                                       BIT_ULL(56)));

                tag = plt_read64(base + SSOW_LF_GWS_TAG);
                pend_tt = (tag >> 32) & 0x3;
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }

                /* Wait for desched to complete. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & BIT_ULL(58));
        }
}

void
cn9k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
        else
                dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                              RTE_EVENT_MAX_QUEUES_PER_DEV :
                              dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                hws = hws * CN9K_DUAL_WS_NB_WS;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

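/* Embed the Tx adapter's per-queue data in each port's private area so the
 * fast path can reach it without extra indirection: grow the port
 * allocation (cookie + workslot + one uint64_t per port/queue pair) and
 * copy the current table in.
 */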
static int
cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        if (dev->tx_adptr_data == NULL)
                return 0;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(dws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        dws = RTE_PTR_ADD(ws_cookie,
                                          sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = dws;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(ws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        ws = RTE_PTR_ADD(ws_cookie,
                                         sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = ws;
                }
        }
        rte_mb();

        return 0;
}

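/* Populate the per-offload lookup tables and patch the eventdev fast-path
 * pointers. The R()/T() macros expand NIX_RX_FASTPATH_MODES and
 * NIX_TX_FASTPATH_MODES so every offload-flag combination maps to its
 * specialized dequeue/Tx-enqueue implementation.
 */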
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        /* Single WS modes */
        const event_dequeue_t sso_hws_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Dual WS modes */
        const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
                };

        const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] =                                         \
                        cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
                };

        const event_dequeue_t sso_hws_dual_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] =                                         \
                        cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue_t
                sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

        const event_tx_adapter_enqueue_t
                sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

        const event_tx_adapter_enqueue_t
                sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

        const event_tx_adapter_enqueue_t
                sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] =                                         \
                        cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

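        /* Default to the single-workslot handlers; they are overridden
         * below with the dual-workslot variants when dev->dual_ws is set.
         */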
        event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_seg_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_seg_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_seg_burst);
                }
        } else {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_burst);
                }
        }
        event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq_seg);
        else
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq);

        if (dev->dual_ws) {
                event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                        cn9k_sso_hws_dual_enq_fwd_burst;
                event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_seg_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_seg_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_seg_burst);
                        }
                } else {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_burst);
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq_seg);
                else
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq);
        }

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
        rte_mb();
}

static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        void *data;

        if (dev->dual_ws) {
                dws = rte_zmalloc("cn9k_dual_ws",
                                  sizeof(struct cn9k_sso_hws_dual) +
                                          RTE_CACHE_LINE_SIZE,
                                  RTE_CACHE_LINE_SIZE);
                if (dws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

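                /* First cache line is reserved for cookie */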
                dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
                dws->base[0] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
                dws->base[1] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
                dws->hws_id = port_id;
                dws->swtag_req = 0;
                dws->vws = 0;

                data = dws;
        } else {
                /* Allocate event port memory */
                ws = rte_zmalloc("cn9k_ws",
                                 sizeof(struct cn9k_sso_hws) +
                                         RTE_CACHE_LINE_SIZE,
                                 RTE_CACHE_LINE_SIZE);
                if (ws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
                ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
                ws->hws_id = port_id;
                ws->swtag_req = 0;

                data = ws;
        }

        return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
                                    cn9k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn9k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;

        rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
                            cn9k_sso_hws_flush_events);
        if (rc < 0)
                return rc;

        cn9k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static int
cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;

        return 0;
}

static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
                      void *tstmp_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        dws->lookup_mem = lookup_mem;
                        dws->tstamp = tstmp_info;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        ws->lookup_mem = lookup_mem;
                        ws->tstamp = tstmp_info;
                }
        }
}

static int
cn9k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn9k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;

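        /* lookup_mem and the timestamp info are device-wide, so any Rx
         * queue (queue 0 here) can supply them for the event ports.
         */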
        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}

static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
                       bool ena)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        int i;

        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        cn9k_sso_txq_fc_update(eth_dev, i, ena);
        } else {
                uint16_t sq_limit;

                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq_limit =
                        ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
                                    sq->nb_sqb_bufs;
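                /* One SQE per SQB is consumed by the next-SQB link, hence
                 * the ceil-divide adjustment; the ~70% scaling then leaves
                 * headroom so the SQ is not overcommitted while SQB-based
                 * flow control is armed.
                 */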
                txq->nb_sqb_bufs_adj =
                        sq_limit -
                        RTE_ALIGN_MUL_CEIL(sq_limit,
                                           (1ULL << txq->sqes_per_sqb_log2)) /
                                (1ULL << txq->sqes_per_sqb_log2);
                txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
        }
}

static int
cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
        return cn9k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_cryptodev *cdev, uint32_t *caps)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
                RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

        return 0;
}

static int
cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id, const struct rte_event *event)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(event);

        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        dev->is_ca_internal_port = 1;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
        .crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
        .crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

        .dump = cnxk_sso_dump,
        .dev_start = cn9k_sso_start,
        .dev_stop = cn9k_sso_stop,
        .dev_close = cn9k_sso_close,
        .dev_selftest = cn9k_sso_selftest,
};

static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

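        /* CN9K parts have 128-byte cache lines; refuse to bind in a build
         * compiled for a different line size.
         */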
        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN9K_SSO_SINGLE_WS "=1"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");