/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))
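
/*
 * The Rx/Tx fast-path ops live in lookup tables with one boolean
 * dimension per offload flag. CN9K_SET_EVDEV_DEQ_OP() collapses
 * dev->rx_offloads into six !! bits and picks the matching specialized
 * dequeue function. Illustrative expansion (not generated code): with
 * only RSS and checksum offloads enabled it resolves to
 *
 *     deq_op = deq_ops[0][0][0][1][0][1];
 *
 * i.e. the cn9k_sso_hws_deq_* variant compiled for exactly that flag
 * combination.
 */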
#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
        (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])

#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
        (enq_op =                                                              \
                 enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
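
/*
 * Pre-compute the per-workslot register addresses used on the fast
 * path so enqueue/dequeue never has to add SSOW_LF_GWS_* offsets at
 * run time.
 */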
static void
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
{
        ws->tag_op = base + SSOW_LF_GWS_TAG;
        ws->wqp_op = base + SSOW_LF_GWS_WQP;
        ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
        ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
        ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
        ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}
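
/*
 * In dual-workslot mode one event port is backed by a pair of hardware
 * workslots, so every link/unlink request must be mirrored to both
 * slots of the CN9K_DUAL_WS_PAIR_ID() pair.
 */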
static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_link(&dev->sso,
                                      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
                                      nb_link);
                rc |= roc_sso_hws_link(&dev->sso,
                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                       map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_unlink(&dev->sso,
                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                        map, nb_link);
                rc |= roc_sso_hws_unlink(&dev->sso,
                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                         map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t val;

        /* Set get_work tmo for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;
        if (dev->dual_ws) {
                dws = hws;
                rte_memcpy(dws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                dws->fc_mem = dev->fc_mem;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
                plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
        } else {
                ws = hws;
                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
        }
}

static void
cn9k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int i;

        if (dev->dual_ws) {
                dws = hws;
                for (i = 0; i < dev->nb_event_queues; i++) {
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                           (uint16_t *)&i, 1);
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                           (uint16_t *)&i, 1);
                }
                memset(dws, 0, sizeof(*dws));
        } else {
                ws = hws;
                for (i = 0; i < dev->nb_event_queues; i++)
                        roc_sso_hws_unlink(&dev->sso, ws->hws_id,
                                           (uint16_t *)&i, 1);
                memset(ws, 0, sizeof(*ws));
        }
}
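
/*
 * Drain a hardware group: with the group's scheduling quiesced via
 * SSO_LF_GGRP_QCTL, repeatedly issue GETWORK requests and hand any
 * returned event to the caller's callback until the admission,
 * descheduled and in-flight counters all read zero.
 */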
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                          cnxk_handle_event_t fn, void *arg)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws_state *st;
        struct cn9k_sso_hws *ws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uintptr_t ws_base;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        if (dev->dual_ws) {
                dws = hws;
                st = &dws->ws_state[0];
                ws_base = dws->base[0];
        } else {
                ws = hws;
                st = (struct cn9k_sso_hws_state *)ws;
                ws_base = ws->base;
        }

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, st->getwrk_op);
                cn9k_sso_hws_get_work_empty(st, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(st->tag_op,
                                                 st->swtag_flush_op);
                do {
                        val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}
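
/*
 * Quiesce a workslot before reconfiguration: wait for any outstanding
 * GETWORK/SWTAG/WAITW/DESCHED operation to retire, untag and desched
 * any work still held by the slot, then wait for the desched itself to
 * complete.
 */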
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t pend_state;
        uint8_t pend_tt;
        uintptr_t base;
        uint64_t tag;
        uint8_t i;

        dws = hws;
        ws = hws;
        for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
                base = dev->dual_ws ? dws->base[i] : ws->base;
                /* Wait till getwork/swtp/waitw/desched completes. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                                       BIT_ULL(56)));

                tag = plt_read64(base + SSOW_LF_GWS_TAG);
                pend_tt = (tag >> 32) & 0x3;
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }

                /* Wait for desched to complete. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & BIT_ULL(58));
        }
}

void
cn9k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
        else
                dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                        RTE_EVENT_MAX_QUEUES_PER_DEV :
                        dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                hws = hws * CN9K_DUAL_WS_NB_WS;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}
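
/*
 * The Tx adapter lookup table (one uint64_t per port/queue pair) is
 * kept inside each port's workslot structure so the fast path can
 * reach it with a single pointer. Growing it therefore means
 * reallocating each port's cookie + workslot block and republishing
 * the pointer in event_dev->data.
 */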
static int
cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        if (dev->tx_adptr_data == NULL)
                return 0;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(dws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        dws = RTE_PTR_ADD(ws_cookie,
                                          sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = dws;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(ws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        ws = RTE_PTR_ADD(ws_cookie,
                                         sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = ws;
                }
        }
        rte_mb();

        return 0;
}
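
/*
 * Populate every rte_eventdev fast-path pointer from the lookup tables
 * below. The tables are expanded from NIX_RX_FASTPATH_MODES /
 * NIX_TX_FASTPATH_MODES, so each supported offload combination maps to
 * a dedicated, fully specialized function; the CN9K_SET_EVDEV_*_OP()
 * macros then index them with the offload flags negotiated for this
 * device.
 */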
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        /* Single WS modes */
        const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_ca_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_ca_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Dual WS modes */
        const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_ca_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_ca_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue
                sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue
                sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue
                sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue
                sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_seg_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_seg_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_seg_burst);
                }
        } else {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_burst);
                }
        }
        event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq_seg);
        else
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq);

        if (dev->dual_ws) {
                event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                        cn9k_sso_hws_dual_enq_fwd_burst;
                event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_seg_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_seg_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_seg_burst);
                        }
                } else {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_burst);
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq_seg);
                else
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq);
        }

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
        rte_mb();
}
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        void *data;

        if (dev->dual_ws) {
                dws = rte_zmalloc("cn9k_dual_ws",
                                  sizeof(struct cn9k_sso_hws_dual) +
                                          RTE_CACHE_LINE_SIZE,
                                  RTE_CACHE_LINE_SIZE);
                if (dws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
                dws->base[0] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
                dws->base[1] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
                cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
                cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
                dws->hws_id = port_id;
                dws->swtag_req = 0;
                dws->vws = 0;

                data = dws;
        } else {
                /* Allocate event port memory */
                ws = rte_zmalloc("cn9k_ws",
                                 sizeof(struct cn9k_sso_hws) +
                                         RTE_CACHE_LINE_SIZE,
                                 RTE_CACHE_LINE_SIZE);
                if (ws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
                ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
                cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
                ws->hws_id = port_id;
                ws->swtag_req = 0;

                data = ws;
        }

        return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        roc_sso_rsrc_fini(&dev->sso);

        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
                                    cn9k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn9k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}
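
/*
 * Served by rte_event_port_link(); a hedged application-side sketch of
 * the call this op backs:
 *
 *     uint8_t queues[] = { 0, 1 };
 *     uint8_t prios[]  = { RTE_EVENT_DEV_PRIORITY_NORMAL,
 *                          RTE_EVENT_DEV_PRIORITY_NORMAL };
 *     rte_event_port_link(dev_id, port_id, queues, prios, 2);
 *
 * The priorities array is accepted but ignored by this PMD.
 */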
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;

        rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
                            cn9k_sso_hws_flush_events);
        if (rc < 0)
                return rc;

        cn9k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static int
cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;

        return 0;
}

static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
                      void *tstmp_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        dws->lookup_mem = lookup_mem;
                        dws->tstamp = tstmp_info;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        ws->lookup_mem = lookup_mem;
                        ws->tstamp = tstmp_info;
                }
        }
}
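
/*
 * All Rx queues of a cn9k ethdev share the same lookup_mem and
 * timestamp info, so rx_queues[0] below is representative; the
 * pointers are cached in every event port so the event dequeue path
 * can rebuild mbufs without touching the ethdev.
 */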
static int
cn9k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn9k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;

        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}
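
/*
 * Adjust the SQ flow-control threshold when a queue is bound to or
 * unbound from the Tx adapter. The last SQE slot of every SQB holds
 * the next-buffer pointer, so one slot per SQB is reserved before
 * keeping 70% of the remainder as headroom. A hedged worked example:
 * with sq_limit = 512 and 32 SQEs per SQB, 512 - 512/32 = 496, and
 * 70% of that gives nb_sqb_bufs_adj = 347.
 */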
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
                       bool ena)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        int i;

        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        cn9k_sso_txq_fc_update(eth_dev, i, ena);
        } else {
                uint16_t sq_limit;

                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq_limit =
                        ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
                              sq->nb_sqb_bufs;
                txq->nb_sqb_bufs_adj =
                        sq_limit -
                        RTE_ALIGN_MUL_CEIL(sq_limit,
                                           (1ULL << txq->sqes_per_sqb_log2)) /
                                (1ULL << txq->sqes_per_sqb_log2);
                txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
        }
}

static int
cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
        return cn9k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_cryptodev *cdev, uint32_t *caps)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
                RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

        return 0;
}
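
/*
 * Binding a crypto queue pair marks the crypto adapter as an internal
 * port, which makes cn9k_sso_fp_fns_set() switch to the _ca_ dequeue
 * variants that can deliver completed crypto ops as events.
 */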
static int
cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id, const struct rte_event *event)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(event);

        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        dev->is_ca_internal_port = 1;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct rte_eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
        .crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
        .crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

        .dump = cnxk_sso_dump,
        .dev_start = cn9k_sso_start,
        .dev_stop = cn9k_sso_stop,
        .dev_close = cn9k_sso_close,
        .dev_selftest = cn9k_sso_selftest,
};
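
/*
 * CN9K (OCTEON TX2 class) cores use 128-byte cache lines and the
 * shared fast-path structures are laid out for that size, hence the
 * hard build-configuration check below.
 */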
static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resources, queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN9K_SSO_SINGLE_WS "=1"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");
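
/*
 * Devargs usage sketch (hedged illustration; the exact key strings are
 * defined by the CNXK_ and CN9K_ macros above, and the PCI address is
 * only an example):
 *
 *     dpdk-app -a 0002:0e:00.0,single_ws=1,xae_cnt=8192 ...
 *
 * would force single-workslot ports and size the XAQ pool for 8192
 * in-flight events.
 */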