64d9ded18b358c5feba48edab3d0770707c2d683
[dpdk.git] / drivers / event / cnxk / cn9k_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "cn9k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
8
9 #define CN9K_DUAL_WS_NB_WS          2
10 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
11
12 #define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
13         (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]    \
14                          [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
15                          [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
16                          [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
17                          [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
18                          [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
19                          [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])
20
21 #define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
22         (enq_op =                                                              \
23                  enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
24                         [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
25                         [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
26                         [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
27                         [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
28                         [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
29
30 static void
31 cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
32 {
33         ws->tag_op = base + SSOW_LF_GWS_TAG;
34         ws->wqp_op = base + SSOW_LF_GWS_WQP;
35         ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
36         ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
37         ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
38         ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
39 }
40
41 static int
42 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
43 {
44         struct cnxk_sso_evdev *dev = arg;
45         struct cn9k_sso_hws_dual *dws;
46         struct cn9k_sso_hws *ws;
47         int rc;
48
49         if (dev->dual_ws) {
50                 dws = port;
51                 rc = roc_sso_hws_link(&dev->sso,
52                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
53                                       nb_link);
54                 rc |= roc_sso_hws_link(&dev->sso,
55                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
56                                        map, nb_link);
57         } else {
58                 ws = port;
59                 rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
60         }
61
62         return rc;
63 }
64
65 static int
66 cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
67 {
68         struct cnxk_sso_evdev *dev = arg;
69         struct cn9k_sso_hws_dual *dws;
70         struct cn9k_sso_hws *ws;
71         int rc;
72
73         if (dev->dual_ws) {
74                 dws = port;
75                 rc = roc_sso_hws_unlink(&dev->sso,
76                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
77                                         map, nb_link);
78                 rc |= roc_sso_hws_unlink(&dev->sso,
79                                          CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
80                                          map, nb_link);
81         } else {
82                 ws = port;
83                 rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
84         }
85
86         return rc;
87 }
88
89 static void
90 cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
91 {
92         struct cnxk_sso_evdev *dev = arg;
93         struct cn9k_sso_hws_dual *dws;
94         struct cn9k_sso_hws *ws;
95         uint64_t val;
96
97         /* Set get_work tmo for HWS */
98         val = NSEC2USEC(dev->deq_tmo_ns) - 1;
99         if (dev->dual_ws) {
100                 dws = hws;
101                 rte_memcpy(dws->grps_base, grps_base,
102                            sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
103                 dws->fc_mem = dev->fc_mem;
104                 dws->xaq_lmt = dev->xaq_lmt;
105
106                 plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
107                 plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
108         } else {
109                 ws = hws;
110                 rte_memcpy(ws->grps_base, grps_base,
111                            sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
112                 ws->fc_mem = dev->fc_mem;
113                 ws->xaq_lmt = dev->xaq_lmt;
114
115                 plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
116         }
117 }
118
119 static void
120 cn9k_sso_hws_release(void *arg, void *hws)
121 {
122         struct cnxk_sso_evdev *dev = arg;
123         struct cn9k_sso_hws_dual *dws;
124         struct cn9k_sso_hws *ws;
125         int i;
126
127         if (dev->dual_ws) {
128                 dws = hws;
129                 for (i = 0; i < dev->nb_event_queues; i++) {
130                         roc_sso_hws_unlink(&dev->sso,
131                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
132                                            (uint16_t *)&i, 1);
133                         roc_sso_hws_unlink(&dev->sso,
134                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
135                                            (uint16_t *)&i, 1);
136                 }
137                 memset(dws, 0, sizeof(*dws));
138         } else {
139                 ws = hws;
140                 for (i = 0; i < dev->nb_event_queues; i++)
141                         roc_sso_hws_unlink(&dev->sso, ws->hws_id,
142                                            (uint16_t *)&i, 1);
143                 memset(ws, 0, sizeof(*ws));
144         }
145 }
146
/* Drain all events pending on HWGRP @queue_id through workslot @hws.
 * @base is the GGRP LF base of the queue being flushed. If @fn is
 * non-NULL it is invoked with (@arg, event) for every drained event.
 * NOTE(review): @arg doubles as the rte_eventdev pointer (fed to
 * cnxk_sso_pmd_priv()) and as the callback context — confirm callers.
 */
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
			  cnxk_handle_event_t fn, void *arg)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws_state *st;
	struct cn9k_sso_hws *ws;
	uint64_t cq_ds_cnt = 1;
	uint64_t aq_cnt = 1;
	uint64_t ds_cnt = 1;
	struct rte_event ev;
	uintptr_t ws_base;
	uint64_t val, req;

	plt_write64(0, base + SSO_LF_GGRP_QCTL);

	/* Build the GETWORK request once; reused every loop iteration. */
	req = queue_id;     /* GGRP ID */
	req |= BIT_ULL(18); /* Grouped */
	req |= BIT_ULL(16); /* WAIT */

	/* Snapshot the queue occupancy counters; the loop below drains
	 * until all three read as zero.
	 */
	aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
	ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
	cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
	cq_ds_cnt &= 0x3FFF3FFF0000;

	/* Dual mode drains through the first workslot of the pair. */
	if (dev->dual_ws) {
		dws = hws;
		st = &dws->ws_state[0];
		ws_base = dws->base[0];
	} else {
		ws = hws;
		st = (struct cn9k_sso_hws_state *)ws;
		ws_base = ws->base;
	}

	while (aq_cnt || cq_ds_cnt || ds_cnt) {
		/* Request work and pull whatever the HW hands back. */
		plt_write64(req, st->getwrk_op);
		cn9k_sso_hws_get_work_empty(st, &ev);
		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
		if (ev.sched_type != SSO_TT_EMPTY)
			cnxk_sso_hws_swtag_flush(st->tag_op,
						 st->swtag_flush_op);
		/* Poll until PENDSTATE bit 56 clears before re-reading
		 * the counters.
		 */
		do {
			val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
		} while (val & BIT_ULL(56));
		aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
		ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
		cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x3FFF3FFF0000;
	}

	plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}
203
/* Quiesce a workslot: wait for any in-flight GWS operation, release
 * (untag and/or deschedule) work still held by the slot, then wait for
 * the deschedule to finish. Covers both workslots of a pair in dual
 * workslot mode.
 */
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t pend_state;
	uint8_t pend_tt;
	uintptr_t base;
	uint64_t tag;
	uint8_t i;

	/* Only one of dws/ws is the real type; dev->dual_ws selects. */
	dws = hws;
	ws = hws;
	for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
		base = dev->dual_ws ? dws->base[i] : ws->base;
		/* Wait till getwork/swtp/waitw/desched completes. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
				       BIT_ULL(56)));

		/* Tag type of the currently held work is in TAG[33:32]. */
		tag = plt_read64(base + SSOW_LF_GWS_TAG);
		pend_tt = (tag >> 32) & 0x3;
		if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
			if (pend_tt == SSO_TT_ATOMIC ||
			    pend_tt == SSO_TT_ORDERED)
				cnxk_sso_hws_swtag_untag(
					base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
			plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
		}

		/* Wait for desched to complete. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & BIT_ULL(58));
	}
}
242
243 void
244 cn9k_sso_set_rsrc(void *arg)
245 {
246         struct cnxk_sso_evdev *dev = arg;
247
248         if (dev->dual_ws)
249                 dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
250         else
251                 dev->max_event_ports = dev->sso.max_hws;
252         dev->max_event_queues =
253                 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
254                               RTE_EVENT_MAX_QUEUES_PER_DEV :
255                               dev->sso.max_hwgrp;
256 }
257
258 static int
259 cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
260 {
261         struct cnxk_sso_evdev *dev = arg;
262
263         if (dev->dual_ws)
264                 hws = hws * CN9K_DUAL_WS_NB_WS;
265
266         return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
267 }
268
269 static int
270 cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
271 {
272         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
273         int i;
274
275         if (dev->tx_adptr_data == NULL)
276                 return 0;
277
278         for (i = 0; i < dev->nb_event_ports; i++) {
279                 if (dev->dual_ws) {
280                         struct cn9k_sso_hws_dual *dws =
281                                 event_dev->data->ports[i];
282                         void *ws_cookie;
283
284                         ws_cookie = cnxk_sso_hws_get_cookie(dws);
285                         ws_cookie = rte_realloc_socket(
286                                 ws_cookie,
287                                 sizeof(struct cnxk_sso_hws_cookie) +
288                                         sizeof(struct cn9k_sso_hws_dual) +
289                                         (sizeof(uint64_t) *
290                                          (dev->max_port_id + 1) *
291                                          RTE_MAX_QUEUES_PER_PORT),
292                                 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
293                         if (ws_cookie == NULL)
294                                 return -ENOMEM;
295                         dws = RTE_PTR_ADD(ws_cookie,
296                                           sizeof(struct cnxk_sso_hws_cookie));
297                         memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
298                                sizeof(uint64_t) * (dev->max_port_id + 1) *
299                                        RTE_MAX_QUEUES_PER_PORT);
300                         event_dev->data->ports[i] = dws;
301                 } else {
302                         struct cn9k_sso_hws *ws = event_dev->data->ports[i];
303                         void *ws_cookie;
304
305                         ws_cookie = cnxk_sso_hws_get_cookie(ws);
306                         ws_cookie = rte_realloc_socket(
307                                 ws_cookie,
308                                 sizeof(struct cnxk_sso_hws_cookie) +
309                                         sizeof(struct cn9k_sso_hws_dual) +
310                                         (sizeof(uint64_t) *
311                                          (dev->max_port_id + 1) *
312                                          RTE_MAX_QUEUES_PER_PORT),
313                                 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
314                         if (ws_cookie == NULL)
315                                 return -ENOMEM;
316                         ws = RTE_PTR_ADD(ws_cookie,
317                                          sizeof(struct cnxk_sso_hws_cookie));
318                         memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
319                                sizeof(uint64_t) * (dev->max_port_id + 1) *
320                                        RTE_MAX_QUEUES_PER_PORT);
321                         event_dev->data->ports[i] = ws;
322                 }
323         }
324         rte_mb();
325
326         return 0;
327 }
328
/* Select the fastpath function pointers of @event_dev.
 *
 * Each lookup table below is populated from the NIX_RX/TX_FASTPATH_MODES
 * macro lists and indexed by the boolean presence of individual Rx/Tx
 * offload flags (see CN9K_SET_EVDEV_DEQ_OP / CN9K_SET_EVDEV_ENQ_OP at
 * the top of this file), picking the specialized variant generated for
 * exactly that offload combination.
 */
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	/* Single WS modes */
	const event_dequeue_t sso_hws_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Dual WS modes */
	const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
		};

	const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] =                                         \
			cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
		};

	const event_dequeue_t sso_hws_dual_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] =                                         \
			cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Tx modes */
	const event_tx_adapter_enqueue
		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	const event_tx_adapter_enqueue
		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	const event_tx_adapter_enqueue
		sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	const event_tx_adapter_enqueue
		sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	/* Default to single-workslot handlers; overridden below when the
	 * device runs in dual-workslot mode.
	 */
	event_dev->enqueue = cn9k_sso_hws_enq;
	event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
	event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
	event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				      sso_hws_deq_seg_burst);
		if (dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_seg_burst);
		}
		if (dev->is_ca_internal_port) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_ca_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_ca_seg_burst);
		}
	} else {
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				      sso_hws_deq_burst);
		if (dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_burst);
		}
		if (dev->is_ca_internal_port) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_ca);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_ca_burst);
		}
	}
	event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				      sso_hws_tx_adptr_enq_seg);
	else
		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				      sso_hws_tx_adptr_enq);

	if (dev->dual_ws) {
		event_dev->enqueue = cn9k_sso_hws_dual_enq;
		event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
		event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
		event_dev->enqueue_forward_burst =
			cn9k_sso_hws_dual_enq_fwd_burst;
		event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_dual_deq_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_dual_deq_seg_burst);
			if (dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_seg_burst);
			}
			if (dev->is_ca_internal_port) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_ca_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_ca_seg_burst);
			}
		} else {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_dual_deq);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_dual_deq_burst);
			if (dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_burst);
			}
			if (dev->is_ca_internal_port) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_ca);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_ca_burst);
			}
		}

		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
			CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
					      sso_hws_dual_tx_adptr_enq_seg);
		else
			CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
					      sso_hws_dual_tx_adptr_enq);
	}

	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
	rte_mb();
}
654
/* Allocate and initialize the per-port HWS (hardware workslot) memory.
 * Invoked via cnxk_setup_event_ports() for each event port. Returns a
 * pointer just past the leading cookie cache line, or NULL on failure.
 */
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	void *data;

	if (dev->dual_ws) {
		/* Dual-workslot mode: one event port drives a pair of
		 * hardware workslots. Extra cache line holds the cookie.
		 */
		dws = rte_zmalloc("cn9k_dual_ws",
				  sizeof(struct cn9k_sso_hws_dual) +
					  RTE_CACHE_LINE_SIZE,
				  RTE_CACHE_LINE_SIZE);
		if (dws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		/* First cache line is reserved for the cookie. */
		dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
		dws->base[0] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
		dws->base[1] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
		cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
		cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
		dws->hws_id = port_id;
		dws->swtag_req = 0;
		dws->vws = 0;

		data = dws;
	} else {
		/* Allocate event port memory */
		ws = rte_zmalloc("cn9k_ws",
				 sizeof(struct cn9k_sso_hws) +
					 RTE_CACHE_LINE_SIZE,
				 RTE_CACHE_LINE_SIZE);
		if (ws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		/* First cache line is reserved for cookie */
		ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
		ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
		/* cn9k_sso_hws begins with a cn9k_sso_hws_state, so the
		 * cast below is valid for initializing the GWS ops.
		 */
		cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
		ws->hws_id = port_id;
		ws->swtag_req = 0;

		data = ws;
	}

	return data;
}
708
709 static void
710 cn9k_sso_info_get(struct rte_eventdev *event_dev,
711                   struct rte_event_dev_info *dev_info)
712 {
713         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
714
715         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
716         cnxk_sso_info_get(dev, dev_info);
717 }
718
/* Configure the event device: validate the requested configuration,
 * (re)build SSO hardware resources, allocate XAQ buffers, create the
 * event ports and restore any links from a previous configuration.
 */
static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	/* Release resources from any previous configure before re-init. */
	roc_sso_rsrc_fini(&dev->sso);

	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
				    cn9k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

	dev->configured = 1;
	/* Make the configured state visible before returning. */
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}
760
761 static int
762 cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
763                     const struct rte_event_port_conf *port_conf)
764 {
765
766         RTE_SET_USED(port_conf);
767         return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
768 }
769
770 static void
771 cn9k_sso_port_release(void *port)
772 {
773         struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
774         struct cnxk_sso_evdev *dev;
775
776         if (port == NULL)
777                 return;
778
779         dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
780         if (!gws_cookie->configured)
781                 goto free;
782
783         cn9k_sso_hws_release(dev, port);
784         memset(gws_cookie, 0, sizeof(*gws_cookie));
785 free:
786         rte_free(gws_cookie);
787 }
788
/* Link event queues to a port. Priorities are ignored by this HW. */
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
		   const uint8_t queues[], const uint8_t priorities[],
		   uint16_t nb_links)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_links];
	uint16_t i;

	RTE_SET_USED(priorities);
	/* Widen the 8-bit queue ids to the 16-bit hwgrp id type. */
	for (i = 0; i < nb_links; i++)
		hwgrp_ids[i] = queues[i];

	return (int)cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
}
805
/* Unlink event queues from a port; returns the number unlinked. */
static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		     uint8_t queues[], uint16_t nb_unlinks)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_unlinks];
	uint16_t i;

	/* Widen the 8-bit queue ids to the 16-bit hwgrp id type. */
	for (i = 0; i < nb_unlinks; i++)
		hwgrp_ids[i] = queues[i];

	return (int)cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
}
820
821 static int
822 cn9k_sso_start(struct rte_eventdev *event_dev)
823 {
824         int rc;
825
826         rc = cn9k_sso_updt_tx_adptr_data(event_dev);
827         if (rc < 0)
828                 return rc;
829
830         rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
831                             cn9k_sso_hws_flush_events);
832         if (rc < 0)
833                 return rc;
834
835         cn9k_sso_fp_fns_set(event_dev);
836
837         return rc;
838 }
839
840 static void
841 cn9k_sso_stop(struct rte_eventdev *event_dev)
842 {
843         cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
844 }
845
846 static int
847 cn9k_sso_close(struct rte_eventdev *event_dev)
848 {
849         return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
850 }
851
852 static int
853 cn9k_sso_selftest(void)
854 {
855         return cnxk_sso_selftest(RTE_STR(event_cn9k));
856 }
857
858 static int
859 cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
860                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
861 {
862         int rc;
863
864         RTE_SET_USED(event_dev);
865         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
866         if (rc)
867                 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
868         else
869                 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
870                         RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
871                         RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
872
873         return 0;
874 }
875
876 static void
877 cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
878                       void *tstmp_info)
879 {
880         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
881         int i;
882
883         for (i = 0; i < dev->nb_event_ports; i++) {
884                 if (dev->dual_ws) {
885                         struct cn9k_sso_hws_dual *dws =
886                                 event_dev->data->ports[i];
887                         dws->lookup_mem = lookup_mem;
888                         dws->tstamp = tstmp_info;
889                 } else {
890                         struct cn9k_sso_hws *ws = event_dev->data->ports[i];
891                         ws->lookup_mem = lookup_mem;
892                         ws->tstamp = tstmp_info;
893                 }
894         }
895 }
896
/* Bind an ethdev Rx queue to the Rx adapter and propagate the ethdev's
 * lookup memory and timestamp info to all event ports so the event
 * dequeue fast path can parse received mbufs.
 */
static int
cn9k_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct cn9k_eth_rxq *rxq;
	void *lookup_mem;
	void *tstmp_info;
	int rc;

	/* Internal-port mode only works with cn9k ethdevs. */
	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
	if (rc)
		return -EINVAL;

	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
					   queue_conf);
	if (rc)
		return -EINVAL;

	/* Queue 0 is used as the reference regardless of rx_queue_id;
	 * presumably lookup_mem/tstamp are shared by all Rx queues of
	 * the ethdev — TODO confirm against the cn9k ethdev driver.
	 */
	rxq = eth_dev->data->rx_queues[0];
	lookup_mem = rxq->lookup_mem;
	tstmp_info = rxq->tstamp;
	cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
	/* Re-resolve fast-path fns since Rx offload flags may change. */
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}
925
926 static int
927 cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
928                               const struct rte_eth_dev *eth_dev,
929                               int32_t rx_queue_id)
930 {
931         int rc;
932
933         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
934         if (rc)
935                 return -EINVAL;
936
937         return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
938 }
939
940 static int
941 cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
942                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
943 {
944         int ret;
945
946         RTE_SET_USED(dev);
947         ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
948         if (ret)
949                 *caps = 0;
950         else
951                 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
952
953         return 0;
954 }
955
/* Update a Tx queue's SQB flow-control accounting when it is attached
 * to (ena == true) or detached from (ena == false) the Tx adapter.
 * A negative tx_queue_id applies the update to every Tx queue.
 */
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
		       bool ena)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cn9k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int i;

	if (tx_queue_id < 0) {
		/* Recurse once per Tx queue of the ethdev. */
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cn9k_sso_txq_fc_update(eth_dev, i, ena);
	} else {
		uint16_t sq_limit;

		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		txq = eth_dev->data->tx_queues[tx_queue_id];
		/* When attached, cap the budget at CNXK_SSO_SQB_LIMIT;
		 * otherwise use the SQ's full buffer count.
		 */
		sq_limit =
			ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
				    sq->nb_sqb_bufs;
		/* Subtract ceil(sq_limit / sqes-per-sqb) buffers of slack
		 * from the limit, then keep 70% of the result.
		 * NOTE(review): the 70% factor looks like an empirically
		 * chosen safety margin — confirm against HW errata/docs.
		 */
		txq->nb_sqb_bufs_adj =
			sq_limit -
			RTE_ALIGN_MUL_CEIL(sq_limit,
					   (1ULL << txq->sqes_per_sqb_log2)) /
				(1ULL << txq->sqes_per_sqb_log2);
		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
	}
}
984
985 static int
986 cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
987                               const struct rte_eth_dev *eth_dev,
988                               int32_t tx_queue_id)
989 {
990         int rc;
991
992         RTE_SET_USED(id);
993         rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
994         if (rc < 0)
995                 return rc;
996         cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
997         rc = cn9k_sso_updt_tx_adptr_data(event_dev);
998         if (rc < 0)
999                 return rc;
1000         cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
1001
1002         return 0;
1003 }
1004
1005 static int
1006 cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
1007                               const struct rte_eth_dev *eth_dev,
1008                               int32_t tx_queue_id)
1009 {
1010         int rc;
1011
1012         RTE_SET_USED(id);
1013         rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
1014         if (rc < 0)
1015                 return rc;
1016         cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
1017         return cn9k_sso_updt_tx_adptr_data(event_dev);
1018 }
1019
1020 static int
1021 cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
1022                              const struct rte_cryptodev *cdev, uint32_t *caps)
1023 {
1024         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
1025         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
1026
1027         *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
1028                 RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
1029
1030         return 0;
1031 }
1032
1033 static int
1034 cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
1035                            const struct rte_cryptodev *cdev,
1036                            int32_t queue_pair_id, const struct rte_event *event)
1037 {
1038         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
1039
1040         RTE_SET_USED(event);
1041
1042         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
1043         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
1044
1045         dev->is_ca_internal_port = 1;
1046         cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
1047
1048         return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
1049 }
1050
1051 static int
1052 cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
1053                            const struct rte_cryptodev *cdev,
1054                            int32_t queue_pair_id)
1055 {
1056         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
1057         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
1058
1059         return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
1060 }
1061
/* Eventdev ops table: cn9k-specific callbacks where the hardware
 * behavior differs, shared cnxk implementations for the rest.
 */
static struct rte_eventdev_ops cn9k_sso_dev_ops = {
	.dev_infos_get = cn9k_sso_info_get,
	.dev_configure = cn9k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn9k_sso_port_setup,
	.port_release = cn9k_sso_port_release,
	.port_link = cn9k_sso_port_link,
	.port_unlink = cn9k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,

	/* Ethernet Rx adapter ops. */
	.eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

	/* Ethernet Tx adapter ops. */
	.eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

	/* Timer adapter ops. */
	.timer_adapter_caps_get = cnxk_tim_caps_get,

	/* Crypto adapter ops. */
	.crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

	.dump = cnxk_sso_dump,
	.dev_start = cn9k_sso_start,
	.dev_stop = cn9k_sso_stop,
	.dev_close = cn9k_sso_close,
	.dev_selftest = cn9k_sso_selftest,
};
1097
/* Per-device init callback invoked from the PCI probe path. Installs
 * the ops table, and for the primary process initializes the common
 * SSO state and validates the available queue/port resources;
 * secondary processes only resolve the fast-path function pointers.
 */
static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	/* The driver must be built with 128-byte cache lines for CN9K. */
	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc < 0) {
		plt_err("Failed to initialize platform model");
		return rc;
	}

	event_dev->dev_ops = &cn9k_sso_dev_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		cn9k_sso_fp_fns_set(event_dev);
		return 0;
	}

	rc = cnxk_sso_init(event_dev);
	if (rc < 0)
		return rc;

	cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
	/* Bail out (and undo cnxk_sso_init) if the HW exposes no usable
	 * queues or ports.
	 */
	if (!dev->max_event_ports || !dev->max_event_queues) {
		plt_err("Not enough eventdev resource queues=%d ports=%d",
			dev->max_event_queues, dev->max_event_ports);
		cnxk_sso_fini(event_dev);
		return -ENODEV;
	}

	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		    event_dev->data->name, dev->max_event_queues,
		    dev->max_event_ports);

	return 0;
}
1140
1141 static int
1142 cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1143 {
1144         return rte_event_pmd_pci_probe(
1145                 pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
1146 }
1147
/* NOTE(review): only the zero sentinel entry is present in this table;
 * the actual SSO PCI ids are presumably filled in / matched by the
 * common cnxk probe path — confirm before relying on this list.
 */
static const struct rte_pci_id cn9k_pci_sso_map[] = {
	{
		.vendor_id = 0,
	},
};

/* PCI driver glue; removal is handled by the common cnxk handler. */
static struct rte_pci_driver cn9k_pci_sso = {
	.id_table = cn9k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn9k_sso_probe,
	.remove = cnxk_sso_remove,
};
1160
RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
/* Devargs accepted by this PMD (parsing happens in the common cnxk and
 * timer layers that define these keys).
 */
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CNXK_SSO_FORCE_BP "=1"
			      CN9K_SSO_SINGLE_WS "=1"
			      CNXK_TIM_DISABLE_NPA "=1"
			      CNXK_TIM_CHNK_SLOTS "=<int>"
			      CNXK_TIM_RINGS_LMT "=<int>"
			      CNXK_TIM_STATS_ENA "=1");