event/cnxk: use common XAQ pool functions
[dpdk.git] / drivers / event / cnxk / cn9k_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "cn9k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
8
#define CN9K_DUAL_WS_NB_WS	    2
/* Map a dual-workslot event port index (x) and slot id (0 or 1) to the
 * underlying hardware workslot index: each dual port owns two HWS.
 * Both arguments are parenthesized to keep the macro precedence-safe.
 */
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))
11
/* Select the Rx-offload-specialized dequeue function from a
 * [2][2][2][2][2][2][2] lookup table: each index is one NIX Rx offload
 * flag normalized to 0/1 with !!.  The index order must match the R()
 * expansion order of NIX_RX_FASTPATH_MODES.  `dev` is parenthesized so
 * the macro stays precedence-safe for any caller expression.
 */
#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
	(deq_op = deq_ops[!!((dev)->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]  \
			 [!!((dev)->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]\
			 [!!((dev)->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]    \
			 [!!((dev)->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]\
			 [!!((dev)->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]  \
			 [!!((dev)->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]     \
			 [!!((dev)->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])

/* Same selection scheme for the Tx adapter enqueue function, keyed on
 * the NIX Tx offload flags (T() expansion order of NIX_TX_FASTPATH_MODES).
 */
#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
	(enq_op = enq_ops[!!((dev)->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]  \
			[!!((dev)->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]     \
			[!!((dev)->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]        \
			[!!((dev)->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]  \
			[!!((dev)->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]  \
			[!!((dev)->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]\
			[!!((dev)->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
29
/* Pre-compute the fast-path GWS register addresses for one workslot from
 * its LF base so the datapath never recomputes the offsets per operation.
 */
static void
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
{
	ws->tag_op = base + SSOW_LF_GWS_TAG;
	ws->wqp_op = base + SSOW_LF_GWS_WQP;
	ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
	ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
	ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
	ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}
40
41 static int
42 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
43 {
44         struct cnxk_sso_evdev *dev = arg;
45         struct cn9k_sso_hws_dual *dws;
46         struct cn9k_sso_hws *ws;
47         int rc;
48
49         if (dev->dual_ws) {
50                 dws = port;
51                 rc = roc_sso_hws_link(&dev->sso,
52                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
53                                       nb_link);
54                 rc |= roc_sso_hws_link(&dev->sso,
55                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
56                                        map, nb_link);
57         } else {
58                 ws = port;
59                 rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
60         }
61
62         return rc;
63 }
64
65 static int
66 cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
67 {
68         struct cnxk_sso_evdev *dev = arg;
69         struct cn9k_sso_hws_dual *dws;
70         struct cn9k_sso_hws *ws;
71         int rc;
72
73         if (dev->dual_ws) {
74                 dws = port;
75                 rc = roc_sso_hws_unlink(&dev->sso,
76                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
77                                         map, nb_link);
78                 rc |= roc_sso_hws_unlink(&dev->sso,
79                                          CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
80                                          map, nb_link);
81         } else {
82                 ws = port;
83                 rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
84         }
85
86         return rc;
87 }
88
89 static void
90 cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
91 {
92         struct cnxk_sso_evdev *dev = arg;
93         struct cn9k_sso_hws_dual *dws;
94         struct cn9k_sso_hws *ws;
95         uint64_t val;
96
97         /* Set get_work tmo for HWS */
98         val = NSEC2USEC(dev->deq_tmo_ns) - 1;
99         if (dev->dual_ws) {
100                 dws = hws;
101                 rte_memcpy(dws->grps_base, grps_base,
102                            sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
103                 dws->fc_mem = (uint64_t *)dev->fc_iova;
104                 dws->xaq_lmt = dev->xaq_lmt;
105
106                 plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
107                 plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
108         } else {
109                 ws = hws;
110                 rte_memcpy(ws->grps_base, grps_base,
111                            sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
112                 ws->fc_mem = (uint64_t *)dev->fc_iova;
113                 ws->xaq_lmt = dev->xaq_lmt;
114
115                 plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
116         }
117 }
118
119 static void
120 cn9k_sso_hws_release(void *arg, void *hws)
121 {
122         struct cnxk_sso_evdev *dev = arg;
123         struct cn9k_sso_hws_dual *dws;
124         struct cn9k_sso_hws *ws;
125         int i;
126
127         if (dev->dual_ws) {
128                 dws = hws;
129                 for (i = 0; i < dev->nb_event_queues; i++) {
130                         roc_sso_hws_unlink(&dev->sso,
131                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
132                                            (uint16_t *)&i, 1);
133                         roc_sso_hws_unlink(&dev->sso,
134                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
135                                            (uint16_t *)&i, 1);
136                 }
137                 memset(dws, 0, sizeof(*dws));
138         } else {
139                 ws = hws;
140                 for (i = 0; i < dev->nb_event_queues; i++)
141                         roc_sso_hws_unlink(&dev->sso, ws->hws_id,
142                                            (uint16_t *)&i, 1);
143                 memset(ws, 0, sizeof(*ws));
144         }
145 }
146
/* Drain every event still queued on the HWGRP identified by @queue_id,
 * invoking @fn on each drained event when @fn is non-NULL.  Loops until
 * the group's AQ/MISC/INT counters all read zero, then invalidates the
 * workslot's getwork cache.  Dual-WS ports drain through workslot 0.
 */
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
			  cnxk_handle_event_t fn, void *arg)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws_state *st;
	struct cn9k_sso_hws *ws;
	uint64_t cq_ds_cnt = 1;
	uint64_t aq_cnt = 1;
	uint64_t ds_cnt = 1;
	struct rte_event ev;
	uintptr_t ws_base;
	uint64_t val, req;

	/* NOTE(review): writing 0 to GGRP_QCTL appears intended to stop
	 * further scheduling on this group while draining — confirm
	 * against the SSO hardware reference manual.
	 */
	plt_write64(0, base + SSO_LF_GGRP_QCTL);

	/* Build the GET_WORK request targeting this specific group. */
	req = queue_id;     /* GGRP ID */
	req |= BIT_ULL(18); /* Grouped */
	req |= BIT_ULL(16); /* WAIT */

	aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
	ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
	cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
	/* Extract cq and ds count */
	cq_ds_cnt &= 0x3FFF3FFF0000;

	if (dev->dual_ws) {
		/* Drain via the first workslot of the dual pair. */
		dws = hws;
		st = &dws->ws_state[0];
		ws_base = dws->base[0];
	} else {
		ws = hws;
		st = (struct cn9k_sso_hws_state *)ws;
		ws_base = ws->base;
	}

	while (aq_cnt || cq_ds_cnt || ds_cnt) {
		/* Pull one event from the group and hand it to @fn. */
		plt_write64(req, st->getwrk_op);
		cn9k_sso_hws_get_work_empty(st, &ev);
		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
		if (ev.sched_type != SSO_TT_EMPTY)
			cnxk_sso_hws_swtag_flush(st->tag_op,
						 st->swtag_flush_op);
		/* Busy-wait until PENDSTATE bit 56 clears before re-reading
		 * the group counters.
		 */
		do {
			val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
		} while (val & BIT_ULL(56));
		aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
		ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
		cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x3FFF3FFF0000;
	}

	plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}
203
/* Quiesce a workslot: wait for in-flight operations to retire, untag or
 * deschedule any event the GWS is still holding, then wait for the final
 * deschedule to complete.  Dual-WS ports reset both underlying workslots.
 */
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t pend_state;
	uint8_t pend_tt;
	uintptr_t base;
	uint64_t tag;
	uint8_t i;

	/* Only one of dws/ws is valid, selected by dev->dual_ws below. */
	dws = hws;
	ws = hws;
	for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
		base = dev->dual_ws ? dws->base[i] : ws->base;
		/* Wait till getwork/swtp/waitw/desched completes. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
				       BIT_ULL(56)));

		/* Pending tag type lives in TAG[33:32]. */
		tag = plt_read64(base + SSOW_LF_GWS_TAG);
		pend_tt = (tag >> 32) & 0x3;
		if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
			if (pend_tt == SSO_TT_ATOMIC ||
			    pend_tt == SSO_TT_ORDERED)
				cnxk_sso_hws_swtag_untag(
					base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
			plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
		}

		/* Wait for desched to complete. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & BIT_ULL(58));
	}
}
242
243 void
244 cn9k_sso_set_rsrc(void *arg)
245 {
246         struct cnxk_sso_evdev *dev = arg;
247
248         if (dev->dual_ws)
249                 dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
250         else
251                 dev->max_event_ports = dev->sso.max_hws;
252         dev->max_event_queues =
253                 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
254                               RTE_EVENT_MAX_QUEUES_PER_DEV :
255                               dev->sso.max_hwgrp;
256 }
257
258 static int
259 cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
260 {
261         struct cnxk_sso_evdev *dev = arg;
262
263         if (dev->dual_ws)
264                 hws = hws * CN9K_DUAL_WS_NB_WS;
265
266         return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
267 }
268
269 static int
270 cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
271 {
272         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
273         int i;
274
275         if (dev->tx_adptr_data == NULL)
276                 return 0;
277
278         for (i = 0; i < dev->nb_event_ports; i++) {
279                 if (dev->dual_ws) {
280                         struct cn9k_sso_hws_dual *dws =
281                                 event_dev->data->ports[i];
282                         void *ws_cookie;
283
284                         ws_cookie = cnxk_sso_hws_get_cookie(dws);
285                         ws_cookie = rte_realloc_socket(
286                                 ws_cookie,
287                                 sizeof(struct cnxk_sso_hws_cookie) +
288                                         sizeof(struct cn9k_sso_hws_dual) +
289                                         (sizeof(uint64_t) *
290                                          (dev->max_port_id + 1) *
291                                          RTE_MAX_QUEUES_PER_PORT),
292                                 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
293                         if (ws_cookie == NULL)
294                                 return -ENOMEM;
295                         dws = RTE_PTR_ADD(ws_cookie,
296                                           sizeof(struct cnxk_sso_hws_cookie));
297                         memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
298                                sizeof(uint64_t) * (dev->max_port_id + 1) *
299                                        RTE_MAX_QUEUES_PER_PORT);
300                         event_dev->data->ports[i] = dws;
301                 } else {
302                         struct cn9k_sso_hws *ws = event_dev->data->ports[i];
303                         void *ws_cookie;
304
305                         ws_cookie = cnxk_sso_hws_get_cookie(ws);
306                         ws_cookie = rte_realloc_socket(
307                                 ws_cookie,
308                                 sizeof(struct cnxk_sso_hws_cookie) +
309                                         sizeof(struct cn9k_sso_hws_dual) +
310                                         (sizeof(uint64_t) *
311                                          (dev->max_port_id + 1) *
312                                          RTE_MAX_QUEUES_PER_PORT),
313                                 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
314                         if (ws_cookie == NULL)
315                                 return -ENOMEM;
316                         ws = RTE_PTR_ADD(ws_cookie,
317                                          sizeof(struct cnxk_sso_hws_cookie));
318                         memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
319                                sizeof(uint64_t) * (dev->max_port_id + 1) *
320                                        RTE_MAX_QUEUES_PER_PORT);
321                         event_dev->data->ports[i] = ws;
322                 }
323         }
324         rte_mb();
325
326         return 0;
327 }
328
/* Resolve and install the fast-path function pointers for this eventdev.
 * Each lookup table below is populated from the NIX_RX_FASTPATH_MODES /
 * NIX_TX_FASTPATH_MODES X-macro lists and indexed by seven 0/1 offload
 * flags, in the same order used by CN9K_SET_EVDEV_DEQ_OP /
 * CN9K_SET_EVDEV_ENQ_OP.  Single-WS handlers are installed first and then
 * overridden wholesale when the device runs in dual-workslot mode.
 */
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	/* Single WS modes */
	const event_dequeue_t sso_hws_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Dual WS modes */
	const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
		};

	const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] =                                         \
			cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
		};

	const event_dequeue_t sso_hws_dual_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t
		sso_hws_dual_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	[f6][f5][f4][f3][f2][f1][f0] =                                         \
			cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
			NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Tx modes */
	const event_tx_adapter_enqueue_t
		sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	const event_tx_adapter_enqueue_t
		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	const event_tx_adapter_enqueue_t
		sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	const event_tx_adapter_enqueue_t
		sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
	[f6][f5][f4][f3][f2][f1][f0] =                                         \
			cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	/* Install single-WS handlers; overridden below for dual WS. */
	event_dev->enqueue = cn9k_sso_hws_enq;
	event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
	event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
	event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				      sso_hws_deq_seg_burst);
		if (dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_seg_burst);
		}
		if (dev->is_ca_internal_port) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_ca_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_ca_seg_burst);
		}
	} else {
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
		CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				      sso_hws_deq_burst);
		if (dev->is_timeout_deq) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_tmo);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_tmo_burst);
		}
		if (dev->is_ca_internal_port) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_deq_ca);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_deq_ca_burst);
		}
	}
	event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				      sso_hws_tx_adptr_enq_seg);
	else
		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				      sso_hws_tx_adptr_enq);

	if (dev->dual_ws) {
		/* Dual-workslot mode: replace all handlers installed above. */
		event_dev->enqueue = cn9k_sso_hws_dual_enq;
		event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
		event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
		event_dev->enqueue_forward_burst =
			cn9k_sso_hws_dual_enq_fwd_burst;
		event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_dual_deq_seg);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_dual_deq_seg_burst);
			if (dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_seg_burst);
			}
			if (dev->is_ca_internal_port) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_ca_seg);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_ca_seg_burst);
			}
		} else {
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					      sso_hws_dual_deq);
			CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					      sso_hws_dual_deq_burst);
			if (dev->is_timeout_deq) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_tmo);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_tmo_burst);
			}
			if (dev->is_ca_internal_port) {
				CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
						      sso_hws_dual_deq_ca);
				CN9K_SET_EVDEV_DEQ_OP(
					dev, event_dev->dequeue_burst,
					sso_hws_dual_deq_ca_burst);
			}
		}

		if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
			CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
					      sso_hws_dual_tx_adptr_enq_seg);
		else
			CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
					      sso_hws_dual_tx_adptr_enq);
	}

	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
	/* Ensure all fast-path pointers are visible before use. */
	rte_mb();
}
655
656 static void *
657 cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
658 {
659         struct cnxk_sso_evdev *dev = arg;
660         struct cn9k_sso_hws_dual *dws;
661         struct cn9k_sso_hws *ws;
662         void *data;
663
664         if (dev->dual_ws) {
665                 dws = rte_zmalloc("cn9k_dual_ws",
666                                   sizeof(struct cn9k_sso_hws_dual) +
667                                           RTE_CACHE_LINE_SIZE,
668                                   RTE_CACHE_LINE_SIZE);
669                 if (dws == NULL) {
670                         plt_err("Failed to alloc memory for port=%d", port_id);
671                         return NULL;
672                 }
673
674                 dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
675                 dws->base[0] = roc_sso_hws_base_get(
676                         &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
677                 dws->base[1] = roc_sso_hws_base_get(
678                         &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
679                 cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
680                 cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
681                 dws->hws_id = port_id;
682                 dws->swtag_req = 0;
683                 dws->vws = 0;
684
685                 data = dws;
686         } else {
687                 /* Allocate event port memory */
688                 ws = rte_zmalloc("cn9k_ws",
689                                  sizeof(struct cn9k_sso_hws) +
690                                          RTE_CACHE_LINE_SIZE,
691                                  RTE_CACHE_LINE_SIZE);
692                 if (ws == NULL) {
693                         plt_err("Failed to alloc memory for port=%d", port_id);
694                         return NULL;
695                 }
696
697                 /* First cache line is reserved for cookie */
698                 ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
699                 ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
700                 cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
701                 ws->hws_id = port_id;
702                 ws->swtag_req = 0;
703
704                 data = ws;
705         }
706
707         return data;
708 }
709
710 static void
711 cn9k_sso_info_get(struct rte_eventdev *event_dev,
712                   struct rte_event_dev_info *dev_info)
713 {
714         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
715
716         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
717         cnxk_sso_info_get(dev, dev_info);
718 }
719
/* Configure the event device: validate the requested configuration,
 * initialize SSO HW resources, allocate the XAQ pool and set up the
 * event ports. On any failure past resource init, HW resources are
 * released and the port count is reset.
 */
static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	/* Reject configurations outside the device's advertised limits. */
	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	/* Common XAQ pool allocation shared with cn10k. */
	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
				    cn9k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

	dev->configured = 1;
	/* Ensure all configuration stores are visible before use. */
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}
759
760 static int
761 cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
762                     const struct rte_event_port_conf *port_conf)
763 {
764
765         RTE_SET_USED(port_conf);
766         return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
767 }
768
769 static void
770 cn9k_sso_port_release(void *port)
771 {
772         struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
773         struct cnxk_sso_evdev *dev;
774
775         if (port == NULL)
776                 return;
777
778         dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
779         if (!gws_cookie->configured)
780                 goto free;
781
782         cn9k_sso_hws_release(dev, port);
783         memset(gws_cookie, 0, sizeof(*gws_cookie));
784 free:
785         rte_free(gws_cookie);
786 }
787
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
		   const uint8_t queues[], const uint8_t priorities[],
		   uint16_t nb_links)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_links];
	uint16_t i;

	/* SSO has no per-link priority support. */
	RTE_SET_USED(priorities);

	/* Widen the 8-bit eventdev queue ids into 16-bit hwgrp ids. */
	for (i = 0; i < nb_links; i++)
		hwgrp_ids[i] = queues[i];

	return (int)cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
}
804
static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		     uint8_t queues[], uint16_t nb_unlinks)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_unlinks];
	uint16_t i;

	/* Widen the 8-bit eventdev queue ids into 16-bit hwgrp ids. */
	for (i = 0; i < nb_unlinks; i++)
		hwgrp_ids[i] = queues[i];

	return (int)cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
}
819
820 static int
821 cn9k_sso_start(struct rte_eventdev *event_dev)
822 {
823         int rc;
824
825         rc = cn9k_sso_updt_tx_adptr_data(event_dev);
826         if (rc < 0)
827                 return rc;
828
829         rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
830                             cn9k_sso_hws_flush_events);
831         if (rc < 0)
832                 return rc;
833
834         cn9k_sso_fp_fns_set(event_dev);
835
836         return rc;
837 }
838
/* Stop the event device: common teardown with CN9K-specific workslot
 * reset and in-flight event flush callbacks.
 */
static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
	cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}
844
/* Close the event device; the unlink callback detaches every hwgrp
 * from every port before common resources are released.
 */
static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
	return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}
850
/* Run the common cnxk eventdev selftest against the cn9k vdev name. */
static int
cn9k_sso_selftest(void)
{
	return cnxk_sso_selftest(RTE_STR(event_cn9k));
}
856
857 static int
858 cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
859                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
860 {
861         int rc;
862
863         RTE_SET_USED(event_dev);
864         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
865         if (rc)
866                 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
867         else
868                 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
869                         RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
870                         RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
871
872         return 0;
873 }
874
875 static void
876 cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
877                       void *tstmp_info)
878 {
879         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
880         int i;
881
882         for (i = 0; i < dev->nb_event_ports; i++) {
883                 if (dev->dual_ws) {
884                         struct cn9k_sso_hws_dual *dws =
885                                 event_dev->data->ports[i];
886                         dws->lookup_mem = lookup_mem;
887                         dws->tstamp = tstmp_info;
888                 } else {
889                         struct cn9k_sso_hws *ws = event_dev->data->ports[i];
890                         ws->lookup_mem = lookup_mem;
891                         ws->tstamp = tstmp_info;
892                 }
893         }
894 }
895
/* Bind an ethdev Rx queue (or all queues when rx_queue_id < 0) to the
 * Rx adapter and wire the port's Rx metadata into the event ports.
 */
static int
cn9k_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct cn9k_eth_rxq *rxq;
	void *lookup_mem;
	void *tstmp_info;
	int rc;

	/* Internal-port Rx adapter requires a cn9k net PMD ethdev. */
	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
	if (rc)
		return -EINVAL;

	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
					   queue_conf);
	if (rc)
		return -EINVAL;

	/* Fetch ptype lookup memory and timestamp info from Rx queue 0;
	 * presumably these are shared by all Rx queues of the port —
	 * TODO confirm against the cn9k ethdev queue setup.
	 */
	rxq = eth_dev->data->rx_queues[0];
	lookup_mem = rxq->lookup_mem;
	tstmp_info = rxq->tstamp;
	cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
	/* Re-select fast-path handlers for the updated Rx offload flags. */
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}
924
925 static int
926 cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
927                               const struct rte_eth_dev *eth_dev,
928                               int32_t rx_queue_id)
929 {
930         int rc;
931
932         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
933         if (rc)
934                 return -EINVAL;
935
936         return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
937 }
938
939 static int
940 cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
941                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
942 {
943         int ret;
944
945         RTE_SET_USED(dev);
946         ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
947         if (ret)
948                 *caps = 0;
949         else
950                 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
951
952         return 0;
953 }
954
/* Adjust the usable SQB (send queue buffer) count of a Tx queue for
 * event-driven Tx flow control. With ena, the count is clamped to the
 * SSO limit; otherwise the queue's full allocation is restored.
 */
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
		       bool ena)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cn9k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int i;

	if (tx_queue_id < 0) {
		/* Negative id means apply to every Tx queue of the port. */
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cn9k_sso_txq_fc_update(eth_dev, i, ena);
	} else {
		uint16_t sq_limit;

		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sq_limit =
			ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
				    sq->nb_sqb_bufs;
		/* Subtract ceil(sq_limit / sqes_per_sqb) from the limit —
		 * appears to reserve one entry per SQB (e.g. the next-SQB
		 * chain pointer); TODO confirm against NIX SQ layout.
		 */
		txq->nb_sqb_bufs_adj =
			sq_limit -
			RTE_ALIGN_MUL_CEIL(sq_limit,
					   (1ULL << txq->sqes_per_sqb_log2)) /
				(1ULL << txq->sqes_per_sqb_log2);
		/* Keep only 70% of the adjusted count as usable headroom;
		 * rationale for the 70/100 factor is not visible here.
		 */
		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
	}
}
983
984 static int
985 cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
986                               const struct rte_eth_dev *eth_dev,
987                               int32_t tx_queue_id)
988 {
989         int rc;
990
991         RTE_SET_USED(id);
992         rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
993         if (rc < 0)
994                 return rc;
995         cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
996         rc = cn9k_sso_updt_tx_adptr_data(event_dev);
997         if (rc < 0)
998                 return rc;
999         cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
1000
1001         return 0;
1002 }
1003
1004 static int
1005 cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
1006                               const struct rte_eth_dev *eth_dev,
1007                               int32_t tx_queue_id)
1008 {
1009         int rc;
1010
1011         RTE_SET_USED(id);
1012         rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
1013         if (rc < 0)
1014                 return rc;
1015         cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
1016         return cn9k_sso_updt_tx_adptr_data(event_dev);
1017 }
1018
1019 static int
1020 cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
1021                              const struct rte_cryptodev *cdev, uint32_t *caps)
1022 {
1023         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
1024         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
1025
1026         *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
1027                 RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
1028
1029         return 0;
1030 }
1031
1032 static int
1033 cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
1034                            const struct rte_cryptodev *cdev,
1035                            int32_t queue_pair_id, const struct rte_event *event)
1036 {
1037         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
1038
1039         RTE_SET_USED(event);
1040
1041         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
1042         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
1043
1044         dev->is_ca_internal_port = 1;
1045         cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
1046
1047         return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
1048 }
1049
/* Unbind a crypto queue pair from the adapter after verifying that
 * both devices belong to the cn9k family.
 */
static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
			   const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}
1060
/* Eventdev ops table: CN9K-specific handlers where the HW differs,
 * common cnxk handlers everywhere else.
 */
static struct eventdev_ops cn9k_sso_dev_ops = {
	/* Device and queue/port lifecycle. */
	.dev_infos_get = cn9k_sso_info_get,
	.dev_configure = cn9k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn9k_sso_port_setup,
	.port_release = cn9k_sso_port_release,
	.port_link = cn9k_sso_port_link,
	.port_unlink = cn9k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,

	/* Ethernet Rx adapter. */
	.eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

	/* Ethernet Tx adapter. */
	.eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

	/* Event timer adapter. */
	.timer_adapter_caps_get = cnxk_tim_caps_get,

	/* Crypto adapter. */
	.crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

	/* Diagnostics and device control. */
	.dump = cnxk_sso_dump,
	.dev_start = cn9k_sso_start,
	.dev_stop = cn9k_sso_stop,
	.dev_close = cn9k_sso_close,
	.dev_selftest = cn9k_sso_selftest,
};
1096
/* Eventdev init callback: sanity-check the build, initialize the
 * platform layer, then probe and validate the SSO resources. Order
 * matters: plt init must precede cnxk_sso_init, which must precede
 * resource discovery.
 */
static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	/* CN9K HW requires 128-byte cache-line-aligned structures. */
	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc < 0) {
		plt_err("Failed to initialize platform model");
		return rc;
	}

	event_dev->dev_ops = &cn9k_sso_dev_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		cn9k_sso_fp_fns_set(event_dev);
		return 0;
	}

	rc = cnxk_sso_init(event_dev);
	if (rc < 0)
		return rc;

	/* Discover how many queues/ports the HW actually provides. */
	cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
	if (!dev->max_event_ports || !dev->max_event_queues) {
		plt_err("Not enough eventdev resource queues=%d ports=%d",
			dev->max_event_queues, dev->max_event_ports);
		cnxk_sso_fini(event_dev);
		return -ENODEV;
	}

	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		    event_dev->data->name, dev->max_event_queues,
		    dev->max_event_ports);

	return 0;
}
1139
/* PCI probe: allocate the cnxk private data and run cn9k_sso_init. */
static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(
		pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}
1146
/* PCI id match table; the vendor_id = 0 entry is the list terminator.
 * NOTE(review): only the terminator is visible here — SSO/TIM device
 * ids are presumably contributed elsewhere; verify before relying on
 * PCI matching.
 */
static const struct rte_pci_id cn9k_pci_sso_map[] = {
	{
		.vendor_id = 0,
	},
};
1152
/* PCI driver descriptor; removal is handled by the common cnxk layer. */
static struct rte_pci_driver cn9k_pci_sso = {
	.id_table = cn9k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn9k_sso_probe,
	.remove = cnxk_sso_remove,
};
1159
/* Register the PMD with EAL: PCI binding, kernel module dependency and
 * the devargs accepted by this driver (SSO tuning plus TIM options).
 */
RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CNXK_SSO_FORCE_BP "=1"
			      CN9K_SSO_SINGLE_WS "=1"
			      CNXK_TIM_DISABLE_NPA "=1"
			      CNXK_TIM_CHNK_SLOTS "=<int>"
			      CNXK_TIM_RINGS_LMT "=<int>"
			      CNXK_TIM_STATS_ENA "=1");