/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))

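/*
 * Select the Rx/Tx fast-path handler whose template matches the offload
 * flags enabled on the device; each array dimension maps to one offload
 * bit.
 */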
#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
        (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])

#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
        (enq_op = enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])

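/* Cache the GWS operation register addresses for fast-path use. */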
static void
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
{
        ws->tag_op = base + SSOW_LF_GWS_TAG;
        ws->wqp_op = base + SSOW_LF_GWS_WQP;
        ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
        ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
        ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
        ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}

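/*
 * Link HWGRPs to a workslot; in dual-workslot mode both GWS of the
 * pair carry identical mappings.
 */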
static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_link(&dev->sso,
                                      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
                                      nb_link);
                rc |= roc_sso_hws_link(&dev->sso,
                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                       map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_unlink(&dev->sso,
                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                        map, nb_link);
                rc |= roc_sso_hws_unlink(&dev->sso,
                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                         map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

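/*
 * Copy the HWGRP base addresses and XAQ flow-control state into the
 * workslot and program the get_work timeout (NW_TIM).
 */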
static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t val;

        /* Set get_work tmo for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;
        if (dev->dual_ws) {
                dws = hws;
                rte_memcpy(dws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                dws->fc_mem = dev->fc_mem;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
                plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
        } else {
                ws = hws;
                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
        }
}

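/* Unlink every event queue from the workslot(s) and clear its state. */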
static void
cn9k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int i;

        if (dev->dual_ws) {
                dws = hws;
                for (i = 0; i < dev->nb_event_queues; i++) {
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                           (uint16_t *)&i, 1);
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                           (uint16_t *)&i, 1);
                }
                memset(dws, 0, sizeof(*dws));
        } else {
                ws = hws;
                for (i = 0; i < dev->nb_event_queues; i++)
                        roc_sso_hws_unlink(&dev->sso, ws->hws_id,
                                           (uint16_t *)&i, 1);
                memset(ws, 0, sizeof(*ws));
        }
}

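/*
 * Drain all events still held by a HWGRP: repeatedly issue GET_WORK,
 * hand each event to the caller's handler and flush the switch tag,
 * until the group's pending-work counters drop to zero.
 */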
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                          cnxk_handle_event_t fn, void *arg)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws_state *st;
        struct cn9k_sso_hws *ws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uintptr_t ws_base;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        if (dev->dual_ws) {
                dws = hws;
                st = &dws->ws_state[0];
                ws_base = dws->base[0];
        } else {
                ws = hws;
                st = (struct cn9k_sso_hws_state *)ws;
                ws_base = ws->base;
        }

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, st->getwrk_op);
                cn9k_sso_hws_get_work_empty(st, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(st->tag_op,
                                                 st->swtag_flush_op);
                do {
                        val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}

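/*
 * Quiesce a workslot: wait for pending GET_WORK/SWTAG/DESCHED
 * operations to finish, then untag and deschedule any event it still
 * holds.
 */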
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t pend_state;
        uint8_t pend_tt;
        uintptr_t base;
        uint64_t tag;
        uint8_t i;

        dws = hws;
        ws = hws;
        for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
                base = dev->dual_ws ? dws->base[i] : ws->base;
                /* Wait till getwork/swtp/waitw/desched completes. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                                       BIT_ULL(56)));

                tag = plt_read64(base + SSOW_LF_GWS_TAG);
                pend_tt = (tag >> 32) & 0x3;
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }

                /* Wait for desched to complete. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & BIT_ULL(58));
        }
}

void
cn9k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
        else
                dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                              RTE_EVENT_MAX_QUEUES_PER_DEV :
                              dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                hws = hws * CN9K_DUAL_WS_NB_WS;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

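/*
 * Grow each port's private area so a copy of the Tx adapter table (one
 * 64-bit entry per port/queue pair) fits behind the workslot structure,
 * then refresh the copy.
 */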
static int
cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        if (dev->tx_adptr_data == NULL)
                return 0;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(dws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        dws = RTE_PTR_ADD(ws_cookie,
                                          sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = dws;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(ws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        ws = RTE_PTR_ADD(ws_cookie,
                                         sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = ws;
                }
        }
        rte_mb();

        return 0;
}

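/*
 * Populate the eventdev fast-path function pointers from the template
 * tables below, picking the variant that matches the enabled offloads.
 */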
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        /* Single WS modes */
        const event_dequeue_t sso_hws_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Dual WS modes */
        const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] =                                         \
                        cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] =                                         \
                        cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue_t
                sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t
                sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t
                sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t
                sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] =                                         \
                        cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_seg_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_seg_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_seg_burst);
                }
        } else {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_burst);
                }
        }
        event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq_seg);
        else
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq);

        if (dev->dual_ws) {
                event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                        cn9k_sso_hws_dual_enq_fwd_burst;
                event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_seg_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_seg_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_seg_burst);
                        }
                } else {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_burst);
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq_seg);
                else
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq);
        }

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
        rte_mb();
}

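/*
 * Allocate workslot memory for an event port; the first cache line is
 * reserved for the cnxk_sso_hws_cookie bookkeeping area.
 */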
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        void *data;

        if (dev->dual_ws) {
                dws = rte_zmalloc("cn9k_dual_ws",
                                  sizeof(struct cn9k_sso_hws_dual) +
                                          RTE_CACHE_LINE_SIZE,
                                  RTE_CACHE_LINE_SIZE);
                if (dws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
                dws->base[0] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
                dws->base[1] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
                cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
                cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
                dws->hws_id = port_id;
                dws->swtag_req = 0;
                dws->vws = 0;

                data = dws;
        } else {
                /* Allocate event port memory */
                ws = rte_zmalloc("cn9k_ws",
                                 sizeof(struct cn9k_sso_hws) +
                                         RTE_CACHE_LINE_SIZE,
                                 RTE_CACHE_LINE_SIZE);
                if (ws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
                ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
                cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
                ws->hws_id = port_id;
                ws->swtag_req = 0;

                data = ws;
        }

        return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

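/*
 * Rebuild SSO resources for the requested queue/port counts, allocate
 * the XAQ pool, set up the event ports and restore any links that
 * existed before the reconfigure.
 */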
static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        roc_sso_rsrc_fini(&dev->sso);

        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
                                    cn9k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie;
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        gws_cookie = cnxk_sso_hws_get_cookie(port);
        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn9k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

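/* Widen the 8-bit queue ids to the 16-bit HWGRP ids used by ROC. */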
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;

        rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
                            cn9k_sso_hws_flush_events);
        if (rc < 0)
                return rc;

        cn9k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static int
cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;

        return 0;
}

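/* Propagate the Rx lookup table and timestamp state to every port. */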
static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
                      void *tstmp_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        dws->lookup_mem = lookup_mem;
                        dws->tstamp = tstmp_info;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        ws->lookup_mem = lookup_mem;
                        ws->tstamp = tstmp_info;
                }
        }
}

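/*
 * All Rx queues of a device share one lookup table and timestamp area,
 * so reading them from queue 0 after the adapter add is sufficient.
 */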
static int
cn9k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn9k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;

        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}

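/*
 * Recompute a Tx queue's SQB limit when it is bound to or unbound from
 * the Tx adapter; the result is scaled to 70% to leave headroom for
 * in-flight events.
 */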
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
                       bool ena)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        int i;

        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        cn9k_sso_txq_fc_update(eth_dev, i, ena);
        } else {
                uint16_t sq_limit;

                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq_limit =
                        ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
                                    sq->nb_sqb_bufs;
                txq->nb_sqb_bufs_adj =
                        sq_limit -
                        RTE_ALIGN_MUL_CEIL(sq_limit,
                                           (1ULL << txq->sqes_per_sqb_log2)) /
                                (1ULL << txq->sqes_per_sqb_log2);
                txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
        }
}

static int
cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
        return cn9k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_cryptodev *cdev, uint32_t *caps)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
                RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

        return 0;
}

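/*
 * Switching to crypto-adapter internal-port mode changes the dequeue
 * handlers, so the fast-path functions are re-selected here.
 */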
static int
cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id, const struct rte_event *event)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(event);

        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        dev->is_ca_internal_port = 1;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
        .crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
        .crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

        .dump = cnxk_sso_dump,
        .dev_start = cn9k_sso_start,
        .dev_stop = cn9k_sso_stop,
        .dev_close = cn9k_sso_close,
        .dev_selftest = cn9k_sso_selftest,
};

static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN9K_SSO_SINGLE_WS "=1"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");