event/cnxk: reduce workslot memory consumption
dpdk.git: drivers/event/cnxk/cn9k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)

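/*
 * The fastpath handler tables built in cn9k_sso_fp_fns_set() use one array
 * dimension per Rx/Tx offload flag; the !! below collapses each flag test
 * to 0 or 1 so the matching preprocessor-generated specialization is
 * selected with a single lookup.
 */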
#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
        (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])

#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
        (enq_op = enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])

static void
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
{
        ws->tag_op = base + SSOW_LF_GWS_TAG;
        ws->wqp_op = base + SSOW_LF_GWS_WQP;
        ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
        ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
        ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
        ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}

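/*
 * A dual workslot port owns a pair of consecutive GWS LFs, so link and
 * unlink must apply the same HWGRP map to both halves of the pair.
 */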
static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_link(&dev->sso,
                                      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
                                      nb_link);
                rc |= roc_sso_hws_link(&dev->sso,
                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                       map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_unlink(&dev->sso,
                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                        map, nb_link);
                rc |= roc_sso_hws_unlink(&dev->sso,
                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                         map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t val;

        /* Set get_work tmo for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;
        if (dev->dual_ws) {
                dws = hws;
                dws->grp_base = grp_base;
                dws->fc_mem = (uint64_t *)dev->fc_iova;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
                plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
        } else {
                ws = hws;
                ws->grp_base = grp_base;
                ws->fc_mem = (uint64_t *)dev->fc_iova;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
        }
}

static void
cn9k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int i;

        if (dev->dual_ws) {
                dws = hws;
                for (i = 0; i < dev->nb_event_queues; i++) {
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                           (uint16_t *)&i, 1);
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                           (uint16_t *)&i, 1);
                }
                memset(dws, 0, sizeof(*dws));
        } else {
                ws = hws;
                for (i = 0; i < dev->nb_event_queues; i++)
                        roc_sso_hws_unlink(&dev->sso, ws->hws_id,
                                           (uint16_t *)&i, 1);
                memset(ws, 0, sizeof(*ws));
        }
}

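/*
 * Drain a HWGRP before teardown: keep issuing grouped GETWORK requests
 * against the queue and feed every event that comes back to the caller's
 * handler, until the AQ/CQ/DS counters polled below all read zero.
 */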
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                          cnxk_handle_event_t fn, void *arg)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws_state *st;
        struct cn9k_sso_hws *ws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uintptr_t ws_base;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        if (dev->dual_ws) {
                dws = hws;
                st = &dws->ws_state[0];
                ws_base = dws->base[0];
        } else {
                ws = hws;
                st = (struct cn9k_sso_hws_state *)ws;
                ws_base = ws->base;
        }

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, st->getwrk_op);
                cn9k_sso_hws_get_work_empty(st, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(st->tag_op,
                                                 st->swtag_flush_op);
                do {
                        val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}

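/*
 * Quiesce a workslot (both GWS of a pair in dual mode): wait for any
 * pending GETWORK/SWTAG/DESCHED to retire, then untag and deschedule
 * whatever work the slot still holds.
 */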
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t pend_state;
        uint8_t pend_tt;
        uintptr_t base;
        uint64_t tag;
        uint8_t i;

        dws = hws;
        ws = hws;
        for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
                base = dev->dual_ws ? dws->base[i] : ws->base;
                /* Wait till getwork/swtp/waitw/desched completes. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                                       BIT_ULL(56)));

                tag = plt_read64(base + SSOW_LF_GWS_TAG);
                pend_tt = (tag >> 32) & 0x3;
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }

                /* Wait for desched to complete. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & BIT_ULL(58));
        }
}

void
cn9k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
        else
                dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                              RTE_EVENT_MAX_QUEUES_PER_DEV :
                              dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                hws = hws * CN9K_DUAL_WS_NB_WS;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

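/*
 * Tx adapter data lives at the tail of each port's workslot memory, so
 * the per-port allocation (cookie + workslot + per-queue Tx data) has to
 * be regrown and the adapter table recopied whenever the mapping changes.
 */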
static int
cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        if (dev->tx_adptr_data == NULL)
                return 0;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(dws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        dws = RTE_PTR_ADD(ws_cookie,
                                          sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = dws;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(ws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        ws = RTE_PTR_ADD(ws_cookie,
                                         sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = ws;
                }
        }
        rte_mb();

        return 0;
}

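/*
 * Pick the enqueue/dequeue/Tx-adapter fastpath handlers matching the
 * negotiated Rx/Tx offload flags and the single vs dual workslot mode.
 */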
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        /* Single WS modes */
        const event_dequeue_t sso_hws_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Dual WS modes */
        const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_ca_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] =                                         \
                        cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca_seg[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
        [f6][f5][f4][f3][f2][f1][f0] =                                         \
                        cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue_t
                sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t
                sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t
                sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t
                sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] =                                         \
                        cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_seg_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_seg_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_seg_burst);
                }
        } else {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_burst);
                }
        }
        event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq_seg);
        else
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq);

        if (dev->dual_ws) {
                event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                        cn9k_sso_hws_dual_enq_fwd_burst;
                event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_seg_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_seg_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_seg_burst);
                        }
                } else {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_burst);
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq_seg);
                else
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq);
        }

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
        rte_mb();
}

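/*
 * Event port memory layout: the first cache line carries the
 * cnxk_sso_hws_cookie, followed by the cn9k_sso_hws (or
 * cn9k_sso_hws_dual) structure that the fastpath actually uses.
 */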
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        void *data;

        if (dev->dual_ws) {
                dws = rte_zmalloc("cn9k_dual_ws",
                                  sizeof(struct cn9k_sso_hws_dual) +
                                          RTE_CACHE_LINE_SIZE,
                                  RTE_CACHE_LINE_SIZE);
                if (dws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
                dws->base[0] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
                dws->base[1] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
                cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
                cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
                dws->hws_id = port_id;
                dws->swtag_req = 0;
                dws->vws = 0;

                data = dws;
        } else {
                /* Allocate event port memory */
                ws = rte_zmalloc("cn9k_ws",
                                 sizeof(struct cn9k_sso_hws) +
                                         RTE_CACHE_LINE_SIZE,
                                 RTE_CACHE_LINE_SIZE);
                if (ws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
                ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
                cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
                ws->hws_id = port_id;
                ws->swtag_req = 0;

                data = ws;
        }

        return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

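/*
 * Configure flow: validate the requested configuration, carve out SSO
 * HWS/HWGRP resources, allocate XAQ buffers, create the event ports and
 * restore any queue links that existed before a reconfigure.
 */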
static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
                                    cn9k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn9k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;

        rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
                            cn9k_sso_hws_flush_events);
        if (rc < 0)
                return rc;

        cn9k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static int
cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;

        return 0;
}

static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
                      void *tstmp_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        dws->lookup_mem = lookup_mem;
                        dws->tstamp = tstmp_info;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        ws->lookup_mem = lookup_mem;
                        ws->tstamp = tstmp_info;
                }
        }
}

static int
cn9k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn9k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;

        /* lookup_mem and tstamp info are common across Rx queues, so
         * queue 0's copies are used for the whole port.
         */
        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}

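/*
 * Recompute the SQB budget for a Tx queue handed to the Tx adapter:
 * cap it at CNXK_SSO_SQB_LIMIT while the adapter owns the queue and
 * keep roughly 30% of the buffers in reserve (the 70/100 scaling below).
 */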
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
                       bool ena)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        int i;

        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        cn9k_sso_txq_fc_update(eth_dev, i, ena);
        } else {
                uint16_t sq_limit;

                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq_limit =
                        ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
                                    sq->nb_sqb_bufs;
                txq->nb_sqb_bufs_adj =
                        sq_limit -
                        RTE_ALIGN_MUL_CEIL(sq_limit,
                                           (1ULL << txq->sqes_per_sqb_log2)) /
                                (1ULL << txq->sqes_per_sqb_log2);
                txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
        }
}

static int
cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
        return cn9k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_cryptodev *cdev, uint32_t *caps)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
                RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

        return 0;
}

static int
cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id, const struct rte_event *event)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(event);

        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        dev->is_ca_internal_port = 1;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
        .crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
        .crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

        .dump = cnxk_sso_dump,
        .dev_start = cn9k_sso_start,
        .dev_stop = cn9k_sso_stop,
        .dev_close = cn9k_sso_close,
        .dev_selftest = cn9k_sso_selftest,
};

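/*
 * CN9K parts have 128-byte cache lines and the workslot layout depends on
 * that, so bail out if the PMD was built with a different
 * RTE_CACHE_LINE_SIZE.
 */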
static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN9K_SSO_SINGLE_WS "=1"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");
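
/*
 * Example devargs (hypothetical PCI address; assumes the usual
 * "single_ws"/"xae_cnt" strings behind CN9K_SSO_SINGLE_WS and
 * CNXK_SSO_XAE_CNT) forcing single workslot mode and sizing the XAE count:
 *
 *   -a 0002:0e:00.0,single_ws=1,xae_cnt=8192
 */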