event/cnxk: support Tx adapter
[dpdk.git] / drivers / event / cnxk / cn9k_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "cn9k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
8
/* Number of GWS (work slots) paired per dual-workslot event port. */
#define CN9K_DUAL_WS_NB_WS          2
/*
 * Map (port, slot-in-pair) to the underlying HWS id.
 * Note: every macro argument is parenthesized so that argument
 * expressions with lower precedence than '+' expand correctly.
 */
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))
11
12 static void
13 cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
14 {
15         ws->tag_op = base + SSOW_LF_GWS_TAG;
16         ws->wqp_op = base + SSOW_LF_GWS_WQP;
17         ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
18         ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
19         ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
20         ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
21 }
22
23 static int
24 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
25 {
26         struct cnxk_sso_evdev *dev = arg;
27         struct cn9k_sso_hws_dual *dws;
28         struct cn9k_sso_hws *ws;
29         int rc;
30
31         if (dev->dual_ws) {
32                 dws = port;
33                 rc = roc_sso_hws_link(&dev->sso,
34                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
35                                       nb_link);
36                 rc |= roc_sso_hws_link(&dev->sso,
37                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
38                                        map, nb_link);
39         } else {
40                 ws = port;
41                 rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
42         }
43
44         return rc;
45 }
46
47 static int
48 cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
49 {
50         struct cnxk_sso_evdev *dev = arg;
51         struct cn9k_sso_hws_dual *dws;
52         struct cn9k_sso_hws *ws;
53         int rc;
54
55         if (dev->dual_ws) {
56                 dws = port;
57                 rc = roc_sso_hws_unlink(&dev->sso,
58                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
59                                         map, nb_link);
60                 rc |= roc_sso_hws_unlink(&dev->sso,
61                                          CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
62                                          map, nb_link);
63         } else {
64                 ws = port;
65                 rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
66         }
67
68         return rc;
69 }
70
71 static void
72 cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
73 {
74         struct cnxk_sso_evdev *dev = arg;
75         struct cn9k_sso_hws_dual *dws;
76         struct cn9k_sso_hws *ws;
77         uint64_t val;
78
79         /* Set get_work tmo for HWS */
80         val = NSEC2USEC(dev->deq_tmo_ns) - 1;
81         if (dev->dual_ws) {
82                 dws = hws;
83                 rte_memcpy(dws->grps_base, grps_base,
84                            sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
85                 dws->fc_mem = dev->fc_mem;
86                 dws->xaq_lmt = dev->xaq_lmt;
87
88                 plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
89                 plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
90         } else {
91                 ws = hws;
92                 rte_memcpy(ws->grps_base, grps_base,
93                            sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
94                 ws->fc_mem = dev->fc_mem;
95                 ws->xaq_lmt = dev->xaq_lmt;
96
97                 plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
98         }
99 }
100
101 static void
102 cn9k_sso_hws_release(void *arg, void *hws)
103 {
104         struct cnxk_sso_evdev *dev = arg;
105         struct cn9k_sso_hws_dual *dws;
106         struct cn9k_sso_hws *ws;
107         int i;
108
109         if (dev->dual_ws) {
110                 dws = hws;
111                 for (i = 0; i < dev->nb_event_queues; i++) {
112                         roc_sso_hws_unlink(&dev->sso,
113                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
114                                            (uint16_t *)&i, 1);
115                         roc_sso_hws_unlink(&dev->sso,
116                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
117                                            (uint16_t *)&i, 1);
118                 }
119                 memset(dws, 0, sizeof(*dws));
120         } else {
121                 ws = hws;
122                 for (i = 0; i < dev->nb_event_queues; i++)
123                         roc_sso_hws_unlink(&dev->sso, ws->hws_id,
124                                            (uint16_t *)&i, 1);
125                 memset(ws, 0, sizeof(*ws));
126         }
127 }
128
/*
 * Drain all pending events of one event queue (GGRP) through a workslot.
 *
 * @hws:      port (cn9k_sso_hws or cn9k_sso_hws_dual depending on mode)
 * @queue_id: GGRP to flush
 * @base:     GGRP LF register base for @queue_id
 * @fn:       optional callback invoked for each drained event (may be NULL)
 * @arg:      eventdev pointer; also forwarded to @fn
 *
 * Loops issuing GET_WORK on the GGRP until its admission, in-flight and
 * CQ/DS counters all read zero, handing each drained event to @fn.
 */
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                          cnxk_handle_event_t fn, void *arg)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws_state *st;
        struct cn9k_sso_hws *ws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uintptr_t ws_base;
        uint64_t val, req;

        /* Stop the group from accepting new work while draining. */
        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        /* Build the GET_WORK request targeting this specific group. */
        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        /* Keep only the CQ and DS count fields of the INT_CNT register. */
        cq_ds_cnt &= 0x3FFF3FFF0000;

        /* Dual-WS ports drain through the first GWS of the pair. */
        if (dev->dual_ws) {
                dws = hws;
                st = &dws->ws_state[0];
                ws_base = dws->base[0];
        } else {
                ws = hws;
                /* cn9k_sso_hws begins with its cn9k_sso_hws_state. */
                st = (struct cn9k_sso_hws_state *)ws;
                ws_base = ws->base;
        }

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                /* Pull one work item from the group. */
                plt_write64(req, st->getwrk_op);
                cn9k_sso_hws_get_work_empty(st, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                /* Release the tag if the drained event held one. */
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(st->tag_op,
                                                 st->swtag_flush_op);
                /* Wait for the switch-tag (bit 56) to complete. */
                do {
                        val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        /* Invalidate the GWS cache once the group is empty. */
        plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}
185
/*
 * Quiesce and reset a workslot (both GWS of the pair in dual-WS mode):
 * wait for any outstanding operation to finish, untag/deschedule any
 * event still held by the GWS, then wait for the deschedule to land.
 */
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t pend_state;
        uint8_t pend_tt;
        uintptr_t base;
        uint64_t tag;
        uint8_t i;

        /* Only one of dws/ws is valid; selected by dev->dual_ws below. */
        dws = hws;
        ws = hws;
        for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
                base = dev->dual_ws ? dws->base[i] : ws->base;
                /* Wait till getwork/swtp/waitw/desched completes. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                                       BIT_ULL(56)));

                /* Tag-type field of the current tag (bits 33:32). */
                tag = plt_read64(base + SSOW_LF_GWS_TAG);
                pend_tt = (tag >> 32) & 0x3;
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        /* Atomic/ordered tags must be untagged first. */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }

                /* Wait for desched to complete. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & BIT_ULL(58));
        }
}
224
225 void
226 cn9k_sso_set_rsrc(void *arg)
227 {
228         struct cnxk_sso_evdev *dev = arg;
229
230         if (dev->dual_ws)
231                 dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
232         else
233                 dev->max_event_ports = dev->sso.max_hws;
234         dev->max_event_queues =
235                 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
236                               RTE_EVENT_MAX_QUEUES_PER_DEV :
237                               dev->sso.max_hwgrp;
238 }
239
240 static int
241 cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
242 {
243         struct cnxk_sso_evdev *dev = arg;
244
245         if (dev->dual_ws)
246                 hws = hws * CN9K_DUAL_WS_NB_WS;
247
248         return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
249 }
250
251 static int
252 cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
253 {
254         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
255         int i;
256
257         if (dev->tx_adptr_data == NULL)
258                 return 0;
259
260         for (i = 0; i < dev->nb_event_ports; i++) {
261                 if (dev->dual_ws) {
262                         struct cn9k_sso_hws_dual *dws =
263                                 event_dev->data->ports[i];
264                         void *ws_cookie;
265
266                         ws_cookie = cnxk_sso_hws_get_cookie(dws);
267                         ws_cookie = rte_realloc_socket(
268                                 ws_cookie,
269                                 sizeof(struct cnxk_sso_hws_cookie) +
270                                         sizeof(struct cn9k_sso_hws_dual) +
271                                         (sizeof(uint64_t) *
272                                          (dev->max_port_id + 1) *
273                                          RTE_MAX_QUEUES_PER_PORT),
274                                 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
275                         if (ws_cookie == NULL)
276                                 return -ENOMEM;
277                         dws = RTE_PTR_ADD(ws_cookie,
278                                           sizeof(struct cnxk_sso_hws_cookie));
279                         memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
280                                sizeof(uint64_t) * (dev->max_port_id + 1) *
281                                        RTE_MAX_QUEUES_PER_PORT);
282                         event_dev->data->ports[i] = dws;
283                 } else {
284                         struct cn9k_sso_hws *ws = event_dev->data->ports[i];
285                         void *ws_cookie;
286
287                         ws_cookie = cnxk_sso_hws_get_cookie(ws);
288                         ws_cookie = rte_realloc_socket(
289                                 ws_cookie,
290                                 sizeof(struct cnxk_sso_hws_cookie) +
291                                         sizeof(struct cn9k_sso_hws_dual) +
292                                         (sizeof(uint64_t) *
293                                          (dev->max_port_id + 1) *
294                                          RTE_MAX_QUEUES_PER_PORT),
295                                 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
296                         if (ws_cookie == NULL)
297                                 return -ENOMEM;
298                         ws = RTE_PTR_ADD(ws_cookie,
299                                          sizeof(struct cnxk_sso_hws_cookie));
300                         memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
301                                sizeof(uint64_t) * (dev->max_port_id + 1) *
302                                        RTE_MAX_QUEUES_PER_PORT);
303                         event_dev->data->ports[i] = ws;
304                 }
305         }
306         rte_mb();
307
308         return 0;
309 }
310
311 static void
312 cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
313 {
314         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
315         /* Single WS modes */
316         const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
317 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
318         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
319                 NIX_RX_FASTPATH_MODES
320 #undef R
321         };
322
323         const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
324 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
325         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
326                 NIX_RX_FASTPATH_MODES
327 #undef R
328         };
329
330         const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
331 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
332         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
333                 NIX_RX_FASTPATH_MODES
334 #undef R
335         };
336
337         const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
338 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
339         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
340                 NIX_RX_FASTPATH_MODES
341 #undef R
342         };
343
344         const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
345 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
346         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
347                 NIX_RX_FASTPATH_MODES
348 #undef R
349         };
350
351         const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
352 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
353         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
354                 NIX_RX_FASTPATH_MODES
355 #undef R
356         };
357
358         const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
359 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
360         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
361                 NIX_RX_FASTPATH_MODES
362 #undef R
363         };
364
365         const event_dequeue_burst_t
366                 sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
367 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
368         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
369                         NIX_RX_FASTPATH_MODES
370 #undef R
371                 };
372
373         /* Dual WS modes */
374         const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
375 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
376         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
377                 NIX_RX_FASTPATH_MODES
378 #undef R
379         };
380
381         const event_dequeue_burst_t sso_hws_dual_deq_burst[2][2][2][2][2][2] = {
382 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
383         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
384                 NIX_RX_FASTPATH_MODES
385 #undef R
386         };
387
388         const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2] = {
389 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
390         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
391                 NIX_RX_FASTPATH_MODES
392 #undef R
393         };
394
395         const event_dequeue_burst_t
396                 sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2] = {
397 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
398         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
399                         NIX_RX_FASTPATH_MODES
400 #undef R
401                 };
402
403         const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
404 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
405         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
406                 NIX_RX_FASTPATH_MODES
407 #undef R
408         };
409
410         const event_dequeue_burst_t
411                 sso_hws_dual_deq_seg_burst[2][2][2][2][2][2] = {
412 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
413         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
414                         NIX_RX_FASTPATH_MODES
415 #undef R
416                 };
417
418         const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2] = {
419 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
420         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
421                 NIX_RX_FASTPATH_MODES
422 #undef R
423         };
424
425         const event_dequeue_burst_t
426                 sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2] = {
427 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
428         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
429                         NIX_RX_FASTPATH_MODES
430 #undef R
431                 };
432
433         event_dev->enqueue = cn9k_sso_hws_enq;
434         event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
435         event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
436         event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
437         if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
438                 event_dev->dequeue = sso_hws_deq_seg
439                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
440                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
441                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
442                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
443                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
444                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
445                 event_dev->dequeue_burst = sso_hws_deq_seg_burst
446                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
447                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
448                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
449                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
450                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
451                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
452                 if (dev->is_timeout_deq) {
453                         event_dev->dequeue = sso_hws_deq_tmo_seg
454                                 [!!(dev->rx_offloads &
455                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
456                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
457                                 [!!(dev->rx_offloads &
458                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
459                                 [!!(dev->rx_offloads &
460                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
461                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
462                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
463                         event_dev->dequeue_burst = sso_hws_deq_tmo_seg_burst
464                                 [!!(dev->rx_offloads &
465                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
466                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
467                                 [!!(dev->rx_offloads &
468                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
469                                 [!!(dev->rx_offloads &
470                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
471                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
472                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
473                 }
474         } else {
475                 event_dev->dequeue = sso_hws_deq
476                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
477                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
478                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
479                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
480                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
481                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
482                 event_dev->dequeue_burst = sso_hws_deq_burst
483                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
484                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
485                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
486                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
487                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
488                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
489                 if (dev->is_timeout_deq) {
490                         event_dev->dequeue = sso_hws_deq_tmo
491                                 [!!(dev->rx_offloads &
492                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
493                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
494                                 [!!(dev->rx_offloads &
495                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
496                                 [!!(dev->rx_offloads &
497                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
498                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
499                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
500                         event_dev->dequeue_burst = sso_hws_deq_tmo_burst
501                                 [!!(dev->rx_offloads &
502                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
503                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
504                                 [!!(dev->rx_offloads &
505                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
506                                 [!!(dev->rx_offloads &
507                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
508                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
509                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
510                 }
511         }
512
513         if (dev->dual_ws) {
514                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
515                 event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
516                 event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
517                 event_dev->enqueue_forward_burst =
518                         cn9k_sso_hws_dual_enq_fwd_burst;
519
520                 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
521                         event_dev->dequeue = sso_hws_dual_deq_seg
522                                 [!!(dev->rx_offloads &
523                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
524                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
525                                 [!!(dev->rx_offloads &
526                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
527                                 [!!(dev->rx_offloads &
528                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
529                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
530                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
531                         event_dev->dequeue_burst = sso_hws_dual_deq_seg_burst
532                                 [!!(dev->rx_offloads &
533                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
534                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
535                                 [!!(dev->rx_offloads &
536                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
537                                 [!!(dev->rx_offloads &
538                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
539                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
540                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
541                         if (dev->is_timeout_deq) {
542                                 event_dev->dequeue = sso_hws_dual_deq_tmo_seg
543                                         [!!(dev->rx_offloads &
544                                             NIX_RX_OFFLOAD_VLAN_STRIP_F)]
545                                         [!!(dev->rx_offloads &
546                                             NIX_RX_OFFLOAD_TSTAMP_F)]
547                                         [!!(dev->rx_offloads &
548                                             NIX_RX_OFFLOAD_MARK_UPDATE_F)]
549                                         [!!(dev->rx_offloads &
550                                             NIX_RX_OFFLOAD_CHECKSUM_F)]
551                                         [!!(dev->rx_offloads &
552                                             NIX_RX_OFFLOAD_PTYPE_F)]
553                                         [!!(dev->rx_offloads &
554                                             NIX_RX_OFFLOAD_RSS_F)];
555                                 event_dev->dequeue_burst =
556                                         sso_hws_dual_deq_tmo_seg_burst
557                                                 [!!(dev->rx_offloads &
558                                                   NIX_RX_OFFLOAD_VLAN_STRIP_F)]
559                                                 [!!(dev->rx_offloads &
560                                                     NIX_RX_OFFLOAD_TSTAMP_F)]
561                                                 [!!(dev->rx_offloads &
562                                                   NIX_RX_OFFLOAD_MARK_UPDATE_F)]
563                                                 [!!(dev->rx_offloads &
564                                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
565                                                 [!!(dev->rx_offloads &
566                                                     NIX_RX_OFFLOAD_PTYPE_F)]
567                                                 [!!(dev->rx_offloads &
568                                                     NIX_RX_OFFLOAD_RSS_F)];
569                         }
570                 } else {
571                         event_dev->dequeue = sso_hws_dual_deq
572                                 [!!(dev->rx_offloads &
573                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
574                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
575                                 [!!(dev->rx_offloads &
576                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
577                                 [!!(dev->rx_offloads &
578                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
579                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
580                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
581                         event_dev->dequeue_burst = sso_hws_dual_deq_burst
582                                 [!!(dev->rx_offloads &
583                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
584                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
585                                 [!!(dev->rx_offloads &
586                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
587                                 [!!(dev->rx_offloads &
588                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
589                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
590                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
591                         if (dev->is_timeout_deq) {
592                                 event_dev->dequeue = sso_hws_dual_deq_tmo
593                                         [!!(dev->rx_offloads &
594                                             NIX_RX_OFFLOAD_VLAN_STRIP_F)]
595                                         [!!(dev->rx_offloads &
596                                             NIX_RX_OFFLOAD_TSTAMP_F)]
597                                         [!!(dev->rx_offloads &
598                                             NIX_RX_OFFLOAD_MARK_UPDATE_F)]
599                                         [!!(dev->rx_offloads &
600                                             NIX_RX_OFFLOAD_CHECKSUM_F)]
601                                         [!!(dev->rx_offloads &
602                                             NIX_RX_OFFLOAD_PTYPE_F)]
603                                         [!!(dev->rx_offloads &
604                                             NIX_RX_OFFLOAD_RSS_F)];
605                                 event_dev->dequeue_burst =
606                                         sso_hws_dual_deq_tmo_burst
607                                                 [!!(dev->rx_offloads &
608                                                   NIX_RX_OFFLOAD_VLAN_STRIP_F)]
609                                                 [!!(dev->rx_offloads &
610                                                   NIX_RX_OFFLOAD_TSTAMP_F)]
611                                                 [!!(dev->rx_offloads &
612                                                   NIX_RX_OFFLOAD_MARK_UPDATE_F)]
613                                                 [!!(dev->rx_offloads &
614                                                   NIX_RX_OFFLOAD_CHECKSUM_F)]
615                                                 [!!(dev->rx_offloads &
616                                                   NIX_RX_OFFLOAD_PTYPE_F)]
617                                                 [!!(dev->rx_offloads &
618                                                   NIX_RX_OFFLOAD_RSS_F)];
619                         }
620                 }
621         }
622
623         rte_mb();
624 }
625
/* Allocate and initialize the per-port work-slot (HWS) memory.
 *
 * arg     - struct cnxk_sso_evdev * of the owning event device.
 * port_id - event port whose HWS memory is being created.
 *
 * Returns a pointer to the port structure (cn9k_sso_hws_dual when the
 * device runs paired work slots, cn9k_sso_hws otherwise), or NULL on
 * allocation failure. The allocation is over-sized by one cache line;
 * the first part holds a struct cnxk_sso_hws_cookie and the returned
 * pointer is advanced past it (see cnxk_sso_hws_get_cookie()).
 * NOTE(review): assumes sizeof(struct cnxk_sso_hws_cookie) <=
 * RTE_CACHE_LINE_SIZE - confirm against cnxk_eventdev.h.
 */
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	void *data;

	if (dev->dual_ws) {
		dws = rte_zmalloc("cn9k_dual_ws",
				  sizeof(struct cn9k_sso_hws_dual) +
					  RTE_CACHE_LINE_SIZE,
				  RTE_CACHE_LINE_SIZE);
		if (dws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		/* First cache line is reserved for cookie */
		dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
		/* A dual-WS port owns two hardware work slots; resolve the
		 * MMIO base and fast-path op addresses for each half.
		 */
		dws->base[0] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
		dws->base[1] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
		cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
		cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
		dws->hws_id = port_id;
		dws->swtag_req = 0;
		dws->vws = 0;

		data = dws;
	} else {
		/* Allocate event port memory */
		ws = rte_zmalloc("cn9k_ws",
				 sizeof(struct cn9k_sso_hws) +
					 RTE_CACHE_LINE_SIZE,
				 RTE_CACHE_LINE_SIZE);
		if (ws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		/* First cache line is reserved for cookie */
		ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
		ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
		/* struct cn9k_sso_hws begins with a cn9k_sso_hws_state, so
		 * the cast hands cn9k_init_hws_ops() its embedded state.
		 */
		cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
		ws->hws_id = port_id;
		ws->swtag_req = 0;

		data = ws;
	}

	return data;
}
679
680 static void
681 cn9k_sso_info_get(struct rte_eventdev *event_dev,
682                   struct rte_event_dev_info *dev_info)
683 {
684         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
685
686         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
687         cnxk_sso_info_get(dev, dev_info);
688 }
689
/* eventdev dev_configure callback.
 *
 * Validates the requested configuration, then (re)builds the SSO
 * resources: hardware groups/work slots sized to nb_event_queues and
 * nb_event_ports, the XAQ pool backing in-flight events, and the event
 * port memory. Previously created resources are torn down first and any
 * prior port->queue links are restored afterwards.
 *
 * Returns 0 on success, -EINVAL on an invalid configuration, -ENODEV if
 * hardware resources cannot be initialized, or a negative errno from the
 * XAQ/port setup helpers (resources are released on those paths).
 */
static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	/* Drop resources from any earlier configure before re-sizing. */
	roc_sso_rsrc_fini(&dev->sso);

	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
				    cn9k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

	dev->configured = 1;
	/* Make the configured state visible to other lcores. */
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}
731
732 static int
733 cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
734                     const struct rte_event_port_conf *port_conf)
735 {
736
737         RTE_SET_USED(port_conf);
738         return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
739 }
740
741 static void
742 cn9k_sso_port_release(void *port)
743 {
744         struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
745         struct cnxk_sso_evdev *dev;
746
747         if (port == NULL)
748                 return;
749
750         dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
751         if (!gws_cookie->configured)
752                 goto free;
753
754         cn9k_sso_hws_release(dev, port);
755         memset(gws_cookie, 0, sizeof(*gws_cookie));
756 free:
757         rte_free(gws_cookie);
758 }
759
760 static int
761 cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
762                    const uint8_t queues[], const uint8_t priorities[],
763                    uint16_t nb_links)
764 {
765         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
766         uint16_t hwgrp_ids[nb_links];
767         uint16_t link;
768
769         RTE_SET_USED(priorities);
770         for (link = 0; link < nb_links; link++)
771                 hwgrp_ids[link] = queues[link];
772         nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
773
774         return (int)nb_links;
775 }
776
777 static int
778 cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
779                      uint8_t queues[], uint16_t nb_unlinks)
780 {
781         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
782         uint16_t hwgrp_ids[nb_unlinks];
783         uint16_t unlink;
784
785         for (unlink = 0; unlink < nb_unlinks; unlink++)
786                 hwgrp_ids[unlink] = queues[unlink];
787         nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
788
789         return (int)nb_unlinks;
790 }
791
792 static int
793 cn9k_sso_start(struct rte_eventdev *event_dev)
794 {
795         int rc;
796
797         rc = cn9k_sso_updt_tx_adptr_data(event_dev);
798         if (rc < 0)
799                 return rc;
800
801         rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
802                             cn9k_sso_hws_flush_events);
803         if (rc < 0)
804                 return rc;
805
806         cn9k_sso_fp_fns_set(event_dev);
807
808         return rc;
809 }
810
811 static void
812 cn9k_sso_stop(struct rte_eventdev *event_dev)
813 {
814         cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
815 }
816
817 static int
818 cn9k_sso_close(struct rte_eventdev *event_dev)
819 {
820         return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
821 }
822
823 static int
824 cn9k_sso_selftest(void)
825 {
826         return cnxk_sso_selftest(RTE_STR(event_cn9k));
827 }
828
829 static int
830 cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
831                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
832 {
833         int rc;
834
835         RTE_SET_USED(event_dev);
836         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
837         if (rc)
838                 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
839         else
840                 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
841                         RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
842                         RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
843
844         return 0;
845 }
846
847 static void
848 cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
849                       void *tstmp_info)
850 {
851         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
852         int i;
853
854         for (i = 0; i < dev->nb_event_ports; i++) {
855                 if (dev->dual_ws) {
856                         struct cn9k_sso_hws_dual *dws =
857                                 event_dev->data->ports[i];
858                         dws->lookup_mem = lookup_mem;
859                         dws->tstamp = tstmp_info;
860                 } else {
861                         struct cn9k_sso_hws *ws = event_dev->data->ports[i];
862                         ws->lookup_mem = lookup_mem;
863                         ws->tstamp = tstmp_info;
864                 }
865         }
866 }
867
/* eventdev eth_rx_adapter_queue_add callback.
 *
 * Binds an ethdev Rx queue (all queues when rx_queue_id is negative) to
 * the Rx adapter via the common layer, then pushes the ethdev's lookup
 * memory and timestamp info into every event port and re-resolves the
 * fast-path functions (Rx offload flags may have changed).
 *
 * Returns 0 on success, -EINVAL if the ethdev is not a cn9k device or
 * if the common-layer add fails (the underlying error code is folded
 * into -EINVAL).
 */
static int
cn9k_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct cn9k_eth_rxq *rxq;
	void *lookup_mem;
	void *tstmp_info;
	int rc;

	/* Internal-port Rx adapter only works with cn9k ethdevs. */
	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
	if (rc)
		return -EINVAL;

	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
					   queue_conf);
	if (rc)
		return -EINVAL;

	/* lookup_mem/tstamp are read from queue 0 regardless of
	 * rx_queue_id - NOTE(review): presumably they are shared across
	 * all Rx queues of the port and queue 0 is always configured;
	 * confirm against the cn9k ethdev Rx queue setup.
	 */
	rxq = eth_dev->data->rx_queues[0];
	lookup_mem = rxq->lookup_mem;
	tstmp_info = rxq->tstamp;
	cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}
896
897 static int
898 cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
899                               const struct rte_eth_dev *eth_dev,
900                               int32_t rx_queue_id)
901 {
902         int rc;
903
904         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
905         if (rc)
906                 return -EINVAL;
907
908         return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
909 }
910
911 static int
912 cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
913                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
914 {
915         int ret;
916
917         RTE_SET_USED(dev);
918         ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
919         if (ret)
920                 *caps = 0;
921         else
922                 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
923
924         return 0;
925 }
926
/* Adjust the SQB (send-queue buffer) accounting of a Tx queue for use
 * with the event Tx adapter.
 *
 * When @ena is true the usable depth is clamped to CNXK_SSO_SQB_LIMIT;
 * when false the queue's full SQB count is restored. A negative
 * @tx_queue_id applies the update to every Tx queue of the ethdev.
 */
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
		       bool ena)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cn9k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int i;

	if (tx_queue_id < 0) {
		/* Recurse once per queue with a concrete queue id. */
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cn9k_sso_txq_fc_update(eth_dev, i, ena);
	} else {
		uint16_t sq_limit;

		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sq_limit =
			ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
				    sq->nb_sqb_bufs;
		/* Subtract the SQBs consumed by alignment of the limit to
		 * whole SQE blocks (1 << sqes_per_sqb_log2 SQEs per SQB),
		 * then keep only 70% of the result as usable depth.
		 * NOTE(review): presumably headroom so flow control kicks
		 * in before the SQB aura is exhausted - confirm against
		 * the cnxk Tx queue flow-control setup.
		 */
		txq->nb_sqb_bufs_adj =
			sq_limit -
			RTE_ALIGN_MUL_CEIL(sq_limit,
					   (1ULL << txq->sqes_per_sqb_log2)) /
				(1ULL << txq->sqes_per_sqb_log2);
		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
	}
}
955
956 static int
957 cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
958                               const struct rte_eth_dev *eth_dev,
959                               int32_t tx_queue_id)
960 {
961         int rc;
962
963         RTE_SET_USED(id);
964         rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
965         if (rc < 0)
966                 return rc;
967         cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
968         rc = cn9k_sso_updt_tx_adptr_data(event_dev);
969         if (rc < 0)
970                 return rc;
971         cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
972
973         return 0;
974 }
975
976 static int
977 cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
978                               const struct rte_eth_dev *eth_dev,
979                               int32_t tx_queue_id)
980 {
981         int rc;
982
983         RTE_SET_USED(id);
984         rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
985         if (rc < 0)
986                 return rc;
987         cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
988         return cn9k_sso_updt_tx_adptr_data(event_dev);
989 }
990
/* Eventdev ops table for the CN9K SSO PMD. Entries named cnxk_* are
 * shared with the other cnxk eventdev drivers; cn9k_* entries are
 * CN9K-specific wrappers defined in this file.
 */
static struct rte_eventdev_ops cn9k_sso_dev_ops = {
	.dev_infos_get = cn9k_sso_info_get,
	.dev_configure = cn9k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn9k_sso_port_setup,
	.port_release = cn9k_sso_port_release,
	.port_link = cn9k_sso_port_link,
	.port_unlink = cn9k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,

	/* Ethernet Rx adapter ops. */
	.eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

	/* Ethernet Tx adapter ops. */
	.eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

	/* Event timer adapter ops. */
	.timer_adapter_caps_get = cnxk_tim_caps_get,

	.dump = cnxk_sso_dump,
	.dev_start = cn9k_sso_start,
	.dev_stop = cn9k_sso_stop,
	.dev_close = cn9k_sso_close,
	.dev_selftest = cn9k_sso_selftest,
};
1022
/* Per-device init hook invoked from the PCI probe path.
 *
 * Wires up the ops table, initializes the platform and common SSO
 * layers (primary process only), and discovers the available event
 * queue/port resources. Secondary processes only install the fast-path
 * function pointers set up by the primary.
 *
 * Returns 0 on success, -EFAULT when built for the wrong cache-line
 * size, -ENODEV when no SSO resources are available, or a negative
 * errno from the underlying init helpers.
 */
static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	/* CN9K hardware uses 128-byte cache lines; a 64-byte build would
	 * mis-size the cache-aligned structures shared with hardware.
	 */
	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc < 0) {
		plt_err("Failed to initialize platform model");
		return rc;
	}

	event_dev->dev_ops = &cn9k_sso_dev_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		cn9k_sso_fp_fns_set(event_dev);
		return 0;
	}

	rc = cnxk_sso_init(event_dev);
	if (rc < 0)
		return rc;

	cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
	if (!dev->max_event_ports || !dev->max_event_queues) {
		plt_err("Not enough eventdev resource queues=%d ports=%d",
			dev->max_event_queues, dev->max_event_ports);
		cnxk_sso_fini(event_dev);
		return -ENODEV;
	}

	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		    event_dev->data->name, dev->max_event_queues,
		    dev->max_event_ports);

	return 0;
}
1065
1066 static int
1067 cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1068 {
1069         return rte_event_pmd_pci_probe(
1070                 pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
1071 }
1072
/* PCI id table; the all-zero entry is the mandatory terminator.
 * NOTE(review): only the terminator is visible here - the CN9K RVU SSO
 * device ids are presumably contributed elsewhere (or this listing is
 * truncated); confirm before relying on match behavior.
 */
static const struct rte_pci_id cn9k_pci_sso_map[] = {
	{
		.vendor_id = 0,
	},
};
1078
/* PCI driver descriptor: requires BAR mapping and IOVA-as-VA; removal
 * is handled by the common cnxk teardown.
 */
static struct rte_pci_driver cn9k_pci_sso = {
	.id_table = cn9k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn9k_sso_probe,
	.remove = cnxk_sso_remove,
};
1085
/* Register the PMD with EAL: driver, PCI id table, kernel module
 * dependency, and the devargs accepted on the EAL command line.
 */
RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CNXK_SSO_FORCE_BP "=1"
			      CN9K_SSO_SINGLE_WS "=1"
			      CNXK_TIM_DISABLE_NPA "=1"
			      CNXK_TIM_CHNK_SLOTS "=<int>"
			      CNXK_TIM_RINGS_LMT "=<int>"
			      CNXK_TIM_STATS_ENA "=1");