net/cnxk: add cn9k template Rx functions to build
dpdk.git: drivers/event/cnxk/cn9k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))

/* Pick the Rx dequeue template matching the negotiated Rx offload flags. */
#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
        deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]

/* Pick the Tx enqueue template; each Tx offload flag is one table axis. */
#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
        (enq_op = enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])

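/*
 * For example, with tx_offloads = NIX_TX_OFFLOAD_VLAN_QINQ_F |
 * NIX_TX_OFFLOAD_L3_L4_CSUM_F, CN9K_SET_EVDEV_ENQ_OP() resolves to
 * enq_ops[0][0][0][0][1][0][1], i.e. the template generated for exactly
 * that offload combination.
 */
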
static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_link(&dev->sso,
                                      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
                                      nb_link);
                rc |= roc_sso_hws_link(&dev->sso,
                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                       map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_unlink(&dev->sso,
                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                        map, nb_link);
                rc |= roc_sso_hws_unlink(&dev->sso,
                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                         map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t val;

        /* Set get_work tmo for HWS */
        val = dev->deq_tmo_ns ? NSEC2USEC(dev->deq_tmo_ns) - 1 : 0;
        if (dev->dual_ws) {
                dws = hws;
                dws->grp_base = grp_base;
                dws->fc_mem = (uint64_t *)dev->fc_iova;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
                plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
        } else {
                ws = hws;
                ws->grp_base = grp_base;
                ws->fc_mem = (uint64_t *)dev->fc_iova;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
        }
}

static void
cn9k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int i;

        if (dev->dual_ws) {
                dws = hws;
                for (i = 0; i < dev->nb_event_queues; i++) {
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                           (uint16_t *)&i, 1);
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                           (uint16_t *)&i, 1);
                }
                memset(dws, 0, sizeof(*dws));
        } else {
                ws = hws;
                for (i = 0; i < dev->nb_event_queues; i++)
                        roc_sso_hws_unlink(&dev->sso, ws->hws_id,
                                           (uint16_t *)&i, 1);
                memset(ws, 0, sizeof(*ws));
        }
}

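/*
 * Drain a HWGRP before teardown: repeatedly issue GET_WORK on the
 * workslot and pass each received event to the caller's handler until
 * the group's AQ_CNT, MISC_CNT and the CQ/DS fields of INT_CNT all read
 * zero, then invalidate the get-work cache (GWC).
 */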
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                          cnxk_handle_event_t fn, void *arg)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uintptr_t ws_base;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        /* Extract cq and ds count */
        cq_ds_cnt &= 0x3FFF3FFF0000;

        if (dev->dual_ws) {
                dws = hws;
                ws_base = dws->base[0];
        } else {
                ws = hws;
                ws_base = ws->base;
        }

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
                cn9k_sso_hws_get_work_empty(ws_base, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(
                                ws_base + SSOW_LF_GWS_TAG,
                                ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
                do {
                        val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}

static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t pend_state;
        uint8_t pend_tt;
        uintptr_t base;
        uint64_t tag;
        uint8_t i;

        dws = hws;
        ws = hws;
        for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
                base = dev->dual_ws ? dws->base[i] : ws->base;
                /* Wait till getwork/swtp/waitw/desched completes. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                                       BIT_ULL(56)));

                tag = plt_read64(base + SSOW_LF_GWS_TAG);
                pend_tt = (tag >> 32) & 0x3;
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }

                /* Wait for desched to complete. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & BIT_ULL(58));
        }
}

void
cn9k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
        else
                dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                        RTE_EVENT_MAX_QUEUES_PER_DEV :
                        dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                hws = hws * CN9K_DUAL_WS_NB_WS;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

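/*
 * Port memory is laid out as [cnxk_sso_hws_cookie][HWS context][Tx
 * adapter data]: grow the trailing table to hold one uint64_t per
 * (port, queue) pair and copy in the latest Tx adapter data for the
 * fastpath to use.
 */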
static int
cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        if (dev->tx_adptr_data == NULL)
                return 0;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(dws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        dws = RTE_PTR_ADD(ws_cookie,
                                          sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = dws;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(ws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        ws = RTE_PTR_ADD(ws_cookie,
                                         sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = ws;
                }
        }
        rte_mb();

        return 0;
}

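/*
 * The dequeue/Tx-enqueue tables below are filled in from the R()/T()
 * expansions of NIX_RX_FASTPATH_MODES and NIX_TX_FASTPATH_MODES: one
 * pre-generated template per offload-flag combination. The negotiated
 * offload flags then index the tables via CN9K_SET_EVDEV_DEQ_OP() and
 * CN9K_SET_EVDEV_ENQ_OP().
 */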
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        /* Single WS modes */
        const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_deq_tmo_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Dual WS modes */
        const event_dequeue_t sso_hws_dual_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn9k_sso_hws_dual_deq_tmo_ca_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        /* Tx modes */
        const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        const event_tx_adapter_enqueue_t sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
        [f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
                NIX_TX_FASTPATH_MODES
#undef T
        };

        event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_seg_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_seg_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_seg_burst);
                }

                if (dev->is_ca_internal_port && dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_ca_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_ca_seg_burst);
                }
        } else {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_ca);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_ca_burst);
                }

                if (dev->is_ca_internal_port && dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_ca);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_ca_burst);
                }
        }
        event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq_seg);
        else
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq);

        if (dev->dual_ws) {
                event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                        cn9k_sso_hws_dual_enq_fwd_burst;
                event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;

                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_seg_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_seg_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_seg_burst);
                        }
                        if (dev->is_ca_internal_port && dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue,
                                        sso_hws_dual_deq_tmo_ca_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_ca_seg_burst);
                        }
                } else {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_burst);
                        }
                        if (dev->is_ca_internal_port) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_ca);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_ca_burst);
                        }
                        if (dev->is_ca_internal_port && dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo_ca);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_ca_burst);
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq_seg);
                else
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq);
        }

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
        rte_mb();
}

static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        void *data;

        if (dev->dual_ws) {
                dws = rte_zmalloc("cn9k_dual_ws",
                                  sizeof(struct cn9k_sso_hws_dual) +
                                          RTE_CACHE_LINE_SIZE,
                                  RTE_CACHE_LINE_SIZE);
                if (dws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
                dws->base[0] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
                dws->base[1] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
                dws->hws_id = port_id;
                dws->swtag_req = 0;
                dws->vws = 0;

                data = dws;
        } else {
                /* Allocate event port memory */
                ws = rte_zmalloc("cn9k_ws",
                                 sizeof(struct cn9k_sso_hws) +
                                         RTE_CACHE_LINE_SIZE,
                                 RTE_CACHE_LINE_SIZE);
                if (ws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
                ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
                ws->hws_id = port_id;
                ws->swtag_req = 0;

                data = ws;
        }

        return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
                                    cn9k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie;
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        gws_cookie = cnxk_sso_hws_get_cookie(port);
        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn9k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;

        rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
                            cn9k_sso_hws_flush_events);
        if (rc < 0)
                return rc;

        cn9k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static int
cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;

        return 0;
}

static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
                      void *tstmp_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        dws->lookup_mem = lookup_mem;
                        dws->tstamp = tstmp_info;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        ws->lookup_mem = lookup_mem;
                        ws->tstamp = tstmp_info;
                }
        }
}

static int
cn9k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn9k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;

        /* Lookup mem and tstamp info are common across all Rx queues. */
        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}

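/*
 * When the Tx adapter owns a queue, cap the usable SQB count and keep
 * headroom below the SQ limit. A worked example, assuming sq_limit =
 * 512 and sqes_per_sqb_log2 = 3 (8 SQEs per SQB): nb_sqb_bufs_adj =
 * 512 - RTE_ALIGN_MUL_CEIL(512, 8) / 8 = 448, which the final 70%
 * scaling reduces to 313 usable SQBs.
 */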
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
                       bool ena)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        int i;

        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        cn9k_sso_txq_fc_update(eth_dev, i, ena);
        } else {
                uint16_t sq_limit;

                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq_limit = ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
                                 sq->nb_sqb_bufs;
                txq->nb_sqb_bufs_adj =
                        sq_limit -
                        RTE_ALIGN_MUL_CEIL(sq_limit,
                                           (1ULL << txq->sqes_per_sqb_log2)) /
                                (1ULL << txq->sqes_per_sqb_log2);
                txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
        }
}

static int
cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
        return cn9k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_cryptodev *cdev, uint32_t *caps)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
                RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

        return 0;
}

static int
cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id, const struct rte_event *event)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(event);

        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        dev->is_ca_internal_port = 1;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");

        return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
        .crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
        .crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,

        .dump = cnxk_sso_dump,
        .dev_start = cn9k_sso_start,
        .dev_stop = cn9k_sso_stop,
        .dev_close = cn9k_sso_close,
        .dev_selftest = cn9k_sso_selftest,
};

static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resources: queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN9K_SSO_SINGLE_WS "=1"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");
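
/*
 * Example usage (illustrative PCI address; assuming CN9K_SSO_SINGLE_WS
 * and CNXK_SSO_XAE_CNT expand to the "single_ws" and "xae_cnt" devarg
 * keys described in the PMD documentation):
 *   dpdk-app -a 0002:0e:00.0,single_ws=1,xae_cnt=16384
 */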