event/cnxk: add macros to set eventdev operations
drivers/event/cnxk/cn9k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))

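/*
 * Select a dequeue/enqueue function from the 6-dimensional fastpath tables
 * below. Each dimension is the boolean presence of one Rx/Tx offload flag,
 * so the selected worker function is specialized for exactly the offloads
 * the device was configured with.
 */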
#define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
        (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])

#define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
        (enq_op =                                                              \
                 enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])

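/* Pre-compute the per-workslot register addresses used in the fastpath. */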
static void
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
{
        ws->tag_op = base + SSOW_LF_GWS_TAG;
        ws->wqp_op = base + SSOW_LF_GWS_WQP;
        ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
        ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
        ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
        ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}

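/*
 * Link an event port to the given HWGRPs. In dual workslot mode both
 * workslots of the pair are linked so that either can receive work from the
 * mapped groups.
 */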
static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_link(&dev->sso,
                                      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
                                      nb_link);
                rc |= roc_sso_hws_link(&dev->sso,
                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                       map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

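/* Reverse of cn9k_sso_hws_link(): detach the port from the given HWGRPs. */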
static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_unlink(&dev->sso,
                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                        map, nb_link);
                rc |= roc_sso_hws_unlink(&dev->sso,
                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                         map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

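/*
 * Per-port setup: copy the HWGRP base addresses and XAQ flow-control state
 * into the workslot and program the get_work timeout (SSOW_LF_GWS_NW_TIM).
 */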
static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t val;

        /* Set get_work tmo for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;
        if (dev->dual_ws) {
                dws = hws;
                rte_memcpy(dws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                dws->fc_mem = dev->fc_mem;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
                plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
        } else {
                ws = hws;
                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
        }
}

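/* Unlink the port from all event queues and clear its workslot state. */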
static void
cn9k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int i;

        if (dev->dual_ws) {
                dws = hws;
                for (i = 0; i < dev->nb_event_queues; i++) {
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                           (uint16_t *)&i, 1);
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                           (uint16_t *)&i, 1);
                }
                memset(dws, 0, sizeof(*dws));
        } else {
                ws = hws;
                for (i = 0; i < dev->nb_event_queues; i++)
                        roc_sso_hws_unlink(&dev->sso, ws->hws_id,
                                           (uint16_t *)&i, 1);
                memset(ws, 0, sizeof(*ws));
        }
}

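/*
 * Drain all events pending on a HWGRP: repeatedly issue GET_WORK against the
 * group until its pending-work counters read zero, handing each received
 * event to the caller-supplied callback. In dual workslot mode the first
 * workslot of the pair is used for draining.
 */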
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                          cnxk_handle_event_t fn, void *arg)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws_state *st;
        struct cn9k_sso_hws *ws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uintptr_t ws_base;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        if (dev->dual_ws) {
                dws = hws;
                st = &dws->ws_state[0];
                ws_base = dws->base[0];
        } else {
                ws = hws;
                st = (struct cn9k_sso_hws_state *)ws;
                ws_base = ws->base;
        }

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, st->getwrk_op);
                cn9k_sso_hws_get_work_empty(st, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(st->tag_op,
                                                 st->swtag_flush_op);
                do {
                        val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}

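/*
 * Quiesce a workslot: wait for any in-flight GET_WORK/SWTAG/DESCHED to
 * complete, release or deschedule any work still held, and then wait for
 * the deschedule itself to finish.
 */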
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t pend_state;
        uint8_t pend_tt;
        uintptr_t base;
        uint64_t tag;
        uint8_t i;

        dws = hws;
        ws = hws;
        for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
                base = dev->dual_ws ? dws->base[i] : ws->base;
                /* Wait till getwork/swtp/waitw/desched completes. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                                       BIT_ULL(56)));

                tag = plt_read64(base + SSOW_LF_GWS_TAG);
                pend_tt = (tag >> 32) & 0x3;
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }

                /* Wait for desched to complete. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & BIT_ULL(58));
        }
}

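/*
 * Derive the advertised port/queue limits from the SSO resources; a dual
 * workslot port consumes two hardware workslots.
 */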
void
cn9k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
        else
                dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                              RTE_EVENT_MAX_QUEUES_PER_DEV :
                              dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                hws = hws * CN9K_DUAL_WS_NB_WS;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

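/*
 * Resize each port's cookie allocation so it can also hold the Tx adapter
 * data table (one uint64_t per port/queue pair) and copy the current table
 * into it.
 */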
static int
cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        if (dev->tx_adptr_data == NULL)
                return 0;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(dws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws_dual) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        dws = RTE_PTR_ADD(ws_cookie,
                                          sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = dws;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        void *ws_cookie;

                        ws_cookie = cnxk_sso_hws_get_cookie(ws);
                        ws_cookie = rte_realloc_socket(
                                ws_cookie,
                                sizeof(struct cnxk_sso_hws_cookie) +
                                        sizeof(struct cn9k_sso_hws) +
                                        (sizeof(uint64_t) *
                                         (dev->max_port_id + 1) *
                                         RTE_MAX_QUEUES_PER_PORT),
                                RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                        if (ws_cookie == NULL)
                                return -ENOMEM;
                        ws = RTE_PTR_ADD(ws_cookie,
                                         sizeof(struct cnxk_sso_hws_cookie));
                        memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
                               sizeof(uint64_t) * (dev->max_port_id + 1) *
                                       RTE_MAX_QUEUES_PER_PORT);
                        event_dev->data->ports[i] = ws;
                }
        }
        rte_mb();

        return 0;
}

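/*
 * Populate the eventdev fastpath function pointers. The dequeue/enqueue
 * tables below are generated from NIX_RX_FASTPATH_MODES and
 * NIX_TX_FASTPATH_MODES and indexed by offload flags via the
 * CN9K_SET_EVDEV_DEQ_OP/CN9K_SET_EVDEV_ENQ_OP macros.
 */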
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        /* Single WS modes */
        const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
                };

        /* Dual WS modes */
        const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t sso_hws_dual_deq_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
                };

        const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
                };

        const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
                NIX_RX_FASTPATH_MODES
#undef R
        };

        const event_dequeue_burst_t
                sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
                        NIX_RX_FASTPATH_MODES
#undef R
                };

        /* Tx modes */
        const event_tx_adapter_enqueue
                sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

        const event_tx_adapter_enqueue
                sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

        const event_tx_adapter_enqueue
                sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

        const event_tx_adapter_enqueue
                sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
        [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
                        NIX_TX_FASTPATH_MODES
#undef T
                };

        event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq_seg);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_seg_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_seg_burst);
                }
        } else {
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                      sso_hws_deq_burst);
                if (dev->is_timeout_deq) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_deq_tmo);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_deq_tmo_burst);
                }
        }

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq_seg);
        else
                CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                      sso_hws_tx_adptr_enq);

        if (dev->dual_ws) {
                event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                        cn9k_sso_hws_dual_enq_fwd_burst;

                if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq_seg);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_seg_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo_seg);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_seg_burst);
                        }
                } else {
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                              sso_hws_dual_deq);
                        CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                              sso_hws_dual_deq_burst);
                        if (dev->is_timeout_deq) {
                                CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                                      sso_hws_dual_deq_tmo);
                                CN9K_SET_EVDEV_DEQ_OP(
                                        dev, event_dev->dequeue_burst,
                                        sso_hws_dual_deq_tmo_burst);
                        }
                }

                if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq_seg);
                else
                        CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                              sso_hws_dual_tx_adptr_enq);
        }

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
        rte_mb();
}

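/*
 * Allocate and initialize the workslot memory for an event port. The first
 * cache line of the allocation is reserved for the cnxk_sso_hws_cookie.
 */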
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        void *data;

        if (dev->dual_ws) {
                dws = rte_zmalloc("cn9k_dual_ws",
                                  sizeof(struct cn9k_sso_hws_dual) +
                                          RTE_CACHE_LINE_SIZE,
                                  RTE_CACHE_LINE_SIZE);
                if (dws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
                dws->base[0] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
                dws->base[1] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
                cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
                cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
                dws->hws_id = port_id;
                dws->swtag_req = 0;
                dws->vws = 0;

                data = dws;
        } else {
                /* Allocate event port memory */
                ws = rte_zmalloc("cn9k_ws",
                                 sizeof(struct cn9k_sso_hws) +
                                         RTE_CACHE_LINE_SIZE,
                                 RTE_CACHE_LINE_SIZE);
                if (ws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
                ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
                cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
                ws->hws_id = port_id;
                ws->swtag_req = 0;

                data = ws;
        }

        return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

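/*
 * Validate the requested configuration, (re)initialize the SSO resources,
 * allocate XAQ buffers, set up the event ports, and restore any links that
 * existed before a reconfigure.
 */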
static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        roc_sso_rsrc_fini(&dev->sso);

        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
                                    cn9k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn9k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;

        rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
                            cn9k_sso_hws_flush_events);
        if (rc < 0)
                return rc;

        cn9k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static int
cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;

        return 0;
}

static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
                      void *tstmp_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct cn9k_sso_hws_dual *dws =
                                event_dev->data->ports[i];
                        dws->lookup_mem = lookup_mem;
                        dws->tstamp = tstmp_info;
                } else {
                        struct cn9k_sso_hws *ws = event_dev->data->ports[i];
                        ws->lookup_mem = lookup_mem;
                        ws->tstamp = tstmp_info;
                }
        }
}

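/*
 * Add an Rx queue to the Rx adapter and propagate the PMD's lookup memory
 * and timestamp info (shared across the device's Rx queues, hence read from
 * queue 0) to every event port, then refresh the fastpath functions.
 */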
static int
cn9k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn9k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;

        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}

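/*
 * Adjust a Tx queue's SQB accounting for Tx adapter use. When enabled, the
 * usable SQB count is capped at CNXK_SSO_SQB_LIMIT and then reduced to 70%,
 * apparently to leave headroom for in-flight packets.
 */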
static void
cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
                       bool ena)
{
        struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
        struct cn9k_eth_txq *txq;
        struct roc_nix_sq *sq;
        int i;

        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                        cn9k_sso_txq_fc_update(eth_dev, i, ena);
        } else {
                uint16_t sq_limit;

                sq = &cnxk_eth_dev->sqs[tx_queue_id];
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sq_limit =
                        ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
                                    sq->nb_sqb_bufs;
                txq->nb_sqb_bufs_adj =
                        sq_limit -
                        RTE_ALIGN_MUL_CEIL(sq_limit,
                                           (1ULL << txq->sqes_per_sqb_log2)) /
                                (1ULL << txq->sqes_per_sqb_log2);
                txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
        }
}

static int
cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
        rc = cn9k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
        return cn9k_sso_updt_tx_adptr_data(event_dev);
}

static struct rte_eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        .eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .dump = cnxk_sso_dump,
        .dev_start = cn9k_sso_start,
        .dev_stop = cn9k_sso_stop,
        .dev_close = cn9k_sso_close,
        .dev_selftest = cn9k_sso_selftest,
};

static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN9K_SSO_SINGLE_WS "=1"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");