event/cnxk: support Rx adapter fast path
[dpdk.git] / drivers / event / cnxk / cn9k_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "cn9k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
8
9 #define CN9K_DUAL_WS_NB_WS          2
10 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
11
12 static void
13 cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
14 {
15         ws->tag_op = base + SSOW_LF_GWS_TAG;
16         ws->wqp_op = base + SSOW_LF_GWS_WQP;
17         ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
18         ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
19         ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
20         ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
21 }
22
23 static int
24 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
25 {
26         struct cnxk_sso_evdev *dev = arg;
27         struct cn9k_sso_hws_dual *dws;
28         struct cn9k_sso_hws *ws;
29         int rc;
30
31         if (dev->dual_ws) {
32                 dws = port;
33                 rc = roc_sso_hws_link(&dev->sso,
34                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
35                                       nb_link);
36                 rc |= roc_sso_hws_link(&dev->sso,
37                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
38                                        map, nb_link);
39         } else {
40                 ws = port;
41                 rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
42         }
43
44         return rc;
45 }
46
47 static int
48 cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
49 {
50         struct cnxk_sso_evdev *dev = arg;
51         struct cn9k_sso_hws_dual *dws;
52         struct cn9k_sso_hws *ws;
53         int rc;
54
55         if (dev->dual_ws) {
56                 dws = port;
57                 rc = roc_sso_hws_unlink(&dev->sso,
58                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
59                                         map, nb_link);
60                 rc |= roc_sso_hws_unlink(&dev->sso,
61                                          CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
62                                          map, nb_link);
63         } else {
64                 ws = port;
65                 rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
66         }
67
68         return rc;
69 }
70
/* Per-port setup callback: copy the HWGRP base-address table and XAQ
 * flow-control state into the port private data, and program the get-work
 * NW_TIM timeout register of each GWS. @arg is the cnxk_sso_evdev, @hws
 * the port's private data (single or dual workslot layout).
 */
static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t val;

	/* Set get_work tmo for HWS */
	/* NOTE(review): if deq_tmo_ns is below 1us, NSEC2USEC() yields 0 and
	 * this wraps to UINT64_MAX; presumably common code enforces a minimum
	 * dequeue timeout — confirm against cnxk_sso_dev_validate().
	 */
	val = NSEC2USEC(dev->deq_tmo_ns) - 1;
	if (dev->dual_ws) {
		dws = hws;
		rte_memcpy(dws->grps_base, grps_base,
			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
		dws->fc_mem = dev->fc_mem;
		dws->xaq_lmt = dev->xaq_lmt;

		/* A dual-workslot port programs both GWS of the pair. */
		plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
		plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
	} else {
		ws = hws;
		rte_memcpy(ws->grps_base, grps_base,
			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
		ws->fc_mem = dev->fc_mem;
		ws->xaq_lmt = dev->xaq_lmt;

		plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
	}
}
100
101 static void
102 cn9k_sso_hws_release(void *arg, void *hws)
103 {
104         struct cnxk_sso_evdev *dev = arg;
105         struct cn9k_sso_hws_dual *dws;
106         struct cn9k_sso_hws *ws;
107         int i;
108
109         if (dev->dual_ws) {
110                 dws = hws;
111                 for (i = 0; i < dev->nb_event_queues; i++) {
112                         roc_sso_hws_unlink(&dev->sso,
113                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
114                                            (uint16_t *)&i, 1);
115                         roc_sso_hws_unlink(&dev->sso,
116                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
117                                            (uint16_t *)&i, 1);
118                 }
119                 memset(dws, 0, sizeof(*dws));
120         } else {
121                 ws = hws;
122                 for (i = 0; i < dev->nb_event_queues; i++)
123                         roc_sso_hws_unlink(&dev->sso, ws->hws_id,
124                                            (uint16_t *)&i, 1);
125                 memset(ws, 0, sizeof(*ws));
126         }
127 }
128
/* Drain all events still held by HWGRP @queue_id, handing each dequeued
 * event to @fn (when non-NULL) until the group's admission, dequeue and
 * in-flight counters all read zero. @base is the GGRP LF mailbox base,
 * @hws the port used to pull the remaining work, and @arg is passed both
 * to cnxk_sso_pmd_priv() and through to @fn — so it is presumably the
 * rte_eventdev pointer (verify against callers).
 */
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
			  cnxk_handle_event_t fn, void *arg)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws_state *st;
	struct cn9k_sso_hws *ws;
	uint64_t cq_ds_cnt = 1;
	uint64_t aq_cnt = 1;
	uint64_t ds_cnt = 1;
	struct rte_event ev;
	uintptr_t ws_base;
	uint64_t val, req;

	/* Stop further scheduling from this group while it is drained. */
	plt_write64(0, base + SSO_LF_GGRP_QCTL);

	/* Build the GET_WORK request targeting exactly this group. */
	req = queue_id;     /* GGRP ID */
	req |= BIT_ULL(18); /* Grouped */
	req |= BIT_ULL(16); /* WAIT */

	aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
	ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
	cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
	/* Keep only the CQ/DS count fields of INT_CNT. */
	cq_ds_cnt &= 0x3FFF3FFF0000;

	/* Dual-workslot ports drain through the first GWS of the pair. */
	if (dev->dual_ws) {
		dws = hws;
		st = &dws->ws_state[0];
		ws_base = dws->base[0];
	} else {
		ws = hws;
		st = (struct cn9k_sso_hws_state *)ws;
		ws_base = ws->base;
	}

	while (aq_cnt || cq_ds_cnt || ds_cnt) {
		plt_write64(req, st->getwrk_op);
		cn9k_sso_hws_get_work_empty(st, &ev);
		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
		/* Release the tag so the event is fully retired. */
		if (ev.sched_type != SSO_TT_EMPTY)
			cnxk_sso_hws_swtag_flush(st->tag_op,
						 st->swtag_flush_op);
		/* Wait for the swtag flush to complete (PENDSTATE bit 56). */
		do {
			val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
		} while (val & BIT_ULL(56));
		aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
		ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
		cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x3FFF3FFF0000;
	}

	/* Invalidate the get-work cache of the workslot we drained through. */
	plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}
185
/* Quiesce a port's workslot(s): wait for any in-flight GWS operation to
 * finish, untag/deschedule pending work, and wait for the deschedule to
 * complete. @arg is the cnxk_sso_evdev, @hws the port private data.
 */
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t pend_state;
	uint8_t pend_tt;
	uintptr_t base;
	uint64_t tag;
	uint8_t i;

	/* Both aliases are set up front; dev->dual_ws selects which one is
	 * actually valid for this port inside the loop.
	 */
	dws = hws;
	ws = hws;
	for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
		base = dev->dual_ws ? dws->base[i] : ws->base;
		/* Wait till getwork/swtp/waitw/desched completes. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
				       BIT_ULL(56)));

		/* Pending tag type lives in TAG[33:32]. */
		tag = plt_read64(base + SSOW_LF_GWS_TAG);
		pend_tt = (tag >> 32) & 0x3;
		if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
			/* ATOMIC/ORDERED tags must be untagged before the
			 * workslot can be descheduled.
			 */
			if (pend_tt == SSO_TT_ATOMIC ||
			    pend_tt == SSO_TT_ORDERED)
				cnxk_sso_hws_swtag_untag(
					base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
			plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
		}

		/* Wait for desched to complete. */
		do {
			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
		} while (pend_state & BIT_ULL(58));
	}
}
224
225 void
226 cn9k_sso_set_rsrc(void *arg)
227 {
228         struct cnxk_sso_evdev *dev = arg;
229
230         if (dev->dual_ws)
231                 dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
232         else
233                 dev->max_event_ports = dev->sso.max_hws;
234         dev->max_event_queues =
235                 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
236                               RTE_EVENT_MAX_QUEUES_PER_DEV :
237                               dev->sso.max_hwgrp;
238 }
239
240 static int
241 cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
242 {
243         struct cnxk_sso_evdev *dev = arg;
244
245         if (dev->dual_ws)
246                 hws = hws * CN9K_DUAL_WS_NB_WS;
247
248         return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
249 }
250
251 static void
252 cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
253 {
254         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
255         /* Single WS modes */
256         const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
257 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
258         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
259                 NIX_RX_FASTPATH_MODES
260 #undef R
261         };
262
263         const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
264 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
265         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
266                 NIX_RX_FASTPATH_MODES
267 #undef R
268         };
269
270         const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
271 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
272         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
273                 NIX_RX_FASTPATH_MODES
274 #undef R
275         };
276
277         const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
278 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
279         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
280                 NIX_RX_FASTPATH_MODES
281 #undef R
282         };
283
284         const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
285 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
286         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
287                 NIX_RX_FASTPATH_MODES
288 #undef R
289         };
290
291         const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
292 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
293         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
294                 NIX_RX_FASTPATH_MODES
295 #undef R
296         };
297
298         const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
299 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
300         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
301                 NIX_RX_FASTPATH_MODES
302 #undef R
303         };
304
305         const event_dequeue_burst_t
306                 sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
307 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
308         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
309                         NIX_RX_FASTPATH_MODES
310 #undef R
311                 };
312
313         /* Dual WS modes */
314         const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
315 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
316         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
317                 NIX_RX_FASTPATH_MODES
318 #undef R
319         };
320
321         const event_dequeue_burst_t sso_hws_dual_deq_burst[2][2][2][2][2][2] = {
322 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
323         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
324                 NIX_RX_FASTPATH_MODES
325 #undef R
326         };
327
328         const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2] = {
329 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
330         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
331                 NIX_RX_FASTPATH_MODES
332 #undef R
333         };
334
335         const event_dequeue_burst_t
336                 sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2] = {
337 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
338         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
339                         NIX_RX_FASTPATH_MODES
340 #undef R
341                 };
342
343         const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
344 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
345         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
346                 NIX_RX_FASTPATH_MODES
347 #undef R
348         };
349
350         const event_dequeue_burst_t
351                 sso_hws_dual_deq_seg_burst[2][2][2][2][2][2] = {
352 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
353         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
354                         NIX_RX_FASTPATH_MODES
355 #undef R
356                 };
357
358         const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2] = {
359 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
360         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
361                 NIX_RX_FASTPATH_MODES
362 #undef R
363         };
364
365         const event_dequeue_burst_t
366                 sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2] = {
367 #define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
368         [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
369                         NIX_RX_FASTPATH_MODES
370 #undef R
371                 };
372
373         event_dev->enqueue = cn9k_sso_hws_enq;
374         event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
375         event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
376         event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
377         if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
378                 event_dev->dequeue = sso_hws_deq_seg
379                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
380                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
381                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
382                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
383                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
384                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
385                 event_dev->dequeue_burst = sso_hws_deq_seg_burst
386                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
387                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
388                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
389                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
390                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
391                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
392                 if (dev->is_timeout_deq) {
393                         event_dev->dequeue = sso_hws_deq_tmo_seg
394                                 [!!(dev->rx_offloads &
395                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
396                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
397                                 [!!(dev->rx_offloads &
398                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
399                                 [!!(dev->rx_offloads &
400                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
401                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
402                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
403                         event_dev->dequeue_burst = sso_hws_deq_tmo_seg_burst
404                                 [!!(dev->rx_offloads &
405                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
406                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
407                                 [!!(dev->rx_offloads &
408                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
409                                 [!!(dev->rx_offloads &
410                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
411                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
412                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
413                 }
414         } else {
415                 event_dev->dequeue = sso_hws_deq
416                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
417                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
418                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
419                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
420                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
421                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
422                 event_dev->dequeue_burst = sso_hws_deq_burst
423                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
424                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
425                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
426                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
427                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
428                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
429                 if (dev->is_timeout_deq) {
430                         event_dev->dequeue = sso_hws_deq_tmo
431                                 [!!(dev->rx_offloads &
432                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
433                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
434                                 [!!(dev->rx_offloads &
435                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
436                                 [!!(dev->rx_offloads &
437                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
438                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
439                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
440                         event_dev->dequeue_burst = sso_hws_deq_tmo_burst
441                                 [!!(dev->rx_offloads &
442                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
443                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
444                                 [!!(dev->rx_offloads &
445                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
446                                 [!!(dev->rx_offloads &
447                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
448                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
449                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
450                 }
451         }
452
453         if (dev->dual_ws) {
454                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
455                 event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
456                 event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
457                 event_dev->enqueue_forward_burst =
458                         cn9k_sso_hws_dual_enq_fwd_burst;
459
460                 if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
461                         event_dev->dequeue = sso_hws_dual_deq_seg
462                                 [!!(dev->rx_offloads &
463                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
464                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
465                                 [!!(dev->rx_offloads &
466                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
467                                 [!!(dev->rx_offloads &
468                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
469                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
470                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
471                         event_dev->dequeue_burst = sso_hws_dual_deq_seg_burst
472                                 [!!(dev->rx_offloads &
473                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
474                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
475                                 [!!(dev->rx_offloads &
476                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
477                                 [!!(dev->rx_offloads &
478                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
479                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
480                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
481                         if (dev->is_timeout_deq) {
482                                 event_dev->dequeue = sso_hws_dual_deq_tmo_seg
483                                         [!!(dev->rx_offloads &
484                                             NIX_RX_OFFLOAD_VLAN_STRIP_F)]
485                                         [!!(dev->rx_offloads &
486                                             NIX_RX_OFFLOAD_TSTAMP_F)]
487                                         [!!(dev->rx_offloads &
488                                             NIX_RX_OFFLOAD_MARK_UPDATE_F)]
489                                         [!!(dev->rx_offloads &
490                                             NIX_RX_OFFLOAD_CHECKSUM_F)]
491                                         [!!(dev->rx_offloads &
492                                             NIX_RX_OFFLOAD_PTYPE_F)]
493                                         [!!(dev->rx_offloads &
494                                             NIX_RX_OFFLOAD_RSS_F)];
495                                 event_dev->dequeue_burst =
496                                         sso_hws_dual_deq_tmo_seg_burst
497                                                 [!!(dev->rx_offloads &
498                                                   NIX_RX_OFFLOAD_VLAN_STRIP_F)]
499                                                 [!!(dev->rx_offloads &
500                                                     NIX_RX_OFFLOAD_TSTAMP_F)]
501                                                 [!!(dev->rx_offloads &
502                                                   NIX_RX_OFFLOAD_MARK_UPDATE_F)]
503                                                 [!!(dev->rx_offloads &
504                                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
505                                                 [!!(dev->rx_offloads &
506                                                     NIX_RX_OFFLOAD_PTYPE_F)]
507                                                 [!!(dev->rx_offloads &
508                                                     NIX_RX_OFFLOAD_RSS_F)];
509                         }
510                 } else {
511                         event_dev->dequeue = sso_hws_dual_deq
512                                 [!!(dev->rx_offloads &
513                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
514                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
515                                 [!!(dev->rx_offloads &
516                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
517                                 [!!(dev->rx_offloads &
518                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
519                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
520                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
521                         event_dev->dequeue_burst = sso_hws_dual_deq_burst
522                                 [!!(dev->rx_offloads &
523                                     NIX_RX_OFFLOAD_VLAN_STRIP_F)]
524                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
525                                 [!!(dev->rx_offloads &
526                                     NIX_RX_OFFLOAD_MARK_UPDATE_F)]
527                                 [!!(dev->rx_offloads &
528                                     NIX_RX_OFFLOAD_CHECKSUM_F)]
529                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
530                                 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
531                         if (dev->is_timeout_deq) {
532                                 event_dev->dequeue = sso_hws_dual_deq_tmo
533                                         [!!(dev->rx_offloads &
534                                             NIX_RX_OFFLOAD_VLAN_STRIP_F)]
535                                         [!!(dev->rx_offloads &
536                                             NIX_RX_OFFLOAD_TSTAMP_F)]
537                                         [!!(dev->rx_offloads &
538                                             NIX_RX_OFFLOAD_MARK_UPDATE_F)]
539                                         [!!(dev->rx_offloads &
540                                             NIX_RX_OFFLOAD_CHECKSUM_F)]
541                                         [!!(dev->rx_offloads &
542                                             NIX_RX_OFFLOAD_PTYPE_F)]
543                                         [!!(dev->rx_offloads &
544                                             NIX_RX_OFFLOAD_RSS_F)];
545                                 event_dev->dequeue_burst =
546                                         sso_hws_dual_deq_tmo_burst
547                                                 [!!(dev->rx_offloads &
548                                                   NIX_RX_OFFLOAD_VLAN_STRIP_F)]
549                                                 [!!(dev->rx_offloads &
550                                                   NIX_RX_OFFLOAD_TSTAMP_F)]
551                                                 [!!(dev->rx_offloads &
552                                                   NIX_RX_OFFLOAD_MARK_UPDATE_F)]
553                                                 [!!(dev->rx_offloads &
554                                                   NIX_RX_OFFLOAD_CHECKSUM_F)]
555                                                 [!!(dev->rx_offloads &
556                                                   NIX_RX_OFFLOAD_PTYPE_F)]
557                                                 [!!(dev->rx_offloads &
558                                                   NIX_RX_OFFLOAD_RSS_F)];
559                         }
560                 }
561         }
562
563         rte_mb();
564 }
565
566 static void *
567 cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
568 {
569         struct cnxk_sso_evdev *dev = arg;
570         struct cn9k_sso_hws_dual *dws;
571         struct cn9k_sso_hws *ws;
572         void *data;
573
574         if (dev->dual_ws) {
575                 dws = rte_zmalloc("cn9k_dual_ws",
576                                   sizeof(struct cn9k_sso_hws_dual) +
577                                           RTE_CACHE_LINE_SIZE,
578                                   RTE_CACHE_LINE_SIZE);
579                 if (dws == NULL) {
580                         plt_err("Failed to alloc memory for port=%d", port_id);
581                         return NULL;
582                 }
583
584                 dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
585                 dws->base[0] = roc_sso_hws_base_get(
586                         &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
587                 dws->base[1] = roc_sso_hws_base_get(
588                         &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
589                 cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
590                 cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
591                 dws->hws_id = port_id;
592                 dws->swtag_req = 0;
593                 dws->vws = 0;
594
595                 data = dws;
596         } else {
597                 /* Allocate event port memory */
598                 ws = rte_zmalloc("cn9k_ws",
599                                  sizeof(struct cn9k_sso_hws) +
600                                          RTE_CACHE_LINE_SIZE,
601                                  RTE_CACHE_LINE_SIZE);
602                 if (ws == NULL) {
603                         plt_err("Failed to alloc memory for port=%d", port_id);
604                         return NULL;
605                 }
606
607                 /* First cache line is reserved for cookie */
608                 ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
609                 ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
610                 cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
611                 ws->hws_id = port_id;
612                 ws->swtag_req = 0;
613
614                 data = ws;
615         }
616
617         return data;
618 }
619
620 static void
621 cn9k_sso_info_get(struct rte_eventdev *event_dev,
622                   struct rte_event_dev_info *dev_info)
623 {
624         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
625
626         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
627         cnxk_sso_info_get(dev, dev_info);
628 }
629
/* Eventdev configure: validate the requested configuration, (re)build SSO
 * hardware resources, allocate the XAQ pool, create the event ports, and
 * restore any links that existed before a reconfigure. On failure the SSO
 * resources are torn down and the port count reset to zero.
 */
static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	/* Release any resources from a previous configure before
	 * re-reserving with the new port/queue counts.
	 */
	roc_sso_rsrc_fini(&dev->sso);

	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
				    cn9k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

	dev->configured = 1;
	/* Publish the configured state before returning. */
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}
671
672 static int
673 cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
674                     const struct rte_event_port_conf *port_conf)
675 {
676
677         RTE_SET_USED(port_conf);
678         return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
679 }
680
681 static void
682 cn9k_sso_port_release(void *port)
683 {
684         struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
685         struct cnxk_sso_evdev *dev;
686
687         if (port == NULL)
688                 return;
689
690         dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
691         if (!gws_cookie->configured)
692                 goto free;
693
694         cn9k_sso_hws_release(dev, port);
695         memset(gws_cookie, 0, sizeof(*gws_cookie));
696 free:
697         rte_free(gws_cookie);
698 }
699
700 static int
701 cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
702                    const uint8_t queues[], const uint8_t priorities[],
703                    uint16_t nb_links)
704 {
705         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
706         uint16_t hwgrp_ids[nb_links];
707         uint16_t link;
708
709         RTE_SET_USED(priorities);
710         for (link = 0; link < nb_links; link++)
711                 hwgrp_ids[link] = queues[link];
712         nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
713
714         return (int)nb_links;
715 }
716
717 static int
718 cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
719                      uint8_t queues[], uint16_t nb_unlinks)
720 {
721         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
722         uint16_t hwgrp_ids[nb_unlinks];
723         uint16_t unlink;
724
725         for (unlink = 0; unlink < nb_unlinks; unlink++)
726                 hwgrp_ids[unlink] = queues[unlink];
727         nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
728
729         return (int)nb_unlinks;
730 }
731
732 static int
733 cn9k_sso_start(struct rte_eventdev *event_dev)
734 {
735         int rc;
736
737         rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
738                             cn9k_sso_hws_flush_events);
739         if (rc < 0)
740                 return rc;
741
742         cn9k_sso_fp_fns_set(event_dev);
743
744         return rc;
745 }
746
/* Stop the event device, flushing in-flight events via the CN9K
 * reset/flush callbacks.
 */
static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}
752
/* Close the event device; the common close path unlinks all ports
 * using the CN9K unlink callback.
 */
static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}
758
/* Run the common cnxk SSO selftest against the cn9k event PMD. */
static int
cn9k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn9k));
}
764
765 static int
766 cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
767                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
768 {
769         int rc;
770
771         RTE_SET_USED(event_dev);
772         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
773         if (rc)
774                 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
775         else
776                 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
777                         RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
778                         RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
779
780         return 0;
781 }
782
783 static void
784 cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
785                       void *tstmp_info)
786 {
787         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
788         int i;
789
790         for (i = 0; i < dev->nb_event_ports; i++) {
791                 if (dev->dual_ws) {
792                         struct cn9k_sso_hws_dual *dws =
793                                 event_dev->data->ports[i];
794                         dws->lookup_mem = lookup_mem;
795                         dws->tstamp = tstmp_info;
796                 } else {
797                         struct cn9k_sso_hws *ws = event_dev->data->ports[i];
798                         ws->lookup_mem = lookup_mem;
799                         ws->tstamp = tstmp_info;
800                 }
801         }
802 }
803
/* Add an ethdev Rx queue to the event Rx adapter.
 *
 * Only "net_cn9k" ethdev ports are supported (they expose the internal
 * event port). After the common add succeeds, the Rx queue's lookup
 * table and timestamp info are propagated to all event ports and the
 * fast-path function pointers are refreshed for the Rx-adapter path.
 *
 * Returns 0 on success, -EINVAL for a foreign ethdev or on add failure.
 */
static int
cn9k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn9k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;

        /* Queue 0 is used regardless of rx_queue_id — presumably lookup_mem
         * and tstamp are per-device and shared across all Rx queues;
         * NOTE(review): confirm against the cn9k ethdev queue setup.
         */
        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}
832
833 static int
834 cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
835                               const struct rte_eth_dev *eth_dev,
836                               int32_t rx_queue_id)
837 {
838         int rc;
839
840         rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
841         if (rc)
842                 return -EINVAL;
843
844         return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
845 }
846
/* Eventdev operations table for the CN9K SSO PMD. CN9K-specific entries
 * wrap the common cnxk helpers with hardware-specific callbacks; the
 * rest delegate directly to the shared cnxk implementations.
 */
static struct rte_eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        /* Ethernet Rx adapter ops. */
        .eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        /* Event timer adapter ops. */
        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .dump = cnxk_sso_dump,
        .dev_start = cn9k_sso_start,
        .dev_stop = cn9k_sso_stop,
        .dev_close = cn9k_sso_close,
        .dev_selftest = cn9k_sso_selftest,
};
874
/* Per-device init callback invoked from the PCI probe path.
 *
 * Validates the build, initializes the platform layer, installs the ops
 * table, then (primary process only) performs the common cnxk init and
 * discovers the available SSO queue/port resources.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        /* CN9K is a 128-byte cache line part; refuse a mismatched build. */
        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        /* Discover HW limits; bail out if the device exposes no resources. */
        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}
917
/* PCI probe callback: allocate the eventdev with cnxk private data and
 * run the CN9K-specific init.
 */
static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}
924
/* PCI id table, terminated by the zeroed sentinel entry.
 * NOTE(review): only the sentinel is visible here — device id entries are
 * presumably provided elsewhere (e.g. via the common cnxk probe path);
 * confirm enumeration actually matches CN9K SSO/TIM devices.
 */
static const struct rte_pci_id cn9k_pci_sso_map[] = {
        {
                .vendor_id = 0,
        },
};
930
/* PCI driver descriptor for the CN9K SSO event PMD; removal is handled
 * by the common cnxk remove path.
 */
static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};
937
/* Register the PMD with the EAL: PCI driver, id table, kernel module
 * dependency, and the devargs accepted on the EAL command line.
 */
RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN9K_SSO_SINGLE_WS "=1"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");