event/cnxk: add checks in release operation
[dpdk.git] / drivers / event / cnxk / cn10k_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "cn10k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
8
/* Pick the dequeue fast-path handler from @deq_ops indexed by the low
 * NIX_RX_OFFLOAD_MAX bits of the device's Rx offload flags.
 */
#define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]

/* Pick the enqueue fast-path handler from @enq_ops indexed by the low
 * NIX_TX_OFFLOAD_MAX bits of the device's Tx offload flags.
 */
#define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                           \
	enq_op = enq_ops[dev->tx_offloads & (NIX_TX_OFFLOAD_MAX - 1)]
15 static uint32_t
16 cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
17 {
18         uint32_t wdata = 1;
19
20         if (dev->deq_tmo_ns)
21                 wdata |= BIT(16);
22
23         switch (dev->gw_mode) {
24         case CN10K_GW_MODE_NONE:
25         default:
26                 break;
27         case CN10K_GW_MODE_PREF:
28                 wdata |= BIT(19);
29                 break;
30         case CN10K_GW_MODE_PREF_WFE:
31                 wdata |= BIT(20) | BIT(19);
32                 break;
33         }
34
35         return wdata;
36 }
37
38 static void *
39 cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
40 {
41         struct cnxk_sso_evdev *dev = arg;
42         struct cn10k_sso_hws *ws;
43
44         /* Allocate event port memory */
45         ws = rte_zmalloc("cn10k_ws",
46                          sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
47                          RTE_CACHE_LINE_SIZE);
48         if (ws == NULL) {
49                 plt_err("Failed to alloc memory for port=%d", port_id);
50                 return NULL;
51         }
52
53         /* First cache line is reserved for cookie */
54         ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
55         ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
56         ws->hws_id = port_id;
57         ws->swtag_req = 0;
58         ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
59         ws->lmt_base = dev->sso.lmt_base;
60
61         return ws;
62 }
63
64 static int
65 cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
66 {
67         struct cnxk_sso_evdev *dev = arg;
68         struct cn10k_sso_hws *ws = port;
69
70         return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
71 }
72
73 static int
74 cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
75 {
76         struct cnxk_sso_evdev *dev = arg;
77         struct cn10k_sso_hws *ws = port;
78
79         return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
80 }
81
82 static void
83 cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
84 {
85         struct cnxk_sso_evdev *dev = arg;
86         struct cn10k_sso_hws *ws = hws;
87         uint64_t val;
88
89         ws->grp_base = grp_base;
90         ws->fc_mem = (uint64_t *)dev->fc_iova;
91         ws->xaq_lmt = dev->xaq_lmt;
92
93         /* Set get_work timeout for HWS */
94         val = NSEC2USEC(dev->deq_tmo_ns);
95         val = val ? val - 1 : 0;
96         plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
97 }
98
99 static void
100 cn10k_sso_hws_release(void *arg, void *hws)
101 {
102         struct cnxk_sso_evdev *dev = arg;
103         struct cn10k_sso_hws *ws = hws;
104         uint16_t i;
105
106         for (i = 0; i < dev->nb_event_queues; i++)
107                 roc_sso_hws_unlink(&dev->sso, ws->hws_id, &i, 1);
108         memset(ws, 0, sizeof(*ws));
109 }
110
/* Drain all events pending on the event queue @queue_id (GGRP LF at
 * @base) using this HWS, invoking @fn on each drained event when given.
 * Loops until the group's admission, conflict and deschedule counters
 * all read zero.
 */
static void
cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
			   cnxk_handle_event_t fn, void *arg)
{
	struct cn10k_sso_hws *ws = hws;
	uint64_t cq_ds_cnt = 1;
	uint64_t aq_cnt = 1;
	uint64_t ds_cnt = 1;
	struct rte_event ev;
	uint64_t val, req;

	/* Stop the group from admitting further work. */
	plt_write64(0, base + SSO_LF_GGRP_QCTL);

	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
	req = queue_id;     /* GGRP ID */
	req |= BIT_ULL(18); /* Grouped */
	req |= BIT_ULL(16); /* WAIT */

	aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
	ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
	cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
	/* Extract cq and ds count */
	cq_ds_cnt &= 0x3FFF3FFF0000;

	while (aq_cnt || cq_ds_cnt || ds_cnt) {
		/* Pull one piece of work from this group. */
		plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
		cn10k_sso_hws_get_work_empty(ws, &ev);
		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
		if (ev.sched_type != SSO_TT_EMPTY)
			cnxk_sso_hws_swtag_flush(ws->base);
		/* Wait for the swtag flush to retire (bit 56 pending). */
		do {
			val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
		} while (val & BIT_ULL(56));
		aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
		ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
		cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x3FFF3FFF0000;
	}

	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
	rte_mb();
}
154
/* Return a HWS to a clean state: wait for outstanding operations to
 * settle, untag/deschedule any held work, and drain whatever the
 * getwork prefetcher may still deliver.
 */
static void
cn10k_sso_hws_reset(void *arg, void *hws)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn10k_sso_hws *ws = hws;
	uintptr_t base = ws->base;
	uint64_t pend_state;
	union {
		__uint128_t wdata;
		uint64_t u64[2];
	} gw;
	uint8_t pend_tt;

	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
	/* Wait till getwork/swtp/waitw/desched completes. */
	do {
		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
	} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
			       BIT_ULL(56) | BIT_ULL(54)));
	pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
	if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
		/* Atomic/ordered tags must be untagged before desched. */
		if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
			cnxk_sso_hws_swtag_untag(base +
						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
		plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
	}

	/* Wait for desched to complete. */
	do {
		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
	} while (pend_state & BIT_ULL(58));

	/* Let any in-flight prefetched getwork land before invalidating. */
	switch (dev->gw_mode) {
	case CN10K_GW_MODE_PREF:
		while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
			;
		break;
	case CN10K_GW_MODE_PREF_WFE:
		while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
		       SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
			continue;
		plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
		break;
	case CN10K_GW_MODE_NONE:
	default:
		break;
	}

	/* If the prefetch buffer still holds work, pull it out and
	 * release it the same way as above.
	 */
	if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
	    SSO_TT_EMPTY) {
		plt_write64(BIT_ULL(16) | 1,
			    ws->base + SSOW_LF_GWS_OP_GET_WORK0);
		do {
			roc_load_pair(gw.u64[0], gw.u64[1],
				      ws->base + SSOW_LF_GWS_WQE0);
		} while (gw.u64[0] & BIT_ULL(63));
		pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
		if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
			if (pend_tt == SSO_TT_ATOMIC ||
			    pend_tt == SSO_TT_ORDERED)
				cnxk_sso_hws_swtag_untag(
					base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
			plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
		}
	}

	plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
	rte_mb();
}
224
225 static void
226 cn10k_sso_set_rsrc(void *arg)
227 {
228         struct cnxk_sso_evdev *dev = arg;
229
230         dev->max_event_ports = dev->sso.max_hws;
231         dev->max_event_queues =
232                 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
233                               RTE_EVENT_MAX_QUEUES_PER_DEV :
234                               dev->sso.max_hwgrp;
235 }
236
237 static int
238 cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
239 {
240         struct cnxk_sso_evdev *dev = arg;
241
242         return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
243 }
244
/* Grow each event port allocation so a private copy of the Tx adapter
 * data fits behind the HWS structure, then refresh that copy.
 * Returns 0 on success (or when there is nothing to copy), -ENOMEM if
 * any reallocation fails (ports processed so far keep their new copy).
 */
static int
cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	if (dev->tx_adptr_data == NULL)
		return 0;

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
		void *ws_cookie;

		/* The cookie precedes the HWS in the same allocation, so
		 * realloc must operate on the cookie pointer.
		 */
		ws_cookie = cnxk_sso_hws_get_cookie(ws);
		ws_cookie = rte_realloc_socket(
			ws_cookie,
			sizeof(struct cnxk_sso_hws_cookie) +
				sizeof(struct cn10k_sso_hws) +
				dev->tx_adptr_data_sz,
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (ws_cookie == NULL)
			return -ENOMEM;
		/* Realloc may move the block; recompute and republish the
		 * port pointer.
		 */
		ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
		memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
		       dev->tx_adptr_data_sz);
		event_dev->data->ports[i] = ws;
	}

	return 0;
}
275
/* Install the fast-path function pointers on @event_dev.
 * Each table below is populated by expanding the NIX Rx/Tx fastpath
 * mode lists, indexed by offload-flag bitmask; the active entry is
 * then selected from the device's rx/tx offload flags via the
 * CN10K_SET_EVDEV_*_OP helpers.
 */
static void
cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	const event_dequeue_t sso_hws_deq[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_ca[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_ca_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Multi-segment (scatter) variants of the above. */
	const event_dequeue_t sso_hws_deq_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_##name,

		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_t sso_hws_deq_tmo_ca_seg[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	const event_dequeue_burst_t sso_hws_deq_tmo_ca_seg_burst[NIX_RX_OFFLOAD_MAX] = {
#define R(name, flags)[flags] = cn10k_sso_hws_deq_tmo_ca_seg_burst_##name,
		NIX_RX_FASTPATH_MODES
#undef R
	};

	/* Tx modes */
	const event_tx_adapter_enqueue_t
		sso_hws_tx_adptr_enq[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	const event_tx_adapter_enqueue_t
		sso_hws_tx_adptr_enq_seg[NIX_TX_OFFLOAD_MAX] = {
#define T(name, sz, flags)[flags] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
			NIX_TX_FASTPATH_MODES
#undef T
		};

	event_dev->enqueue = cn10k_sso_hws_enq;
	event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
	event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
	event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
	/* Dequeue selection: seg vs non-seg, then timeout (tmo) and/or
	 * crypto-adapter (ca) variants; tmo+ca takes precedence last.
	 */
	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
				       sso_hws_deq_seg);
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				       sso_hws_deq_seg_burst);
		if (dev->is_timeout_deq) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo_seg);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_seg_burst);
		}
		if (dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_ca_seg);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_ca_seg_burst);
		}
		if (dev->is_timeout_deq && dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo_ca_seg);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_ca_seg_burst);
		}
	} else {
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
		CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
				       sso_hws_deq_burst);
		if (dev->is_timeout_deq) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_burst);
		}
		if (dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_ca);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_ca_burst);
		}
		if (dev->is_timeout_deq && dev->is_ca_internal_port) {
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
					       sso_hws_deq_tmo_ca);
			CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
					       sso_hws_deq_tmo_ca_burst);
		}
	}
	event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;

	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				       sso_hws_tx_adptr_enq_seg);
	else
		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
				       sso_hws_tx_adptr_enq);

	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
}
453
454 static void
455 cn10k_sso_info_get(struct rte_eventdev *event_dev,
456                    struct rte_event_dev_info *dev_info)
457 {
458         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
459
460         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
461         cnxk_sso_info_get(dev, dev_info);
462 }
463
/* Validate the requested configuration and provision SSO resources:
 * HWS/HWGRP LFs, the XAQ pool, and per-port HWS memory. On failure
 * after resource attach, everything is released via cnxk_rsrc_fini.
 */
static int
cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
				 dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
				    cn10k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn10k_sso_hws_link);

	dev->configured = 1;
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}
504
505 static int
506 cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
507                      const struct rte_event_port_conf *port_conf)
508 {
509
510         RTE_SET_USED(port_conf);
511         return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
512 }
513
/* Release an event port. The leading cookie records whether the port
 * was ever configured: unconfigured ports are freed directly, while
 * configured ones are first unlinked from all queues and their state
 * cleared (see cn10k_sso_hws_release).
 */
static void
cn10k_sso_port_release(void *port)
{
	struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
	struct cnxk_sso_evdev *dev;

	if (port == NULL)
		return;

	dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
	if (!gws_cookie->configured)
		goto free;

	cn10k_sso_hws_release(dev, port);
	memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
	rte_free(gws_cookie);
}
532
/* Link @nb_links queues to @port. Priorities are unsupported; queue
 * ids are widened to 16 bits for the ROC layer. Returns the number of
 * links established.
 */
static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_links];
	uint16_t i;

	RTE_SET_USED(priorities);
	for (i = 0; i < nb_links; i++)
		hwgrp_ids[i] = queues[i];

	return (int)cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
}
549
/* Unlink @nb_unlinks queues from @port; queue ids are widened to 16
 * bits for the ROC layer. Returns the number of links removed.
 */
static int
cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_unlinks];
	uint16_t i;

	for (i = 0; i < nb_unlinks; i++)
		hwgrp_ids[i] = queues[i];

	return (int)cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
}
564
565 static int
566 cn10k_sso_start(struct rte_eventdev *event_dev)
567 {
568         int rc;
569
570         rc = cn10k_sso_updt_tx_adptr_data(event_dev);
571         if (rc < 0)
572                 return rc;
573
574         rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
575                             cn10k_sso_hws_flush_events);
576         if (rc < 0)
577                 return rc;
578         cn10k_sso_fp_fns_set(event_dev);
579
580         return rc;
581 }
582
583 static void
584 cn10k_sso_stop(struct rte_eventdev *event_dev)
585 {
586         cnxk_sso_stop(event_dev, cn10k_sso_hws_reset,
587                       cn10k_sso_hws_flush_events);
588 }
589
590 static int
591 cn10k_sso_close(struct rte_eventdev *event_dev)
592 {
593         return cnxk_sso_close(event_dev, cn10k_sso_hws_unlink);
594 }
595
596 static int
597 cn10k_sso_selftest(void)
598 {
599         return cnxk_sso_selftest(RTE_STR(event_cn10k));
600 }
601
602 static int
603 cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
604                               const struct rte_eth_dev *eth_dev, uint32_t *caps)
605 {
606         int rc;
607
608         RTE_SET_USED(event_dev);
609         rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
610         if (rc)
611                 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
612         else
613                 *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
614                         RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
615                         RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID |
616                         RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR;
617
618         return 0;
619 }
620
621 static void
622 cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
623                        void *tstmp_info)
624 {
625         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
626         int i;
627
628         for (i = 0; i < dev->nb_event_ports; i++) {
629                 struct cn10k_sso_hws *ws = event_dev->data->ports[i];
630                 ws->lookup_mem = lookup_mem;
631                 ws->tstamp = tstmp_info;
632         }
633 }
634
635 static int
636 cn10k_sso_rx_adapter_queue_add(
637         const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
638         int32_t rx_queue_id,
639         const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
640 {
641         struct cn10k_eth_rxq *rxq;
642         void *lookup_mem;
643         void *tstmp_info;
644         int rc;
645
646         rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
647         if (rc)
648                 return -EINVAL;
649
650         rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
651                                            queue_conf);
652         if (rc)
653                 return -EINVAL;
654         rxq = eth_dev->data->rx_queues[0];
655         lookup_mem = rxq->lookup_mem;
656         tstmp_info = rxq->tstamp;
657         cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
658         cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
659
660         return 0;
661 }
662
663 static int
664 cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
665                                const struct rte_eth_dev *eth_dev,
666                                int32_t rx_queue_id)
667 {
668         int rc;
669
670         rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
671         if (rc)
672                 return -EINVAL;
673
674         return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
675 }
676
677 static int
678 cn10k_sso_rx_adapter_vector_limits(
679         const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
680         struct rte_event_eth_rx_adapter_vector_limits *limits)
681 {
682         struct cnxk_eth_dev *cnxk_eth_dev;
683         int ret;
684
685         RTE_SET_USED(dev);
686         ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
687         if (ret)
688                 return -ENOTSUP;
689
690         cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
691         limits->log2_sz = true;
692         limits->min_sz = 1 << ROC_NIX_VWQE_MIN_SIZE_LOG2;
693         limits->max_sz = 1 << ROC_NIX_VWQE_MAX_SIZE_LOG2;
694         limits->min_timeout_ns =
695                 (roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100;
696         limits->max_timeout_ns = BITMASK_ULL(8, 0) * limits->min_timeout_ns;
697
698         return 0;
699 }
700
701 static int
702 cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
703                               const struct rte_eth_dev *eth_dev, uint32_t *caps)
704 {
705         int ret;
706
707         RTE_SET_USED(dev);
708         ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
709         if (ret)
710                 *caps = 0;
711         else
712                 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT |
713                         RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
714
715         return 0;
716 }
717
/* Recompute the SQB flow-control budget for a Tx queue and mirror it
 * into the fast-path txq. A negative @tx_queue_id applies the update
 * to every Tx queue of @eth_dev.
 */
static void
cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cn10k_eth_txq *txq;
	struct roc_nix_sq *sq;
	int i;

	if (tx_queue_id < 0) {
		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
			cn10k_sso_txq_fc_update(eth_dev, i);
	} else {
		uint16_t sqes_per_sqb;

		sq = &cnxk_eth_dev->sqs[tx_queue_id];
		txq = eth_dev->data->tx_queues[tx_queue_id];
		sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
		/* NOTE(review): looks like one SQE per SQB is reserved
		 * (chaining overhead) — confirm against NIX SQ docs.
		 */
		sq->nb_sqb_bufs_adj =
			sq->nb_sqb_bufs -
			RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
				sqes_per_sqb;
		/* Inline outbound security descriptors consume SQBs too. */
		if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
						(sqes_per_sqb - 1));
		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
		/* Expose only 70% of the budget as headroom margin. */
		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
	}
}
746
/* Add an ethdev Tx queue to the Tx adapter: register it with the
 * common layer, reconcile the timestamp offload across all ports,
 * refresh flow control and per-port adapter data, and re-install the
 * fast-path handlers.
 */
static int
cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
			       const struct rte_eth_dev *eth_dev,
			       int32_t tx_queue_id)
{
	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint64_t tx_offloads;
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;

	/* Can't enable tstamp if all the ports don't have it enabled. */
	tx_offloads = cnxk_eth_dev->tx_offload_flags;
	if (dev->tx_adptr_configured) {
		uint8_t tstmp_req = !!(tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);
		uint8_t tstmp_ena =
			!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F);

		/* Drop the flag from whichever side lacks agreement. */
		if (tstmp_ena && !tstmp_req)
			dev->tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
		else if (!tstmp_ena && tstmp_req)
			tx_offloads &= ~(NIX_TX_OFFLOAD_TSTAMP_F);
	}

	dev->tx_offloads |= tx_offloads;
	cn10k_sso_txq_fc_update(eth_dev, tx_queue_id);
	rc = cn10k_sso_updt_tx_adptr_data(event_dev);
	if (rc < 0)
		return rc;
	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
	dev->tx_adptr_configured = 1;

	return 0;
}
785
/* Remove an ethdev Tx queue from the Tx adapter and refresh the
 * per-port adapter data.
 */
static int
cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
			       const struct rte_eth_dev *eth_dev,
			       int32_t tx_queue_id)
{
	int rc;

	RTE_SET_USED(id);
	rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
	if (rc < 0)
		return rc;

	return cn10k_sso_updt_tx_adptr_data(event_dev);
}
799
800 static int
801 cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
802                               const struct rte_cryptodev *cdev, uint32_t *caps)
803 {
804         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
805         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
806
807         *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
808                 RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
809
810         return 0;
811 }
812
813 static int
814 cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
815                             const struct rte_cryptodev *cdev,
816                             int32_t queue_pair_id,
817                             const struct rte_event *event)
818 {
819         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
820
821         RTE_SET_USED(event);
822
823         CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
824         CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");
825
826         dev->is_ca_internal_port = 1;
827         cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
828
829         return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
830 }
831
/* Detach a crypto device queue pair from the event crypto adapter.
 * Both devices are validated to be the cn10k PMD pair before the common
 * cnxk helper tears down the binding.
 */
static int
cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id)
{
	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
	CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

	return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}
842
/* Eventdev ops table for CN10K; mixes CN10K-specific entry points with
 * common cnxk_* implementations shared across cnxk SoCs.
 */
static struct eventdev_ops cn10k_sso_dev_ops = {
	/* Core device and queue/port lifecycle. */
	.dev_infos_get = cn10k_sso_info_get,
	.dev_configure = cn10k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn10k_sso_port_setup,
	.port_release = cn10k_sso_port_release,
	.port_link = cn10k_sso_port_link,
	.port_unlink = cn10k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,

	/* Ethernet Rx adapter. */
	.eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

	.eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,

	/* Ethernet Tx adapter. */
	.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
	.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,

	/* Timer adapter. */
	.timer_adapter_caps_get = cnxk_tim_caps_get,

	/* Crypto adapter. */
	.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
	.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,

	/* Debug and device start/stop/close. */
	.dump = cnxk_sso_dump,
	.dev_start = cn10k_sso_start,
	.dev_stop = cn10k_sso_stop,
	.dev_close = cn10k_sso_close,
	.dev_selftest = cn10k_sso_selftest,
};
880
881 static int
882 cn10k_sso_init(struct rte_eventdev *event_dev)
883 {
884         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
885         int rc;
886
887         if (RTE_CACHE_LINE_SIZE != 64) {
888                 plt_err("Driver not compiled for CN10K");
889                 return -EFAULT;
890         }
891
892         rc = roc_plt_init();
893         if (rc < 0) {
894                 plt_err("Failed to initialize platform model");
895                 return rc;
896         }
897
898         event_dev->dev_ops = &cn10k_sso_dev_ops;
899         /* For secondary processes, the primary has done all the work */
900         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
901                 cn10k_sso_fp_fns_set(event_dev);
902                 return 0;
903         }
904
905         rc = cnxk_sso_init(event_dev);
906         if (rc < 0)
907                 return rc;
908
909         cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
910         if (!dev->max_event_ports || !dev->max_event_queues) {
911                 plt_err("Not enough eventdev resource queues=%d ports=%d",
912                         dev->max_event_queues, dev->max_event_ports);
913                 cnxk_sso_fini(event_dev);
914                 return -ENODEV;
915         }
916
917         plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
918                     event_dev->data->name, dev->max_event_queues,
919                     dev->max_event_ports);
920
921         return 0;
922 }
923
924 static int
925 cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
926 {
927         return rte_event_pmd_pci_probe(pci_drv, pci_dev,
928                                        sizeof(struct cnxk_sso_evdev),
929                                        cn10k_sso_init);
930 }
931
/* PCI IDs handled by this driver: SSO/TIM PF and VF functions on the
 * CN10KA, CN10KAS and CNF10KA SoC variants. Zero vendor_id terminates
 * the table.
 */
static const struct rte_pci_id cn10k_pci_sso_map[] = {
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
	{
		.vendor_id = 0,
	},
};
943
/* PCI driver descriptor; requires BAR mapping and IOVA-as-VA mode. */
static struct rte_pci_driver cn10k_pci_sso = {
	.id_table = cn10k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn10k_sso_probe,
	.remove = cnxk_sso_remove,
};
950
/* Register the PMD with the PCI bus, export its ID table and kernel module
 * dependency, and advertise the accepted devargs (SSO and TIM tunables).
 */
RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CNXK_SSO_FORCE_BP "=1"
			      CN10K_SSO_GW_MODE "=<int>"
			      CNXK_TIM_DISABLE_NPA "=1"
			      CNXK_TIM_CHNK_SLOTS "=<int>"
			      CNXK_TIM_RINGS_LMT "=<int>"
			      CNXK_TIM_STATS_ENA "=1"
			      CNXK_TIM_EXT_CLK "=<string>");