event/cnxk: add timer stats
dpdk.git: drivers/event/cnxk/cn9k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)

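/* Cache the per-HWS GWS register addresses used by the fast-path ops. */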
static void
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
{
        ws->tag_op = base + SSOW_LF_GWS_TAG;
        ws->wqp_op = base + SSOW_LF_GWS_WQP;
        ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
        ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
        ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
        ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}

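/*
 * In dual-workslot mode every event port is backed by a pair of HWS, so
 * link/unlink operations are applied to both halves of the pair.
 */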
static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_link(&dev->sso,
                                      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
                                      nb_link);
                rc |= roc_sso_hws_link(&dev->sso,
                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                       map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_unlink(&dev->sso,
                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                        map, nb_link);
                rc |= roc_sso_hws_unlink(&dev->sso,
                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                         map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t val;

        /* Set get_work tmo for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;
        if (dev->dual_ws) {
                dws = hws;
                rte_memcpy(dws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                dws->fc_mem = dev->fc_mem;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
                plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
        } else {
                ws = hws;
                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
        }
}

static void
cn9k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int i;

        if (dev->dual_ws) {
                dws = hws;
                for (i = 0; i < dev->nb_event_queues; i++) {
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                           (uint16_t *)&i, 1);
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                           (uint16_t *)&i, 1);
                }
                memset(dws, 0, sizeof(*dws));
        } else {
                ws = hws;
                for (i = 0; i < dev->nb_event_queues; i++)
                        roc_sso_hws_unlink(&dev->sso, ws->hws_id,
                                           (uint16_t *)&i, 1);
                memset(ws, 0, sizeof(*ws));
        }
}

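/*
 * Drain all work pending on a HWGRP: issue GET_WORK requests against the
 * given queue, pass each received event to the caller's handler (if any),
 * flush the switch tag, and repeat until the group's pending-work counters
 * read zero.
 */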
static void
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                          cnxk_handle_event_t fn, void *arg)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws_state *st;
        struct cn9k_sso_hws *ws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uintptr_t ws_base;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        if (dev->dual_ws) {
                dws = hws;
                st = &dws->ws_state[0];
                ws_base = dws->base[0];
        } else {
                ws = hws;
                st = (struct cn9k_sso_hws_state *)ws;
                ws_base = ws->base;
        }

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, st->getwrk_op);
                cn9k_sso_hws_get_work_empty(st, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(st->tag_op,
                                                 st->swtag_flush_op);
                do {
                        val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
}

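/*
 * Quiesce a HWS (both halves in dual-workslot mode): wait for in-flight
 * GETWORK/SWTAG/DESCHED operations to finish, untag and deschedule any
 * held work, then wait for the deschedule to complete.
 */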
static void
cn9k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t pend_state;
        uint8_t pend_tt;
        uintptr_t base;
        uint64_t tag;
        uint8_t i;

        dws = hws;
        ws = hws;
        for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
                base = dev->dual_ws ? dws->base[i] : ws->base;
                /* Wait till getwork/swtp/waitw/desched completes. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                                       BIT_ULL(56)));

                tag = plt_read64(base + SSOW_LF_GWS_TAG);
                pend_tt = (tag >> 32) & 0x3;
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }

                /* Wait for desched to complete. */
                do {
                        pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
                } while (pend_state & BIT_ULL(58));
        }
}

void
cn9k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
        else
                dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                        RTE_EVENT_MAX_QUEUES_PER_DEV :
                        dev->sso.max_hwgrp;
}

static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                hws = hws * CN9K_DUAL_WS_NB_WS;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;

        event_dev->dequeue = cn9k_sso_hws_deq;
        event_dev->dequeue_burst = cn9k_sso_hws_deq_burst;
        if (dev->deq_tmo_ns) {
                event_dev->dequeue = cn9k_sso_hws_tmo_deq;
                event_dev->dequeue_burst = cn9k_sso_hws_tmo_deq_burst;
        }

        if (dev->dual_ws) {
                event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                        cn9k_sso_hws_dual_enq_fwd_burst;

                event_dev->dequeue = cn9k_sso_hws_dual_deq;
                event_dev->dequeue_burst = cn9k_sso_hws_dual_deq_burst;
                if (dev->deq_tmo_ns) {
                        event_dev->dequeue = cn9k_sso_hws_dual_tmo_deq;
                        event_dev->dequeue_burst =
                                cn9k_sso_hws_dual_tmo_deq_burst;
                }
        }
}

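/*
 * Allocate and initialize the per-port HWS (or dual-HWS) structure; the
 * first cache line of the allocation is reserved for the common cookie.
 */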
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        void *data;

        if (dev->dual_ws) {
                dws = rte_zmalloc("cn9k_dual_ws",
                                  sizeof(struct cn9k_sso_hws_dual) +
                                          RTE_CACHE_LINE_SIZE,
                                  RTE_CACHE_LINE_SIZE);
                if (dws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
                dws->base[0] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
                dws->base[1] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
                cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
                cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
                dws->hws_id = port_id;
                dws->swtag_req = 0;
                dws->vws = 0;

                data = dws;
        } else {
                /* Allocate event port memory */
                ws = rte_zmalloc("cn9k_ws",
                                 sizeof(struct cn9k_sso_hws) +
                                         RTE_CACHE_LINE_SIZE,
                                 RTE_CACHE_LINE_SIZE);
                if (ws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
                ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
                cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
                ws->hws_id = port_id;
                ws->swtag_req = 0;

                data = ws;
        }

        return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        roc_sso_rsrc_fini(&dev->sso);

        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
                                    cn9k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn9k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn9k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
                            cn9k_sso_hws_flush_events);
        if (rc < 0)
                return rc;

        cn9k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn9k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn9k_sso_hws_reset, cn9k_sso_hws_flush_events);
}

static int
cn9k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
}

static int
cn9k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn9k));
}

static struct rte_eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .dump = cnxk_sso_dump,
        .dev_start = cn9k_sso_start,
        .dev_stop = cn9k_sso_stop,
        .dev_close = cn9k_sso_close,
        .dev_selftest = cn9k_sso_selftest,
};

static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CN9K_SSO_SINGLE_WS "=1"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");