event/cnxk: add timer cancel
[dpdk.git] drivers/event/cnxk/cnxk_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_eventdev.h"

void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                  struct rte_event_dev_info *dev_info)
{
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
        char pool_name[RTE_MEMZONE_NAMESIZE];
        uint32_t xaq_cnt, npa_aura_id;
        const struct rte_memzone *mz;
        struct npa_aura_s *aura;
        static int reconfig_cnt;
        int rc;

        if (dev->xaq_pool) {
                rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
                if (rc < 0) {
                        plt_err("Failed to release XAQ %d", rc);
                        return rc;
                }
                rte_mempool_free(dev->xaq_pool);
                dev->xaq_pool = NULL;
        }

        /*
         * Allocate memory for Add work backpressure.
         */
        mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
        if (mz == NULL)
                mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
                                                 sizeof(struct npa_aura_s) +
                                                         RTE_CACHE_LINE_SIZE,
                                                 0, 0, RTE_CACHE_LINE_SIZE);
        if (mz == NULL) {
                plt_err("Failed to allocate mem for fcmem");
                return -ENOMEM;
        }

        dev->fc_iova = mz->iova;
        dev->fc_mem = mz->addr;

        aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
                                     RTE_CACHE_LINE_SIZE);
        memset(aura, 0, sizeof(struct npa_aura_s));

        aura->fc_ena = 1;
        aura->fc_addr = dev->fc_iova;
        aura->fc_hyst_bits = 0; /* Store count on all updates */
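        /* With fc_ena set, the NPA hardware mirrors this aura's buffer count
         * to fc_addr (i.e. dev->fc_mem), which the driver later reads to
         * gauge XAQ usage before enqueueing new work (see xaq_lmt below).
         */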

        /* Taken from HRM 14.3.3(4) */
        xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
        if (dev->xae_cnt)
                xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
        else if (dev->adptr_xae_cnt)
                xaq_cnt += (dev->adptr_xae_cnt / dev->sso.xae_waes) +
                           (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
        else
                xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
                           (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
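        /* Worked example with purely illustrative numbers: two event queues,
         * xae_cnt = 8192 set via devargs and (hypothetically) 16 work entries
         * per XAQ buffer would give
         * xaq_cnt = 2 * CNXK_SSO_XAQ_CACHE_CNT + 8192 / 16 buffers.
         */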

        plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
        /* Setup XAQ based on number of queues. */
        snprintf(pool_name, 30, "cnxk_xaq_buf_pool_%d", reconfig_cnt);
        dev->xaq_pool = (void *)rte_mempool_create_empty(
                pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
                rte_socket_id(), 0);

        if (dev->xaq_pool == NULL) {
                plt_err("Unable to create empty mempool.");
                rte_memzone_free(mz);
                return -ENOMEM;
        }

        rc = rte_mempool_set_ops_byname(dev->xaq_pool,
                                        rte_mbuf_platform_mempool_ops(), aura);
        if (rc != 0) {
                plt_err("Unable to set xaqpool ops.");
                goto alloc_fail;
        }

        rc = rte_mempool_populate_default(dev->xaq_pool);
        if (rc < 0) {
                plt_err("Unable to populate xaqpool.");
                goto alloc_fail;
        }
        reconfig_cnt++;
        /* When SW does addwork (enqueue) check if there is space in XAQ by
         * comparing fc_addr above against the xaq_lmt calculated below.
         * There should be a minimum headroom (CNXK_SSO_XAQ_SLACK / 2) for SSO
         * to request XAQ to cache them even before enqueue is called.
         */
        dev->xaq_lmt =
                xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
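        /* A sketch of how the fast path is expected to use these fields
         * (ws->fc_mem / ws->xaq_lmt being the per-HWS copies set up later):
         *
         *     if (*ws->fc_mem >= ws->xaq_lmt)
         *             return 0; // XAQ nearly exhausted, reject the new event
         */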
        dev->nb_xaq_cfg = xaq_cnt;

        npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
        return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
                                       dev->nb_event_queues);
alloc_fail:
        rte_mempool_free(dev->xaq_pool);
        rte_memzone_free(mz);
        return rc;
}

int
cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc = 0;

        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_stop(event_dev);

        rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to release XAQ %d", rc);
                return rc;
        }

        rte_mempool_free(dev->xaq_pool);
        dev->xaq_pool = NULL;
        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0) {
                plt_err("Failed to alloc XAQ %d", rc);
                return rc;
        }

        rte_mb();
        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_start(event_dev);

        return 0;
}

int
cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
                       cnxk_sso_init_hws_mem_t init_hws_fn,
                       cnxk_sso_hws_setup_t setup_hws_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cnxk_sso_hws_cookie *ws_cookie;
                void *ws;

                /* Reuse the existing port memory if already allocated */
                if (event_dev->data->ports[i] != NULL)
                        ws = event_dev->data->ports[i];
                else
                        ws = init_hws_fn(dev, i);
                if (ws == NULL)
                        goto hws_fini;
                ws_cookie = cnxk_sso_hws_get_cookie(ws);
                ws_cookie->event_dev = event_dev;
                ws_cookie->configured = 1;
                event_dev->data->ports[i] = ws;
                cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
                                    i, setup_hws_fn);
        }

        return 0;
hws_fini:
        for (i = i - 1; i >= 0; i--) {
                rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
                event_dev->data->ports[i] = NULL;
        }
        return -ENOMEM;
}

void
cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
                       cnxk_sso_link_t link_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP];
        int i, j;

        for (i = 0; i < dev->nb_event_ports; i++) {
                uint16_t nb_hwgrp = 0;

                links_map = event_dev->data->links_map;
                /* Point links_map to this port specific area */
                links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);

                for (j = 0; j < dev->nb_event_queues; j++) {
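                        /* Queues the application never linked are left at the
                         * eventdev layer's invalid marker (0xdead); skip them
                         * so only active links are re-established.
                         */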
                        if (links_map[j] == 0xdead)
                                continue;
                        hwgrp[nb_hwgrp] = j;
                        nb_hwgrp++;
                }

                link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp);
        }
}

int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;
        int rc;

        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;
        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                plt_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (!conf->nb_event_queues || !conf->nb_event_ports ||
            conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                plt_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                plt_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                plt_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        if (dev->xaq_pool) {
                rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
                if (rc < 0) {
                        plt_err("Failed to release XAQ %d", rc);
                        return rc;
                }
                rte_mempool_free(dev->xaq_pool);
                dev->xaq_pool = NULL;
        }

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        return 0;
}

void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
                     const struct rte_event_queue_conf *queue_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
        /* Normalize <0-255> to <0-7> */
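        /* e.g. RTE_EVENT_DEV_PRIORITY_HIGHEST (0) maps to hardware priority 0
         * and RTE_EVENT_DEV_PRIORITY_LOWEST (255) maps to 7.
         */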
        return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
                                          queue_conf->priority / 32);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}

void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

int
cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    cnxk_sso_hws_setup_t hws_setup_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t grps_base[CNXK_SSO_MAX_HWGRP] = {0};
        uint16_t q;

        plt_sso_dbg("Port=%d", port_id);
        if (event_dev->data->ports[port_id] == NULL) {
                plt_err("Invalid port Id %d", port_id);
                return -EINVAL;
        }

        for (q = 0; q < dev->nb_event_queues; q++) {
                grps_base[q] = roc_sso_hwgrp_base_get(&dev->sso, q);
                if (grps_base[q] == 0) {
                        plt_err("Failed to get grp[%d] base addr", q);
                        return -EINVAL;
                }
        }

        hws_setup_fn(dev, event_dev->data->ports[port_id], grps_base);
        plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
        rte_mb();

        return 0;
}

int
cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
                       uint64_t *tmo_ticks)
{
        RTE_SET_USED(event_dev);
        *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

        return 0;
}

void
cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        roc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);
}

static void
cnxk_handle_event(void *arg, struct rte_event event)
{
        struct rte_eventdev *event_dev = arg;

        if (event_dev->dev_ops->dev_stop_flush != NULL)
                event_dev->dev_ops->dev_stop_flush(
                        event_dev->data->dev_id, event,
                        event_dev->data->dev_stop_flush_arg);
}

static void
cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
                 cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t hwgrp_base;
        uint16_t i;
        void *ws;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                reset_fn(dev, ws);
        }

        rte_mb();
        ws = event_dev->data->ports[0];

        for (i = 0; i < dev->nb_event_queues; i++) {
                /* Consume all the events through HWS0 */
                hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
                flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
                /* Enable/Disable SSO GGRP */
                plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
        }
}

int
cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
               cnxk_sso_hws_flush_t flush_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        struct roc_sso_hwgrp_qos qos[dev->qos_queue_cnt];
        int i, rc;

        plt_sso_dbg();
        for (i = 0; i < dev->qos_queue_cnt; i++) {
                qos[i].hwgrp = dev->qos_parse_data[i].queue;
                qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
                qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
                qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
        }
        rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
                                      dev->xae_cnt);
        if (rc < 0) {
                plt_sso_dbg("Failed to configure HWGRP QoS rc = %d", rc);
                return -EINVAL;
        }
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, true);
        rte_mb();

        return 0;
}

void
cnxk_sso_stop(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
              cnxk_sso_hws_flush_t flush_fn)
{
        plt_sso_dbg();
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, false);
        rte_mb();
}

int
cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t all_queues[CNXK_SSO_MAX_HWGRP];
        uint16_t i;
        void *ws;

        if (!dev->configured)
                return 0;

        for (i = 0; i < dev->nb_event_queues; i++)
                all_queues[i] = i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                unlink_fn(dev, ws, all_queues, dev->nb_event_queues);
                rte_free(cnxk_sso_hws_get_cookie(ws));
                event_dev->data->ports[i] = NULL;
        }

        roc_sso_rsrc_fini(&dev->sso);
        rte_mempool_free(dev->xaq_pool);
        rte_memzone_free(rte_memzone_lookup(CNXK_SSO_FC_NAME));

        dev->fc_iova = 0;
        dev->fc_mem = NULL;
        dev->xaq_pool = NULL;
        dev->configured = false;
        dev->is_timeout_deq = 0;
        dev->nb_event_ports = 0;
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);

        return 0;
}

static void
parse_queue_param(char *value, void *opaque)
{
        struct cnxk_sso_qos queue_qos = {0};
        uint8_t *val = (uint8_t *)&queue_qos;
        struct cnxk_sso_evdev *dev = opaque;
        char *tok = strtok(value, "-");
        struct cnxk_sso_qos *old_ptr;

        if (!strlen(value))
                return;

        /* Stop at the last field so that a malformed string with extra
         * tokens cannot write past queue_qos.
         */
        while (tok != NULL && val <= &queue_qos.iaq_prcnt) {
                *val = atoi(tok);
                tok = strtok(NULL, "-");
                val++;
        }

        if (val != (&queue_qos.iaq_prcnt + 1) || tok != NULL) {
                plt_err("Invalid QoS parameter, expected [Qx-XAQ-TAQ-IAQ]");
                return;
        }

        dev->qos_queue_cnt++;
        old_ptr = dev->qos_parse_data;
        dev->qos_parse_data = rte_realloc(
                dev->qos_parse_data,
                sizeof(struct cnxk_sso_qos) * dev->qos_queue_cnt, 0);
        if (dev->qos_parse_data == NULL) {
                dev->qos_parse_data = old_ptr;
                dev->qos_queue_cnt--;
                return;
        }
        dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}

static void
parse_qos_list(const char *value, void *opaque)
{
        char *s = strdup(value);
        char *start = NULL;
        char *end = NULL;
        char *f = s;

        if (s == NULL)
                return;

        while (*s) {
                if (*s == '[')
                        start = s;
                else if (*s == ']')
                        end = s;

                if (start && start < end) {
                        *end = 0;
                        parse_queue_param(start + 1, opaque);
                        s = end;
                        start = end;
                }
                s++;
        }

        free(f);
}

static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
        RTE_SET_USED(key);

        /* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]; '-' is used as the
         * separator since ',' isn't allowed in devargs. Everything is
         * expressed in percentages, 0 represents default.
         */
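        /* Example (illustrative): a value of "[1-50-50-50][2-30-30-30]" would
         * request 50% of the XAQ, TAQ and IAQ resources for queue 1 and 30%
         * of each for queue 2.
         */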
        parse_qos_list(value, opaque);

        return 0;
}

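/* A sketch of the resulting devargs usage (illustrative; the exact key
 * strings come from cnxk_eventdev.h and are assumed here to be "xae_cnt",
 * "qos", "single_ws" and "gw_mode"):
 *
 *     -a 0002:0e:00.0,xae_cnt=8192,qos=[1-50-50-50],single_ws=1
 */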
static void
cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        uint32_t single_ws = 0;

        if (devargs == NULL)
                return;
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        rte_kvargs_process(kvlist, CNXK_SSO_XAE_CNT, &parse_kvargs_value,
                           &dev->xae_cnt);
        rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
                           dev);
        rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_value,
                           &single_ws);
        rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_value,
                           &dev->gw_mode);
        dev->dual_ws = !single_ws;
        rte_kvargs_free(kvlist);
}

int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
        const struct rte_memzone *mz = NULL;
        struct rte_pci_device *pci_dev;
        struct cnxk_sso_evdev *dev;
        int rc;

        mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
                                 SOCKET_ID_ANY, 0);
        if (mz == NULL) {
                plt_err("Failed to create eventdev memzone");
                return -ENOMEM;
        }

        dev = cnxk_sso_pmd_priv(event_dev);
        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
        dev->sso.pci_dev = pci_dev;

        *(uint64_t *)mz->addr = (uint64_t)dev;
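        /* Stashing the private pointer in a named memzone lets other cnxk
         * components (the timer PMD, for instance) locate this eventdev via
         * rte_memzone_lookup(CNXK_SSO_MZ_NAME).
         */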
        cnxk_sso_parse_devargs(dev, pci_dev->device.devargs);

        /* Initialize the base cnxk_dev object */
        rc = roc_sso_dev_init(&dev->sso);
        if (rc < 0) {
                plt_err("Failed to initialize RoC SSO rc=%d", rc);
                goto error;
        }

        dev->is_timeout_deq = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;

        cnxk_tim_init(&dev->sso);

        return 0;

error:
        rte_memzone_free(mz);
        return rc;
}

int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        /* For secondary processes, nothing to be done */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        cnxk_tim_fini();
        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_dev_fini(&dev->sso);

        return 0;
}

int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}