dpdk.git: drivers/event/cnxk/cnxk_eventdev.c (at commit "event/cnxk: store and reuse workslot status")
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_cryptodev_ops.h"
#include "cnxk_eventdev.h"

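/* Create a per-queue-pair mempool that holds in-flight crypto request
 * contexts for the event crypto adapter, then mark the QP as adapter
 * enabled.
 */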
static int
crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
                        struct cnxk_cpt_qp *qp)
{
        char name[RTE_MEMPOOL_NAMESIZE];
        uint32_t cache_size, nb_req;
        unsigned int req_size;

        snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
                 cdev->data->dev_id, qp->lf.lf_id);
        req_size = sizeof(struct cpt_inflight_req);
        cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
        nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
        qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
                                           0, NULL, NULL, NULL, NULL,
                                           rte_socket_id(), 0);
        if (qp->ca.req_mp == NULL)
                return -ENOMEM;

        qp->ca.enabled = true;

        return 0;
}

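/* Add one crypto queue pair (or all of them when queue_pair_id is -1) to
 * the event crypto adapter and grow the SSO XAE count by the number of
 * in-flight requests the new mempool(s) can hold.
 */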
int
cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
        uint32_t adptr_xae_cnt = 0;
        struct cnxk_cpt_qp *qp;
        int ret;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        ret = crypto_adapter_qp_setup(cdev, qp);
                        if (ret) {
                                cnxk_crypto_adapter_qp_del(cdev, -1);
                                return ret;
                        }
                        adptr_xae_cnt += qp->ca.req_mp->size;
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                ret = crypto_adapter_qp_setup(cdev, qp);
                if (ret)
                        return ret;
                adptr_xae_cnt = qp->ca.req_mp->size;
        }

        /* Update crypto adapter XAE count */
        sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
        cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
{
        rte_mempool_free(qp->ca.req_mp);
        qp->ca.enabled = false;

        return 0;
}

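/* Remove one crypto queue pair (or all of them when queue_pair_id is -1)
 * from the event crypto adapter, freeing the request mempools created at
 * add time.
 */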
int
cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_cpt_qp *qp;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        if (qp->ca.enabled)
                                crypto_adapter_qp_free(qp);
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                if (qp->ca.enabled)
                        crypto_adapter_qp_free(qp);
        }

        return 0;
}

void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                  struct rte_event_dev_info *dev_info)
{
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
                                  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
}

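/* Size the XAQ aura from the base in-unit entry count (dev->sso.iue) plus
 * any extra events requested through devargs and by the adapters, then
 * allocate XAQ buffers for every configured event queue (HWGRP).
 */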
int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
        uint32_t xae_cnt;
        int rc;

        xae_cnt = dev->sso.iue;
        if (dev->xae_cnt)
                xae_cnt += dev->xae_cnt;
        if (dev->adptr_xae_cnt)
                xae_cnt += (dev->adptr_xae_cnt);

        plt_sso_dbg("Configuring %d xae buffers", xae_cnt);
        rc = roc_sso_hwgrp_init_xaq_aura(&dev->sso, xae_cnt);
        if (rc < 0) {
                plt_err("Failed to configure XAQ aura");
                return rc;
        }
        dev->xaq_lmt = dev->sso.xaq.xaq_lmt;
        dev->fc_iova = (uint64_t)dev->sso.xaq.fc;

        return roc_sso_hwgrp_alloc_xaq(
                &dev->sso,
                roc_npa_aura_handle_to_aura(dev->sso.xaq.aura_handle),
                dev->nb_event_queues);
}

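/* Re-size the XAQ pool at runtime, e.g. after an adapter adds queue pairs.
 * The device is stopped (if running) before the pool is re-allocated and
 * restarted afterwards.
 */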
int
cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc = 0;

        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_stop(event_dev);

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0) {
                plt_err("Failed to alloc XAQ %d", rc);
                return rc;
        }

        rte_mb();
        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_start(event_dev);

        return 0;
}

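/* Allocate (or reuse) the work slot memory for every event port and run the
 * HW-specific setup callback on each of them. On failure, ports set up so
 * far are torn down again.
 */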
int
cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
                       cnxk_sso_init_hws_mem_t init_hws_fn,
                       cnxk_sso_hws_setup_t setup_hws_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cnxk_sso_hws_cookie *ws_cookie;
                void *ws;

                /* Reuse existing work slot memory if the port was already allocated */
                if (event_dev->data->ports[i] != NULL)
                        ws = event_dev->data->ports[i];
                else
                        ws = init_hws_fn(dev, i);
                if (ws == NULL)
                        goto hws_fini;
                ws_cookie = cnxk_sso_hws_get_cookie(ws);
                ws_cookie->event_dev = event_dev;
                ws_cookie->configured = 1;
                event_dev->data->ports[i] = ws;
                cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
                                    i, setup_hws_fn);
        }

        return 0;
hws_fini:
        for (i = i - 1; i >= 0; i--) {
                rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
                event_dev->data->ports[i] = NULL;
        }
        return -ENOMEM;
}

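/* Re-apply the port-to-queue links recorded in the eventdev links_map after
 * a reconfigure; entries marked 0xdead are unlinked and are skipped.
 */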
void
cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
                       cnxk_sso_link_t link_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP];
        int i, j;

        for (i = 0; i < dev->nb_event_ports; i++) {
                uint16_t nb_hwgrp = 0;

                links_map = event_dev->data->links_map;
                /* Point links_map to this port specific area */
                links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);

                for (j = 0; j < dev->nb_event_queues; j++) {
                        if (links_map[j] == 0xdead)
                                continue;
                        hwgrp[nb_hwgrp] = j;
                        nb_hwgrp++;
                }

                link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp);
        }
}

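/* Validate the requested device configuration (dequeue timeout, queue/port
 * counts and depths) against the device limits and release any previously
 * allocated SSO resources before the new configuration is applied.
 */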
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;

        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;
        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                plt_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (!conf->nb_event_queues || !conf->nb_event_ports ||
            conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                plt_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                plt_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                plt_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_hwgrp_free_xaq_aura(&dev->sso, dev->sso.nb_hwgrp);

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        return 0;
}

void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
                     const struct rte_event_queue_conf *queue_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
        /* Normalize <0-255> to <0-7> */
        return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
                                          queue_conf->priority / 32);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}

void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

int
cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    cnxk_sso_hws_setup_t hws_setup_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t grp_base = 0;

        plt_sso_dbg("Port=%d", port_id);
        if (event_dev->data->ports[port_id] == NULL) {
                plt_err("Invalid port Id %d", port_id);
                return -EINVAL;
        }

        grp_base = roc_sso_hwgrp_base_get(&dev->sso, 0);
        if (grp_base == 0) {
                plt_err("Failed to get grp base addr");
                return -EINVAL;
        }

        hws_setup_fn(dev, event_dev->data->ports[port_id], grp_base);
        plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
        rte_mb();

        return 0;
}

int
cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
                       uint64_t *tmo_ticks)
{
        RTE_SET_USED(event_dev);
        *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

        return 0;
}

void
cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        roc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);
}

static void
cnxk_handle_event(void *arg, struct rte_event event)
{
        struct rte_eventdev *event_dev = arg;

        if (event_dev->dev_ops->dev_stop_flush != NULL)
                event_dev->dev_ops->dev_stop_flush(
                        event_dev->data->dev_id, event,
                        event_dev->data->dev_stop_flush_arg);
}

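/* Reset every work slot, drain all pending events through HWS0 (invoking the
 * application's dev_stop_flush callback for each one) and finally enable or
 * disable every SSO group depending on whether this is a start or a stop.
 */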
static void
cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
                 cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t hwgrp_base;
        uint16_t i;
        void *ws;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                reset_fn(dev, ws);
        }

        rte_mb();
        ws = event_dev->data->ports[0];

        for (i = 0; i < dev->nb_event_queues; i++) {
                /* Consume all the events through HWS0 */
                hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
                flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
                /* Enable/Disable SSO GGRP */
                plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
        }
}

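/* Common start handler: program per-group QoS shares parsed from devargs,
 * then flush stale events and enable all SSO groups.
 */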
int
cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
               cnxk_sso_hws_flush_t flush_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        struct roc_sso_hwgrp_qos qos[dev->qos_queue_cnt];
        int i, rc;

        plt_sso_dbg();
        for (i = 0; i < dev->qos_queue_cnt; i++) {
                qos[i].hwgrp = dev->qos_parse_data[i].queue;
                qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
                qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
                qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
        }
        rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
                                      dev->xae_cnt);
        if (rc < 0) {
                plt_sso_dbg("failed to configure HWGRP QoS rc = %d", rc);
                return -EINVAL;
        }
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, true);
        rte_mb();

        return 0;
}

void
cnxk_sso_stop(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
              cnxk_sso_hws_flush_t flush_fn)
{
        plt_sso_dbg();
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, false);
        rte_mb();
}

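/* Common close handler: unlink every queue from every port, free the work
 * slot cookies and reset the software state back to its post-init defaults.
 */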
int
cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t all_queues[CNXK_SSO_MAX_HWGRP];
        uint16_t i;
        void *ws;

        if (!dev->configured)
                return 0;

        for (i = 0; i < dev->nb_event_queues; i++)
                all_queues[i] = i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                unlink_fn(dev, ws, all_queues, dev->nb_event_queues);
                rte_free(cnxk_sso_hws_get_cookie(ws));
                event_dev->data->ports[i] = NULL;
        }

        roc_sso_rsrc_fini(&dev->sso);

        dev->fc_iova = 0;
        dev->configured = false;
        dev->is_timeout_deq = 0;
        dev->nb_event_ports = 0;
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);

        return 0;
}

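/* Parse a single "Qx-XAQ-TAQ-IAQ" token from the QoS devarg, writing the
 * percentages directly into the cnxk_sso_qos fields, and append the result
 * to the per-device QoS parse data.
 */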
static void
parse_queue_param(char *value, void *opaque)
{
        struct cnxk_sso_qos queue_qos = {0};
        uint16_t *val = (uint16_t *)&queue_qos;
        struct cnxk_sso_evdev *dev = opaque;
        char *tok = strtok(value, "-");
        struct cnxk_sso_qos *old_ptr;

        if (!strlen(value))
                return;

        while (tok != NULL) {
                *val = atoi(tok);
                tok = strtok(NULL, "-");
                val++;
        }

        if (val != (&queue_qos.iaq_prcnt + 1)) {
                plt_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
                return;
        }

        dev->qos_queue_cnt++;
        old_ptr = dev->qos_parse_data;
        dev->qos_parse_data = rte_realloc(
                dev->qos_parse_data,
                sizeof(struct cnxk_sso_qos) * dev->qos_queue_cnt, 0);
        if (dev->qos_parse_data == NULL) {
                dev->qos_parse_data = old_ptr;
                dev->qos_queue_cnt--;
                return;
        }
        dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}

static void
parse_qos_list(const char *value, void *opaque)
{
        char *s = strdup(value);
        char *start = NULL;
        char *end = NULL;
        char *f = s;

        while (*s) {
                if (*s == '[')
                        start = s;
                else if (*s == ']')
                        end = s;

                if (start && start < end) {
                        *end = 0;
                        parse_queue_param(start + 1, opaque);
                        s = end;
                        start = end;
                }
                s++;
        }

        free(f);
}

static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
        RTE_SET_USED(key);

        /* Dict format is [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]; '-' is used as the
         * separator because ',' isn't allowed. Everything is expressed in
         * percentages, 0 represents default.
         */
        parse_qos_list(value, opaque);

        return 0;
}

static void
cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        uint8_t single_ws = 0;

        if (devargs == NULL)
                return;
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        rte_kvargs_process(kvlist, CNXK_SSO_XAE_CNT, &parse_kvargs_value,
                           &dev->xae_cnt);
        rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
                           dev);
        rte_kvargs_process(kvlist, CNXK_SSO_FORCE_BP, &parse_kvargs_flag,
                           &dev->force_ena_bp);
        rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag,
                           &single_ws);
        rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_flag,
                           &dev->gw_mode);
        dev->dual_ws = !single_ws;
        rte_kvargs_free(kvlist);
}

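/* Common probe-time initialization: reserve a memzone that stores the driver
 * private pointer, parse devargs, initialize the RoC SSO block and set the
 * default dequeue timeout limits.
 */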
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
        const struct rte_memzone *mz = NULL;
        struct rte_pci_device *pci_dev;
        struct cnxk_sso_evdev *dev;
        int rc;

        mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
                                 SOCKET_ID_ANY, 0);
        if (mz == NULL) {
                plt_err("Failed to create eventdev memzone");
                return -ENOMEM;
        }

        dev = cnxk_sso_pmd_priv(event_dev);
        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
        dev->sso.pci_dev = pci_dev;

        *(uint64_t *)mz->addr = (uint64_t)dev;
        cnxk_sso_parse_devargs(dev, pci_dev->device.devargs);

        /* Initialize the base cnxk_dev object */
        rc = roc_sso_dev_init(&dev->sso);
        if (rc < 0) {
                plt_err("Failed to initialize RoC SSO rc=%d", rc);
                goto error;
        }

        dev->is_timeout_deq = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;

        cnxk_tim_init(&dev->sso);

        return 0;

error:
        rte_memzone_free(mz);
        return rc;
}

int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        /* For secondary processes, nothing to be done */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        cnxk_tim_fini();
        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_dev_fini(&dev->sso);

        return 0;
}

int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}