drivers/event/cnxk/cnxk_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_cryptodev_ops.h"
#include "cnxk_eventdev.h"

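/* Create a pool of in-flight request contexts for a crypto queue pair and
 * mark the queue pair as managed by the event crypto adapter.
 */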
static int
crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
                        struct cnxk_cpt_qp *qp)
{
        char name[RTE_MEMPOOL_NAMESIZE];
        uint32_t cache_size, nb_req;
        unsigned int req_size;

        snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
                 cdev->data->dev_id, qp->lf.lf_id);
        req_size = sizeof(struct cpt_inflight_req);
        cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
        nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
        qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
                                           0, NULL, NULL, NULL, NULL,
                                           rte_socket_id(), 0);
        if (qp->ca.req_mp == NULL)
                return -ENOMEM;

        qp->ca.enabled = true;

        return 0;
}

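/* Attach one queue pair (or all of them when queue_pair_id is -1) of a
 * cryptodev to the event crypto adapter, then grow the SSO XAE count to
 * cover the newly added in-flight requests.
 */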
int
cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
        uint32_t adptr_xae_cnt = 0;
        struct cnxk_cpt_qp *qp;
        int ret;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        ret = crypto_adapter_qp_setup(cdev, qp);
                        if (ret) {
                                cnxk_crypto_adapter_qp_del(cdev, -1);
                                return ret;
                        }
                        adptr_xae_cnt += qp->ca.req_mp->size;
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                ret = crypto_adapter_qp_setup(cdev, qp);
                if (ret)
                        return ret;
                adptr_xae_cnt = qp->ca.req_mp->size;
        }

        /* Update crypto adapter XAE count */
        sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
        cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

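/* Release the adapter request pool of a queue pair and detach it from the
 * event crypto adapter.
 */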
static int
crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
{
        rte_mempool_free(qp->ca.req_mp);
        qp->ca.enabled = false;

        return 0;
}

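/* Detach one queue pair (or all of them when queue_pair_id is -1) from the
 * event crypto adapter.
 */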
int
cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_cpt_qp *qp;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        if (qp->ca.enabled)
                                crypto_adapter_qp_free(qp);
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                if (qp->ca.enabled)
                        crypto_adapter_qp_free(qp);
        }

        return 0;
}

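/* Report the SSO device capabilities and limits to the eventdev layer. */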
void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                  struct rte_event_dev_info *dev_info)
{
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

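/* Allocate the XAQ buffer pool used by the SSO to hold in-flight events,
 * along with the aura flow-control memory that backpressures software
 * add-work.
 */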
int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
        char pool_name[RTE_MEMZONE_NAMESIZE];
        uint32_t xaq_cnt, npa_aura_id;
        const struct rte_memzone *mz;
        struct npa_aura_s *aura;
        static int reconfig_cnt;
        int rc;

        if (dev->xaq_pool) {
                rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
                if (rc < 0) {
                        plt_err("Failed to release XAQ %d", rc);
                        return rc;
                }
                rte_mempool_free(dev->xaq_pool);
                dev->xaq_pool = NULL;
        }

        /*
         * Allocate memory for add-work backpressure.
         */
        mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
        if (mz == NULL)
                mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
                                                 sizeof(struct npa_aura_s) +
                                                         RTE_CACHE_LINE_SIZE,
                                                 0, 0, RTE_CACHE_LINE_SIZE);
        if (mz == NULL) {
                plt_err("Failed to allocate mem for fcmem");
                return -ENOMEM;
        }

        dev->fc_iova = mz->iova;
        dev->fc_mem = mz->addr;

        aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
                                     RTE_CACHE_LINE_SIZE);
        memset(aura, 0, sizeof(struct npa_aura_s));

        aura->fc_ena = 1;
        aura->fc_addr = dev->fc_iova;
        aura->fc_hyst_bits = 0; /* Store count on all updates */

        /* Taken from HRM 14.3.3(4) */
        xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
        if (dev->xae_cnt)
                xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
        else if (dev->adptr_xae_cnt)
                xaq_cnt += (dev->adptr_xae_cnt / dev->sso.xae_waes) +
                           (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
        else
                xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
                           (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);

        plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
        /* Set up XAQ based on the number of event queues. */
        snprintf(pool_name, sizeof(pool_name), "cnxk_xaq_buf_pool_%d",
                 reconfig_cnt);
        dev->xaq_pool = (void *)rte_mempool_create_empty(
                pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
                rte_socket_id(), 0);

        if (dev->xaq_pool == NULL) {
                plt_err("Unable to create empty mempool.");
                rte_memzone_free(mz);
                return -ENOMEM;
        }

        rc = rte_mempool_set_ops_byname(dev->xaq_pool,
                                        rte_mbuf_platform_mempool_ops(), aura);
        if (rc != 0) {
                plt_err("Unable to set xaqpool ops.");
                goto alloc_fail;
        }

        rc = rte_mempool_populate_default(dev->xaq_pool);
        if (rc < 0) {
                plt_err("Unable to populate xaqpool.");
                goto alloc_fail;
        }
        reconfig_cnt++;
        /* When SW does addwork (enqueue), check if there is space in XAQ by
         * comparing fc_addr above against the xaq_lmt calculated below.
         * There should be a minimum headroom (CNXK_SSO_XAQ_SLACK / 2) for SSO
         * to request XAQ to cache them even before enqueue is called.
         */
        dev->xaq_lmt =
                xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
        dev->nb_xaq_cfg = xaq_cnt;

        npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
        return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
                                       dev->nb_event_queues);
alloc_fail:
        rte_mempool_free(dev->xaq_pool);
        dev->xaq_pool = NULL;
        rte_memzone_free(mz);
        return rc;
}

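/* Re-size the XAQ pool after the expected number of in-flight events has
 * changed (e.g. when an adapter is added); the device is stopped and
 * restarted around the reallocation if it was running.
 */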
int
cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc = 0;

        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_stop(event_dev);

        rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to release XAQ %d", rc);
                return rc;
        }

        rte_mempool_free(dev->xaq_pool);
        dev->xaq_pool = NULL;
        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0) {
                plt_err("Failed to alloc XAQ %d", rc);
                return rc;
        }

        rte_mb();
        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_start(event_dev);

        return 0;
}

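/* Allocate (or reuse) the per-port HWS structures and configure each event
 * port; on failure, roll back the ports set up so far.
 */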
int
cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
                       cnxk_sso_init_hws_mem_t init_hws_fn,
                       cnxk_sso_hws_setup_t setup_hws_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cnxk_sso_hws_cookie *ws_cookie;
                void *ws;

                /* Reuse the HWS memory if the port was already allocated */
                if (event_dev->data->ports[i] != NULL)
                        ws = event_dev->data->ports[i];
                else
                        ws = init_hws_fn(dev, i);
                if (ws == NULL)
                        goto hws_fini;
                ws_cookie = cnxk_sso_hws_get_cookie(ws);
                ws_cookie->event_dev = event_dev;
                ws_cookie->configured = 1;
                event_dev->data->ports[i] = ws;
                cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
                                    i, setup_hws_fn);
        }

        return 0;
hws_fini:
        for (i = i - 1; i >= 0; i--) {
                rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
                event_dev->data->ports[i] = NULL;
        }
        return -ENOMEM;
}

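/* Re-apply the queue-to-port links recorded in links_map after the SSO has
 * been reconfigured; 0xdead marks an unlinked entry.
 */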
void
cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
                       cnxk_sso_link_t link_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP];
        int i, j;

        for (i = 0; i < dev->nb_event_ports; i++) {
                uint16_t nb_hwgrp = 0;

                links_map = event_dev->data->links_map;
                /* Point links_map to this port specific area */
                links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);

                for (j = 0; j < dev->nb_event_queues; j++) {
                        if (links_map[j] == 0xdead)
                                continue;
                        hwgrp[nb_hwgrp] = j;
                        nb_hwgrp++;
                }

                link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp);
        }
}

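/* Validate the requested device configuration against the SSO limits and
 * release any previously allocated XAQ pool before reconfiguration.
 */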
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;
        int rc;

        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;
        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                plt_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (!conf->nb_event_queues || !conf->nb_event_ports ||
            conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                plt_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                plt_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                plt_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        if (dev->xaq_pool) {
                rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
                if (rc < 0) {
                        plt_err("Failed to release XAQ %d", rc);
                        return rc;
                }
                rte_mempool_free(dev->xaq_pool);
                dev->xaq_pool = NULL;
        }

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        return 0;
}

void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

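/* Map the eventdev 0-255 queue priority range onto the 8 SSO HWGRP priority
 * levels.
 */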
int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
                     const struct rte_event_queue_conf *queue_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
        /* Normalize <0-255> to <0-7> */
        return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
                                          queue_conf->priority / 32);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}

void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

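/* Resolve the base address of every HWGRP and hand them to the HW-specific
 * workslot setup callback for this port.
 */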
int
cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    cnxk_sso_hws_setup_t hws_setup_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t grps_base[CNXK_SSO_MAX_HWGRP] = {0};
        uint16_t q;

        plt_sso_dbg("Port=%d", port_id);
        if (event_dev->data->ports[port_id] == NULL) {
                plt_err("Invalid port Id %d", port_id);
                return -EINVAL;
        }

        for (q = 0; q < dev->nb_event_queues; q++) {
                grps_base[q] = roc_sso_hwgrp_base_get(&dev->sso, q);
                if (grps_base[q] == 0) {
                        plt_err("Failed to get grp[%d] base addr", q);
                        return -EINVAL;
                }
        }

        hws_setup_fn(dev, event_dev->data->ports[port_id], grps_base);
        plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
        rte_mb();

        return 0;
}

int
cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
                       uint64_t *tmo_ticks)
{
        RTE_SET_USED(event_dev);
        *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

        return 0;
}

void
cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        roc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);
}

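/* Flush helper: forward each event drained during cleanup to the
 * application's dev_stop_flush callback, if one is registered.
 */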
static void
cnxk_handle_event(void *arg, struct rte_event event)
{
        struct rte_eventdev *event_dev = arg;

        if (event_dev->dev_ops->dev_stop_flush != NULL)
                event_dev->dev_ops->dev_stop_flush(
                        event_dev->data->dev_id, event,
                        event_dev->data->dev_stop_flush_arg);
}

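/* Reset all workslots, then drain every HWGRP through HWS0 and enable or
 * disable the group queues depending on whether the device is starting or
 * stopping.
 */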
static void
cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
                 cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t hwgrp_base;
        uint16_t i;
        void *ws;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                reset_fn(dev, ws);
        }

        rte_mb();
        ws = event_dev->data->ports[0];

        for (i = 0; i < dev->nb_event_queues; i++) {
                /* Consume all the events through HWS0 */
                hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
                flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
                /* Enable/Disable SSO GGRP */
                plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
        }
}

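/* Apply any per-HWGRP QoS configuration given via devargs and flush stale
 * events before the device starts scheduling.
 */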
int
cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
               cnxk_sso_hws_flush_t flush_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        struct roc_sso_hwgrp_qos qos[dev->qos_queue_cnt];
        int i, rc;

        plt_sso_dbg();
        for (i = 0; i < dev->qos_queue_cnt; i++) {
                qos[i].hwgrp = dev->qos_parse_data[i].queue;
                qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
                qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
                qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
        }
        rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
                                      dev->xae_cnt);
        if (rc < 0) {
                plt_sso_dbg("failed to configure HWGRP QoS rc = %d", rc);
                return -EINVAL;
        }
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, true);
        rte_mb();

        return 0;
}

void
cnxk_sso_stop(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
              cnxk_sso_hws_flush_t flush_fn)
{
        plt_sso_dbg();
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, false);
        rte_mb();
}

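/* Unlink all queues from every port, free the workslot cookies and XAQ/flow
 * control resources, and reset the software state to defaults.
 */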
int
cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t all_queues[CNXK_SSO_MAX_HWGRP];
        uint16_t i;
        void *ws;

        if (!dev->configured)
                return 0;

        for (i = 0; i < dev->nb_event_queues; i++)
                all_queues[i] = i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                unlink_fn(dev, ws, all_queues, dev->nb_event_queues);
                rte_free(cnxk_sso_hws_get_cookie(ws));
                event_dev->data->ports[i] = NULL;
        }

        roc_sso_rsrc_fini(&dev->sso);
        rte_mempool_free(dev->xaq_pool);
        rte_memzone_free(rte_memzone_lookup(CNXK_SSO_FC_NAME));

        dev->fc_iova = 0;
        dev->fc_mem = NULL;
        dev->xaq_pool = NULL;
        dev->configured = false;
        dev->is_timeout_deq = 0;
        dev->nb_event_ports = 0;
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);

        return 0;
}

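/* Parse one "Qx-XAQ-TAQ-IAQ" group from the QoS devargs and append it to the
 * per-queue QoS array.
 */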
static void
parse_queue_param(char *value, void *opaque)
{
        struct cnxk_sso_qos queue_qos = {0};
        uint8_t *val = (uint8_t *)&queue_qos;
        struct cnxk_sso_evdev *dev = opaque;
        char *tok = strtok(value, "-");
        struct cnxk_sso_qos *old_ptr;

        if (!strlen(value))
                return;

        /* Stop once the scratch struct is full to avoid overrunning it on
         * malformed input.
         */
        while (tok != NULL && val < (uint8_t *)(&queue_qos + 1)) {
                *val = atoi(tok);
                tok = strtok(NULL, "-");
                val++;
        }

        if (val != (&queue_qos.iaq_prcnt + 1)) {
                plt_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
                return;
        }

        dev->qos_queue_cnt++;
        old_ptr = dev->qos_parse_data;
        dev->qos_parse_data = rte_realloc(
                dev->qos_parse_data,
                sizeof(struct cnxk_sso_qos) * dev->qos_queue_cnt, 0);
        if (dev->qos_parse_data == NULL) {
                dev->qos_parse_data = old_ptr;
                dev->qos_queue_cnt--;
                return;
        }
        dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}

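/* Walk a "[...][...]"-style devargs string and hand each bracketed group to
 * parse_queue_param().
 */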
static void
parse_qos_list(const char *value, void *opaque)
{
        char *s = strdup(value);
        char *start = NULL;
        char *end = NULL;
        char *f = s;

        if (s == NULL)
                return;

        while (*s) {
                if (*s == '[')
                        start = s;
                else if (*s == ']')
                        end = s;

                if (start && start < end) {
                        *end = 0;
                        parse_queue_param(start + 1, opaque);
                        s = end;
                        start = end;
                }
                s++;
        }

        free(f);
}

static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
        RTE_SET_USED(key);

        /* Dict format is [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]; '-' is used since
         * ',' isn't allowed. Everything is expressed in percentages, 0
         * represents default.
         */
        parse_qos_list(value, opaque);

        return 0;
}

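/* Parse the SSO devargs (XAE count, HWGRP QoS dict, forced backpressure,
 * single workslot, gateway mode) into the device structure.
 */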
static void
cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        uint8_t single_ws = 0;

        if (devargs == NULL)
                return;
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        rte_kvargs_process(kvlist, CNXK_SSO_XAE_CNT, &parse_kvargs_value,
                           &dev->xae_cnt);
        rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
                           dev);
        rte_kvargs_process(kvlist, CNXK_SSO_FORCE_BP, &parse_kvargs_flag,
                           &dev->force_ena_bp);
        rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag,
                           &single_ws);
        rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_flag,
                           &dev->gw_mode);
        dev->dual_ws = !single_ws;
        rte_kvargs_free(kvlist);
}

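/* Common probe-time initialization: publish the device pointer through a
 * memzone, parse devargs and initialize the RoC SSO and TIM blocks.
 */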
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
        const struct rte_memzone *mz = NULL;
        struct rte_pci_device *pci_dev;
        struct cnxk_sso_evdev *dev;
        int rc;

        mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
                                 SOCKET_ID_ANY, 0);
        if (mz == NULL) {
                plt_err("Failed to create eventdev memzone");
                return -ENOMEM;
        }

        dev = cnxk_sso_pmd_priv(event_dev);
        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
        dev->sso.pci_dev = pci_dev;

        *(uint64_t *)mz->addr = (uint64_t)dev;
        cnxk_sso_parse_devargs(dev, pci_dev->device.devargs);

        /* Initialize the base cnxk_dev object */
        rc = roc_sso_dev_init(&dev->sso);
        if (rc < 0) {
                plt_err("Failed to initialize RoC SSO rc=%d", rc);
                goto error;
        }

        dev->is_timeout_deq = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;

        cnxk_tim_init(&dev->sso);

        return 0;

error:
        rte_memzone_free(mz);
        return rc;
}

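/* Common teardown on device uninit; only the primary process releases the
 * RoC resources.
 */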
int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        /* For secondary processes, nothing to be done */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        cnxk_tim_fini();
        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_dev_fini(&dev->sso);

        return 0;
}

int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}