drivers/event/cnxk/cnxk_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_cryptodev_ops.h"
#include "cnxk_eventdev.h"

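/* Create the request mempool used to track operations in flight on this
 * crypto queue pair when it is bound to the event crypto adapter.
 */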
static int
crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
                        struct cnxk_cpt_qp *qp)
{
        char name[RTE_MEMPOOL_NAMESIZE];
        uint32_t cache_size, nb_req;
        unsigned int req_size;

        snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
                 cdev->data->dev_id, qp->lf.lf_id);
        req_size = sizeof(struct cpt_inflight_req);
        cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
        nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
        qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
                                           0, NULL, NULL, NULL, NULL,
                                           rte_socket_id(), 0);
        if (qp->ca.req_mp == NULL)
                return -ENOMEM;

        qp->ca.enabled = true;

        return 0;
}

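/* Bind one crypto queue pair (or all of them when queue_pair_id is -1) to
 * the event crypto adapter and grow the SSO XAE count to cover the new
 * request mempool entries.
 */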
int
cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
        uint32_t adptr_xae_cnt = 0;
        struct cnxk_cpt_qp *qp;
        int ret;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        ret = crypto_adapter_qp_setup(cdev, qp);
                        if (ret) {
                                cnxk_crypto_adapter_qp_del(cdev, -1);
                                return ret;
                        }
                        adptr_xae_cnt += qp->ca.req_mp->size;
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                ret = crypto_adapter_qp_setup(cdev, qp);
                if (ret)
                        return ret;
                adptr_xae_cnt = qp->ca.req_mp->size;
        }

        /* Update crypto adapter XAE count */
        sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
        cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
{
        rte_mempool_free(qp->ca.req_mp);
        qp->ca.enabled = false;

        return 0;
}

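/* Unbind one crypto queue pair (or all of them when queue_pair_id is -1)
 * from the event crypto adapter and release its request mempool.
 */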
int
cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_cpt_qp *qp;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        if (qp->ca.enabled)
                                crypto_adapter_qp_free(qp);
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                if (qp->ca.enabled)
                        crypto_adapter_qp_free(qp);
        }

        return 0;
}

void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                  struct rte_event_dev_info *dev_info)
{
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
                                  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE |
                                  RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR;
}

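/* Size the XAQ aura from the SSO in-unit entry count plus any extra events
 * requested via devargs (xae_cnt) and by the adapters (adptr_xae_cnt).
 */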
int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
        uint32_t xae_cnt;
        int rc;

        xae_cnt = dev->sso.iue;
        if (dev->xae_cnt)
                xae_cnt += dev->xae_cnt;
        if (dev->adptr_xae_cnt)
                xae_cnt += dev->adptr_xae_cnt;

        plt_sso_dbg("Configuring %d xae buffers", xae_cnt);
        rc = roc_sso_hwgrp_init_xaq_aura(&dev->sso, xae_cnt);
        if (rc < 0) {
                plt_err("Failed to configure XAQ aura");
                return rc;
        }
        dev->xaq_lmt = dev->sso.xaq.xaq_lmt;
        dev->fc_iova = (uint64_t)dev->sso.xaq.fc;

        return roc_sso_hwgrp_alloc_xaq(
                &dev->sso,
                roc_npa_aura_handle_to_aura(dev->sso.xaq.aura_handle),
                dev->nb_event_queues);
}

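/* Re-allocate the XAQ aura after an adapter has raised the XAE demand.
 * The device is stopped and restarted around the allocation if it was
 * already running.
 */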
int
cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc = 0;

        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_stop(event_dev);

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0) {
                plt_err("Failed to alloc XAQ %d", rc);
                return rc;
        }

        rte_mb();
        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_start(event_dev);

        return 0;
}

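/* Allocate (or reuse) the HWS memory for every event port and run the
 * platform specific setup callback on each of them.
 */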
int
cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
                       cnxk_sso_init_hws_mem_t init_hws_fn,
                       cnxk_sso_hws_setup_t setup_hws_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cnxk_sso_hws_cookie *ws_cookie;
                void *ws;

                /* Reuse the port memory if it has already been allocated */
                if (event_dev->data->ports[i] != NULL)
                        ws = event_dev->data->ports[i];
                else
                        ws = init_hws_fn(dev, i);
                if (ws == NULL)
                        goto hws_fini;
                ws_cookie = cnxk_sso_hws_get_cookie(ws);
                ws_cookie->event_dev = event_dev;
                ws_cookie->configured = 1;
                event_dev->data->ports[i] = ws;
                cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
                                    i, setup_hws_fn);
        }

        return 0;
hws_fini:
        for (i = i - 1; i >= 0; i--) {
                /* Free the cookie before the port pointer is cleared */
                rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
                event_dev->data->ports[i] = NULL;
        }
        return -ENOMEM;
}

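/* Re-establish the port-to-queue links recorded in links_map after a
 * reconfigure; 0xdead marks an unused link slot.
 */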
void
cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
                       cnxk_sso_link_t link_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP];
        int i, j;

        for (i = 0; i < dev->nb_event_ports; i++) {
                uint16_t nb_hwgrp = 0;

                links_map = event_dev->data->links_map;
                /* Point links_map to this port specific area */
                links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);

                for (j = 0; j < dev->nb_event_queues; j++) {
                        if (links_map[j] == 0xdead)
                                continue;
                        hwgrp[nb_hwgrp] = j;
                        nb_hwgrp++;
                }

                link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp);
        }
}

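/* Validate the requested device configuration against the SSO limits and
 * cache the accepted values in the driver private data.
 */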
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;

        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;
        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                plt_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (!conf->nb_event_queues || !conf->nb_event_ports ||
            conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                plt_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                plt_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                plt_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_hwgrp_free_xaq_aura(&dev->sso, dev->sso.nb_hwgrp);

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        return 0;
}

void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

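/* Normalize the eventdev priority, weight and affinity ranges onto the SSO
 * HWGRP ranges and program them for this queue.
 */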
int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
                     const struct rte_event_queue_conf *queue_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint8_t priority, weight, affinity;

        /* Default weight and affinity */
        dev->mlt_prio[queue_id].weight = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
        dev->mlt_prio[queue_id].affinity = RTE_EVENT_QUEUE_AFFINITY_HIGHEST;

        priority = CNXK_QOS_NORMALIZE(queue_conf->priority, 0,
                                      RTE_EVENT_DEV_PRIORITY_LOWEST,
                                      CNXK_SSO_PRIORITY_CNT);
        weight = CNXK_QOS_NORMALIZE(
                dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
                RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
        affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
                                      RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
                                      CNXK_SSO_AFFINITY_CNT);

        plt_sso_dbg("Queue=%u prio=%u weight=%u affinity=%u", queue_id,
                    priority, weight, affinity);

        return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
                                          priority);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}

int
cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev, uint8_t queue_id,
                             uint32_t attr_id, uint32_t *attr_value)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        if (attr_id == RTE_EVENT_QUEUE_ATTR_WEIGHT)
                *attr_value = dev->mlt_prio[queue_id].weight;
        else if (attr_id == RTE_EVENT_QUEUE_ATTR_AFFINITY)
                *attr_value = dev->mlt_prio[queue_id].affinity;
        else
                return -EINVAL;

        return 0;
}

int
cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
                             uint32_t attr_id, uint64_t attr_value)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint8_t priority, weight, affinity;
        struct rte_event_queue_conf *conf;

        conf = &event_dev->data->queues_cfg[queue_id];

        switch (attr_id) {
        case RTE_EVENT_QUEUE_ATTR_PRIORITY:
                conf->priority = attr_value;
                break;
        case RTE_EVENT_QUEUE_ATTR_WEIGHT:
                dev->mlt_prio[queue_id].weight = attr_value;
                break;
        case RTE_EVENT_QUEUE_ATTR_AFFINITY:
                dev->mlt_prio[queue_id].affinity = attr_value;
                break;
        case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
        case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
        case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
        case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
                /* FALLTHROUGH */
                plt_sso_dbg("Unsupported attribute id %u", attr_id);
                return -ENOTSUP;
        default:
                plt_err("Invalid attribute id %u", attr_id);
                return -EINVAL;
        }

        priority = CNXK_QOS_NORMALIZE(conf->priority, 0,
                                      RTE_EVENT_DEV_PRIORITY_LOWEST,
                                      CNXK_SSO_PRIORITY_CNT);
        weight = CNXK_QOS_NORMALIZE(
                dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
                RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
        affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
                                      RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
                                      CNXK_SSO_AFFINITY_CNT);

        return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
                                          priority);
}

void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

int
cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    cnxk_sso_hws_setup_t hws_setup_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t grp_base = 0;

        plt_sso_dbg("Port=%d", port_id);
        if (event_dev->data->ports[port_id] == NULL) {
                plt_err("Invalid port Id %d", port_id);
                return -EINVAL;
        }

        grp_base = roc_sso_hwgrp_base_get(&dev->sso, 0);
        if (grp_base == 0) {
                plt_err("Failed to get grp base addr");
                return -EINVAL;
        }

        hws_setup_fn(dev, event_dev->data->ports[port_id], grp_base);
        plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
        rte_mb();

        return 0;
}

int
cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
                       uint64_t *tmo_ticks)
{
        RTE_SET_USED(event_dev);
        *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

        return 0;
}

void
cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        roc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);
}

static void
cnxk_handle_event(void *arg, struct rte_event event)
{
        struct rte_eventdev *event_dev = arg;

        if (event_dev->dev_ops->dev_stop_flush != NULL)
                event_dev->dev_ops->dev_stop_flush(
                        event_dev->data->dev_id, event,
                        event_dev->data->dev_stop_flush_arg);
}

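/* Reset every HWS, then drain all queues through HWS0, handing flushed
 * events to the application's dev_stop_flush callback, and finally write
 * the GGRP queue control to enable or disable scheduling.
 */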
static void
cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
                 cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
{
        uint8_t pend_list[RTE_EVENT_MAX_QUEUES_PER_DEV], pend_cnt, new_pcnt;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t hwgrp_base;
        uint8_t queue_id, i;
        void *ws;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                reset_fn(dev, ws);
        }

        rte_mb();

        /* Consume all the events through HWS0 */
        ws = event_dev->data->ports[0];

        /* Starting list of queues to flush */
        pend_cnt = dev->nb_event_queues;
        for (i = 0; i < dev->nb_event_queues; i++)
                pend_list[i] = i;

        while (pend_cnt) {
                new_pcnt = 0;
                for (i = 0; i < pend_cnt; i++) {
                        queue_id = pend_list[i];
                        hwgrp_base =
                                roc_sso_hwgrp_base_get(&dev->sso, queue_id);
                        if (flush_fn(ws, queue_id, hwgrp_base,
                                     cnxk_handle_event, event_dev)) {
                                pend_list[new_pcnt++] = queue_id;
                                continue;
                        }
                        /* Enable/Disable SSO GGRP */
                        plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
                }
                pend_cnt = new_pcnt;
        }
}

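/* Apply the per-queue QoS requested through devargs, then flush stale
 * events and enable the GGRPs.
 */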
int
cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
               cnxk_sso_hws_flush_t flush_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        struct roc_sso_hwgrp_qos qos[dev->qos_queue_cnt];
        int i, rc;

        plt_sso_dbg();
        for (i = 0; i < dev->qos_queue_cnt; i++) {
                /* Fill one QoS entry per configured queue */
                qos[i].hwgrp = dev->qos_parse_data[i].queue;
                qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
                qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
                qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
        }
        rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
                                      dev->xae_cnt);
        if (rc < 0) {
                plt_sso_dbg("failed to configure HWGRP QoS rc = %d", rc);
                return -EINVAL;
        }
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, true);
        rte_mb();

        return 0;
}

void
cnxk_sso_stop(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
              cnxk_sso_hws_flush_t flush_fn)
{
        plt_sso_dbg();
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, false);
        rte_mb();
}

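/* Unlink every queue from every port, free the port cookies and reset the
 * driver private state to its defaults.
 */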
int
cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t all_queues[CNXK_SSO_MAX_HWGRP];
        uint16_t i;
        void *ws;

        if (!dev->configured)
                return 0;

        for (i = 0; i < dev->nb_event_queues; i++)
                all_queues[i] = i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                unlink_fn(dev, ws, all_queues, dev->nb_event_queues);
                rte_free(cnxk_sso_hws_get_cookie(ws));
                event_dev->data->ports[i] = NULL;
        }

        roc_sso_rsrc_fini(&dev->sso);

        dev->fc_iova = 0;
        dev->configured = false;
        dev->is_timeout_deq = 0;
        dev->nb_event_ports = 0;
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);

        return 0;
}

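/* Parse a single "Qx-XAQ-TAQ-IAQ" tuple into a cnxk_sso_qos entry and
 * append it to the per-queue QoS list.
 */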
static void
parse_queue_param(char *value, void *opaque)
{
        struct cnxk_sso_qos queue_qos = {0};
        uint16_t *val = (uint16_t *)&queue_qos;
        struct cnxk_sso_evdev *dev = opaque;
        char *tok = strtok(value, "-");
        struct cnxk_sso_qos *old_ptr;

        if (!strlen(value))
                return;

        /* Fill at most the four queue_qos fields; leftover tokens make the
         * check below reject the parameter.
         */
        while (tok != NULL && val < &queue_qos.iaq_prcnt + 1) {
                *val = atoi(tok);
                tok = strtok(NULL, "-");
                val++;
        }

        if (tok != NULL || val != (&queue_qos.iaq_prcnt + 1)) {
                plt_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
                return;
        }

        dev->qos_queue_cnt++;
        old_ptr = dev->qos_parse_data;
        dev->qos_parse_data = rte_realloc(
                dev->qos_parse_data,
                sizeof(struct cnxk_sso_qos) * dev->qos_queue_cnt, 0);
        if (dev->qos_parse_data == NULL) {
                dev->qos_parse_data = old_ptr;
                dev->qos_queue_cnt--;
                return;
        }
        dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}

static void
parse_qos_list(const char *value, void *opaque)
{
        char *s = strdup(value);
        char *start = NULL;
        char *end = NULL;
        char *f = s;

        if (s == NULL)
                return;

        while (*s) {
                if (*s == '[')
                        start = s;
                else if (*s == ']')
                        end = s;

                if (start && start < end) {
                        *end = 0;
                        parse_queue_param(start + 1, opaque);
                        s = end;
                        start = end;
                }
                s++;
        }

        free(f);
}

static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
        RTE_SET_USED(key);

        /* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] uses '-' because ','
         * isn't allowed. Everything is expressed in percentages; 0 selects
         * the default.
         */
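        /* For example, a value of "[1-50-50-50]" requests 50% of XAQ, TAQ
         * and IAQ for queue 1 (illustrative values).
         */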
        parse_qos_list(value, opaque);

        return 0;
}

static void
cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        uint8_t single_ws = 0;

        if (devargs == NULL)
                return;
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        rte_kvargs_process(kvlist, CNXK_SSO_XAE_CNT, &parse_kvargs_value,
                           &dev->xae_cnt);
        rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
                           dev);
        rte_kvargs_process(kvlist, CNXK_SSO_FORCE_BP, &parse_kvargs_flag,
                           &dev->force_ena_bp);
        rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag,
                           &single_ws);
        rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_flag,
                           &dev->gw_mode);
        dev->dual_ws = !single_ws;
        rte_kvargs_free(kvlist);
}

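/* Common probe-time initialization: publish the driver private pointer in
 * a named memzone, parse devargs, initialize the RoC SSO object and the
 * shared timer (TIM) resources.
 */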
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
        const struct rte_memzone *mz = NULL;
        struct rte_pci_device *pci_dev;
        struct cnxk_sso_evdev *dev;
        int rc;

        mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
                                 SOCKET_ID_ANY, 0);
        if (mz == NULL) {
                plt_err("Failed to create eventdev memzone");
                return -ENOMEM;
        }

        dev = cnxk_sso_pmd_priv(event_dev);
        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
        dev->sso.pci_dev = pci_dev;

        *(uint64_t *)mz->addr = (uint64_t)dev;
        cnxk_sso_parse_devargs(dev, pci_dev->device.devargs);

        /* Initialize the base RoC SSO object */
        rc = roc_sso_dev_init(&dev->sso);
        if (rc < 0) {
                plt_err("Failed to initialize RoC SSO rc=%d", rc);
                goto error;
        }

        dev->is_timeout_deq = 0;
        dev->min_dequeue_timeout_ns = 0;
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;

        cnxk_tim_init(&dev->sso);

        return 0;

error:
        rte_memzone_free(mz);
        return rc;
}

int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        /* For secondary processes, nothing to be done */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        cnxk_tim_fini();
        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_dev_fini(&dev->sso);

        return 0;
}

int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}