/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_cryptodev_ops.h"
#include "cnxk_eventdev.h"

static int
crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
                        struct cnxk_cpt_qp *qp)
{
        char name[RTE_MEMPOOL_NAMESIZE];
        uint32_t cache_size, nb_req;
        unsigned int req_size;
        uint32_t nb_desc_min;

        /*
         * Update the CPT flow-control threshold. Reserve room for one
         * hardware burst per available core so that all cores can enqueue
         * simultaneously.
         */
        if (roc_model_is_cn10k())
                nb_desc_min = rte_lcore_count() * 32;
        else
                nb_desc_min = rte_lcore_count() * 2;

        if (qp->lmtline.fc_thresh < nb_desc_min) {
                plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
                        rte_lcore_count());
                return -ENOSPC;
        }

        qp->lmtline.fc_thresh -= nb_desc_min;

        snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
                 cdev->data->dev_id, qp->lf.lf_id);
        req_size = sizeof(struct cpt_inflight_req);
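        /* Size the inflight-request pool so the per-lcore caches cannot drain
         * it: the cache is capped at nb_desc / 1.5 and the pool holds at
         * least cache_size objects per lcore.
         */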
        cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
        nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
        qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
                                           0, NULL, NULL, NULL, NULL,
                                           rte_socket_id(), 0);
        if (qp->ca.req_mp == NULL)
                return -ENOMEM;

        qp->ca.enabled = true;

        return 0;
}

int
cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                           const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
        uint32_t adptr_xae_cnt = 0;
        struct cnxk_cpt_qp *qp;
        int ret;

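        /* A queue_pair_id of -1 requests setup of every queue pair on the
         * cryptodev; otherwise only the requested queue pair is set up.
         */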
        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        ret = crypto_adapter_qp_setup(cdev, qp);
                        if (ret) {
                                cnxk_crypto_adapter_qp_del(cdev, -1);
                                return ret;
                        }
                        adptr_xae_cnt += qp->ca.req_mp->size;
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                ret = crypto_adapter_qp_setup(cdev, qp);
                if (ret)
                        return ret;
                adptr_xae_cnt = qp->ca.req_mp->size;
        }

        /* Update crypto adapter XAE count */
        sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
        cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
{
        int ret;

        rte_mempool_free(qp->ca.req_mp);
        qp->ca.enabled = false;

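        /* Re-initialize the LMT line so the flow-control threshold consumed
         * in crypto_adapter_qp_setup() is restored.
         */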
        ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
        if (ret < 0) {
                plt_err("Could not reset lmtline for queue pair %d",
                        qp->lf.lf_id);
                return ret;
        }

        return 0;
}

int
cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
                           int32_t queue_pair_id)
{
        struct cnxk_cpt_qp *qp;

        if (queue_pair_id == -1) {
                uint16_t qp_id;

                for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
                        qp = cdev->data->queue_pairs[qp_id];
                        if (qp->ca.enabled)
                                crypto_adapter_qp_free(qp);
                }
        } else {
                qp = cdev->data->queue_pairs[queue_pair_id];
                if (qp->ca.enabled)
                        crypto_adapter_qp_free(qp);
        }

        return 0;
}

void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                  struct rte_event_dev_info *dev_info)
{
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
                                  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE |
                                  RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR;
}

int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
        uint32_t xae_cnt;
        int rc;

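        /* Size the XAQ aura from the in-unit entry count plus whatever the
         * xae_cnt devargs and the event adapters have requested.
         */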
        xae_cnt = dev->sso.iue;
        if (dev->xae_cnt)
                xae_cnt += dev->xae_cnt;
        if (dev->adptr_xae_cnt)
                xae_cnt += (dev->adptr_xae_cnt);

        plt_sso_dbg("Configuring %d xae buffers", xae_cnt);
        rc = roc_sso_hwgrp_init_xaq_aura(&dev->sso, xae_cnt);
        if (rc < 0) {
                plt_err("Failed to configure XAQ aura");
                return rc;
        }
        dev->xaq_lmt = dev->sso.xaq.xaq_lmt;
        dev->fc_iova = (uint64_t)dev->sso.xaq.fc;

        return roc_sso_hwgrp_alloc_xaq(
                &dev->sso,
                roc_npa_aura_handle_to_aura(dev->sso.xaq.aura_handle),
                dev->nb_event_queues);
}

int
cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc = 0;

        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_stop(event_dev);

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0) {
                plt_err("Failed to alloc XAQ %d", rc);
                return rc;
        }

        rte_mb();
        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_start(event_dev);

        return 0;
}

int
cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
                       cnxk_sso_init_hws_mem_t init_hws_fn,
                       cnxk_sso_hws_setup_t setup_hws_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cnxk_sso_hws_cookie *ws_cookie;
                void *ws;

                /* Reuse the port's HWS memory if it was already allocated,
                 * otherwise allocate it now.
                 */
                if (event_dev->data->ports[i] != NULL)
                        ws = event_dev->data->ports[i];
                else
                        ws = init_hws_fn(dev, i);
                if (ws == NULL)
                        goto hws_fini;
                ws_cookie = cnxk_sso_hws_get_cookie(ws);
                ws_cookie->event_dev = event_dev;
                ws_cookie->configured = 1;
                event_dev->data->ports[i] = ws;
                cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
                                    i, setup_hws_fn);
        }

        return 0;
hws_fini:
        /* Unwind the ports set up so far; free each HWS cookie before the
         * port pointer it is derived from is cleared.
         */
        for (i = i - 1; i >= 0; i--) {
                rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
                event_dev->data->ports[i] = NULL;
        }
        return -ENOMEM;
}

void
cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
                       cnxk_sso_link_t link_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP];
        int i, j;

        for (i = 0; i < dev->nb_event_ports; i++) {
                uint16_t nb_hwgrp = 0;

                links_map = event_dev->data->links_map;
                /* Point links_map to this port specific area */
                links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);

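                /* Collect the queues linked to this port; entries still set
                 * to 0xdead were never linked and are skipped.
                 */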
                for (j = 0; j < dev->nb_event_queues; j++) {
                        if (links_map[j] == 0xdead)
                                continue;
                        hwgrp[nb_hwgrp] = j;
                        nb_hwgrp++;
                }

                link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp);
        }
}

int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;

        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;
        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                plt_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (!conf->nb_event_queues || !conf->nb_event_ports ||
            conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                plt_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                plt_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                plt_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_hwgrp_free_xaq_aura(&dev->sso, dev->sso.nb_hwgrp);

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        return 0;
}

void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
                     const struct rte_event_queue_conf *queue_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint8_t priority, weight, affinity;

        /* Default weight and affinity */
        dev->mlt_prio[queue_id].weight = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
        dev->mlt_prio[queue_id].affinity = RTE_EVENT_QUEUE_AFFINITY_HIGHEST;

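        /* Normalize the eventdev-level priority, weight and affinity values
         * into the level counts supported by the SSO hardware group.
         */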
        priority = CNXK_QOS_NORMALIZE(queue_conf->priority, 0,
                                      RTE_EVENT_DEV_PRIORITY_LOWEST,
                                      CNXK_SSO_PRIORITY_CNT);
        weight = CNXK_QOS_NORMALIZE(
                dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
                RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
        affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
                                      RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
                                      CNXK_SSO_AFFINITY_CNT);

        plt_sso_dbg("Queue=%u prio=%u weight=%u affinity=%u", queue_id,
                    priority, weight, affinity);

        return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
                                          priority);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}

int
cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev, uint8_t queue_id,
                             uint32_t attr_id, uint32_t *attr_value)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        if (attr_id == RTE_EVENT_QUEUE_ATTR_WEIGHT)
                *attr_value = dev->mlt_prio[queue_id].weight;
        else if (attr_id == RTE_EVENT_QUEUE_ATTR_AFFINITY)
                *attr_value = dev->mlt_prio[queue_id].affinity;
        else
                return -EINVAL;

        return 0;
}

int
cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
                             uint32_t attr_id, uint64_t attr_value)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint8_t priority, weight, affinity;
        struct rte_event_queue_conf *conf;

        conf = &event_dev->data->queues_cfg[queue_id];

        switch (attr_id) {
        case RTE_EVENT_QUEUE_ATTR_PRIORITY:
                conf->priority = attr_value;
                break;
        case RTE_EVENT_QUEUE_ATTR_WEIGHT:
                dev->mlt_prio[queue_id].weight = attr_value;
                break;
        case RTE_EVENT_QUEUE_ATTR_AFFINITY:
                dev->mlt_prio[queue_id].affinity = attr_value;
                break;
        case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
        case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
        case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
        case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
                /* FALLTHROUGH */
                plt_sso_dbg("Unsupported attribute id %u", attr_id);
                return -ENOTSUP;
        default:
                plt_err("Invalid attribute id %u", attr_id);
                return -EINVAL;
        }

        priority = CNXK_QOS_NORMALIZE(conf->priority, 0,
                                      RTE_EVENT_DEV_PRIORITY_LOWEST,
                                      CNXK_SSO_PRIORITY_CNT);
        weight = CNXK_QOS_NORMALIZE(
                dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
                RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
        affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
                                      RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
                                      CNXK_SSO_AFFINITY_CNT);

        return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
                                          priority);
}

void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

int
cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    cnxk_sso_hws_setup_t hws_setup_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t grp_base = 0;

        plt_sso_dbg("Port=%d", port_id);
        if (event_dev->data->ports[port_id] == NULL) {
                plt_err("Invalid port Id %d", port_id);
                return -EINVAL;
        }

        grp_base = roc_sso_hwgrp_base_get(&dev->sso, 0);
        if (grp_base == 0) {
                plt_err("Failed to get grp base addr");
                return -EINVAL;
        }

        hws_setup_fn(dev, event_dev->data->ports[port_id], grp_base);
        plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
        rte_mb();

        return 0;
}

int
cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
                       uint64_t *tmo_ticks)
{
        RTE_SET_USED(event_dev);
        *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

        return 0;
}

void
cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        roc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);
}

static void
cnxk_handle_event(void *arg, struct rte_event event)
{
        struct rte_eventdev *event_dev = arg;

        if (event_dev->dev_ops->dev_stop_flush != NULL)
                event_dev->dev_ops->dev_stop_flush(
                        event_dev->data->dev_id, event,
                        event_dev->data->dev_stop_flush_arg);
}

static void
cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
                 cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
{
        uint8_t pend_list[RTE_EVENT_MAX_QUEUES_PER_DEV], pend_cnt, new_pcnt;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t hwgrp_base;
        uint8_t queue_id, i;
        void *ws;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                reset_fn(dev, ws);
        }

        rte_mb();

        /* Consume all the events through HWS0 */
        ws = event_dev->data->ports[0];

        /* Starting list of queues to flush */
        pend_cnt = dev->nb_event_queues;
        for (i = 0; i < dev->nb_event_queues; i++)
                pend_list[i] = i;

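        /* Flush in passes: queues that still report pending work stay on the
         * list and are retried until every queue drains.
         */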
        while (pend_cnt) {
                new_pcnt = 0;
                for (i = 0; i < pend_cnt; i++) {
                        queue_id = pend_list[i];
                        hwgrp_base =
                                roc_sso_hwgrp_base_get(&dev->sso, queue_id);
                        if (flush_fn(ws, queue_id, hwgrp_base,
                                     cnxk_handle_event, event_dev)) {
                                pend_list[new_pcnt++] = queue_id;
                                continue;
                        }
                        /* Enable/Disable SSO GGRP */
                        plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
                }
                pend_cnt = new_pcnt;
        }
}

int
cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
               cnxk_sso_hws_flush_t flush_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        struct roc_sso_hwgrp_qos qos[dev->qos_queue_cnt];
        int i, rc;

        plt_sso_dbg();
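        /* Apply the per-HWGRP QoS percentages collected from the qos devargs
         * before enabling the hardware groups.
         */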
        for (i = 0; i < dev->qos_queue_cnt; i++) {
                qos[i].hwgrp = dev->qos_parse_data[i].queue;
                qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
                qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
                qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
        }
        rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
                                      dev->xae_cnt);
        if (rc < 0) {
                plt_sso_dbg("failed to configure HWGRP QoS rc = %d", rc);
                return -EINVAL;
        }
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, true);
        rte_mb();

        return 0;
}

void
cnxk_sso_stop(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
              cnxk_sso_hws_flush_t flush_fn)
{
        plt_sso_dbg();
        cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, false);
        rte_mb();
}

int
cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t all_queues[CNXK_SSO_MAX_HWGRP];
        uint16_t i;
        void *ws;

        if (!dev->configured)
                return 0;

        for (i = 0; i < dev->nb_event_queues; i++)
                all_queues[i] = i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                ws = event_dev->data->ports[i];
                unlink_fn(dev, ws, all_queues, dev->nb_event_queues);
                rte_free(cnxk_sso_hws_get_cookie(ws));
                event_dev->data->ports[i] = NULL;
        }

        roc_sso_rsrc_fini(&dev->sso);

        dev->fc_iova = 0;
        dev->configured = false;
        dev->is_timeout_deq = 0;
        dev->nb_event_ports = 0;
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);

        return 0;
}

static void
parse_queue_param(char *value, void *opaque)
{
        struct cnxk_sso_qos queue_qos = {0};
        uint16_t *val = (uint16_t *)&queue_qos;
        struct cnxk_sso_evdev *dev = opaque;
        char *tok = strtok(value, "-");
        struct cnxk_sso_qos *old_ptr;

        if (!strlen(value))
                return;

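        /* Tokens are written sequentially into struct cnxk_sso_qos, which is
         * assumed to lay out its fields in [Qx-XAQ-TAQ-IAQ] order with
         * iaq_prcnt as the last member.
         */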
        while (tok != NULL) {
                *val = atoi(tok);
                tok = strtok(NULL, "-");
                val++;
        }

        if (val != (&queue_qos.iaq_prcnt + 1)) {
                plt_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
                return;
        }

        dev->qos_queue_cnt++;
        old_ptr = dev->qos_parse_data;
        dev->qos_parse_data = rte_realloc(
                dev->qos_parse_data,
                sizeof(struct cnxk_sso_qos) * dev->qos_queue_cnt, 0);
        if (dev->qos_parse_data == NULL) {
                dev->qos_parse_data = old_ptr;
                dev->qos_queue_cnt--;
                return;
        }
        dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}

static void
parse_qos_list(const char *value, void *opaque)
{
        char *s = strdup(value);
        char *start = NULL;
        char *end = NULL;
        char *f = s;

        while (*s) {
                if (*s == '[')
                        start = s;
                else if (*s == ']')
                        end = s;

                if (start && start < end) {
                        *end = 0;
                        parse_queue_param(start + 1, opaque);
                        s = end;
                        start = end;
                }
                s++;
        }

        free(f);
}

static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
        RTE_SET_USED(key);

        /* The dict format is [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]; '-' is used as
         * the separator because ',' is not allowed in devargs. All values are
         * percentages and 0 selects the default.
         */
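        /* For example, qos=[1-50-50-50] requests 50% of the XAQ, TAQ and IAQ
         * resources for queue 1.
         */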
        parse_qos_list(value, opaque);

        return 0;
}

static void
cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        uint8_t single_ws = 0;

        if (devargs == NULL)
                return;
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        rte_kvargs_process(kvlist, CNXK_SSO_XAE_CNT, &parse_kvargs_value,
                           &dev->xae_cnt);
        rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
                           dev);
        rte_kvargs_process(kvlist, CNXK_SSO_FORCE_BP, &parse_kvargs_flag,
                           &dev->force_ena_bp);
        rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag,
                           &single_ws);
        rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_flag,
                           &dev->gw_mode);
        dev->dual_ws = !single_ws;
        rte_kvargs_free(kvlist);
}

int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
        const struct rte_memzone *mz = NULL;
        struct rte_pci_device *pci_dev;
        struct cnxk_sso_evdev *dev;
        int rc;

        mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
                                 SOCKET_ID_ANY, 0);
        if (mz == NULL) {
                plt_err("Failed to create eventdev memzone");
                return -ENOMEM;
        }

        dev = cnxk_sso_pmd_priv(event_dev);
        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
        dev->sso.pci_dev = pci_dev;

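        /* The driver private pointer is published through the named memzone,
         * presumably so other cnxk components can look the SSO device up by
         * name.
         */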
        *(uint64_t *)mz->addr = (uint64_t)dev;
        cnxk_sso_parse_devargs(dev, pci_dev->device.devargs);

        /* Initialize the base cnxk_dev object */
        rc = roc_sso_dev_init(&dev->sso);
        if (rc < 0) {
                plt_err("Failed to initialize RoC SSO rc=%d", rc);
                goto error;
        }

        dev->is_timeout_deq = 0;
        dev->min_dequeue_timeout_ns = 0;
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;

        cnxk_tim_init(&dev->sso);

        return 0;

error:
        rte_memzone_free(mz);
        return rc;
}

int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        /* For secondary processes, nothing to be done */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        cnxk_tim_fini();
        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_dev_fini(&dev->sso);

        return 0;
}

int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}