/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_eventdev.h"
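
/* Advertise device capabilities and limits to the application; used by the
 * cn9k/cn10k rte_event_dev_info_get() implementations.
 */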
void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
		  struct rte_event_dev_info *dev_info)
{
	dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
	dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
	dev_info->max_event_queues = dev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = dev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = dev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
				  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
				  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
				  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
				  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
				  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
				  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
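
/* Set up the XAQ buffer pool backing the SSO HWGRPs. XAQ buffers hold
 * in-flight work-queue entries that spill out of the SSO's internal
 * storage; the pool is exposed to hardware as an NPA aura via the
 * platform mempool ops below.
 */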
int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
	char pool_name[RTE_MEMZONE_NAMESIZE];
	uint32_t xaq_cnt, npa_aura_id;
	const struct rte_memzone *mz;
	struct npa_aura_s *aura;
	static int reconfig_cnt;
	int rc;

	if (dev->xaq_pool) {
		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
		if (rc < 0) {
			plt_err("Failed to release XAQ %d", rc);
			return rc;
		}
		rte_mempool_free(dev->xaq_pool);
		dev->xaq_pool = NULL;
	}

	/*
	 * Allocate memory for Add work backpressure.
	 */
	mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
	if (mz == NULL)
		mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
						 sizeof(struct npa_aura_s) +
							 RTE_CACHE_LINE_SIZE,
						 0, 0, RTE_CACHE_LINE_SIZE);
	if (mz == NULL) {
		plt_err("Failed to allocate mem for fcmem");
		return -ENOMEM;
	}

	dev->fc_iova = mz->iova;
	dev->fc_mem = mz->addr;

	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
				     RTE_CACHE_LINE_SIZE);
	memset(aura, 0, sizeof(struct npa_aura_s));

	aura->fc_ena = 1;
	aura->fc_addr = dev->fc_iova;
	aura->fc_hyst_bits = 0; /* Store count on all updates */
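
	/* With fc_ena set, the NPA mirrors the aura's free-buffer count to
	 * fc_addr on every update; the enqueue path compares this count
	 * against xaq_lmt (set below) to apply backpressure.
	 */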

	/* Taken from HRM 14.3.3(4) */
	xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
	if (dev->xae_cnt)
		xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
	else if (dev->adptr_xae_cnt)
		xaq_cnt += (dev->adptr_xae_cnt / dev->sso.xae_waes) +
			   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
	else
		xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
			   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);

	plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
	/* Set up XAQ based on the number of queues. */
	snprintf(pool_name, sizeof(pool_name), "cnxk_xaq_buf_pool_%d",
		 reconfig_cnt);
	dev->xaq_pool = (void *)rte_mempool_create_empty(
		pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
		rte_socket_id(), 0);
	if (dev->xaq_pool == NULL) {
		plt_err("Unable to create empty mempool.");
		rte_memzone_free(mz);
		return -ENOMEM;
	}

	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
					rte_mbuf_platform_mempool_ops(), aura);
	if (rc != 0) {
		plt_err("Unable to set xaqpool ops.");
		goto alloc_fail;
	}

	rc = rte_mempool_populate_default(dev->xaq_pool);
	if (rc < 0) {
		plt_err("Unable to populate xaqpool.");
		goto alloc_fail;
	}
	reconfig_cnt++;
	/* When SW does addwork (enqueue) check if there is space in XAQ by
	 * comparing fc_addr above against the xaq_lmt calculated below.
	 * There should be a minimum headroom (CNXK_SSO_XAQ_SLACK / 2) for SSO
	 * to request XAQ to cache them even before enqueue is called.
	 */
	dev->xaq_lmt =
		xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
	dev->nb_xaq_cfg = xaq_cnt;

	npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
	return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
				       dev->nb_event_queues);

alloc_fail:
	rte_mempool_free(dev->xaq_pool);
	dev->xaq_pool = NULL;
	rte_memzone_free(mz);
	return rc;
}
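
/* Re-size the XAQ pool when more XAE entries are needed, e.g. when an
 * Rx/Tx adapter raises adptr_xae_cnt. Safe to call on a running device:
 * it is stopped and restarted around the reallocation.
 */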
int
cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc = 0;

	if (event_dev->data->dev_started)
		event_dev->dev_ops->dev_stop(event_dev);

	rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to release XAQ %d", rc);
		return rc;
	}

	rte_mempool_free(dev->xaq_pool);
	dev->xaq_pool = NULL;
	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0) {
		plt_err("Failed to alloc XAQ %d", rc);
		return rc;
	}

	rte_mb();
	if (event_dev->data->dev_started)
		event_dev->dev_ops->dev_start(event_dev);

	return 0;
}
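
/* Allocate and register one HWS (event port) per configured port using the
 * SoC-specific init/setup callbacks passed in by the cn9k/cn10k drivers;
 * on failure, unwind the ports set up so far.
 */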
int
cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
		       cnxk_sso_init_hws_mem_t init_hws_fn,
		       cnxk_sso_hws_setup_t setup_hws_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct cnxk_sso_hws_cookie *ws_cookie;
		void *ws;

		/* Free memory prior to re-allocation if needed */
		if (event_dev->data->ports[i] != NULL)
			ws = event_dev->data->ports[i];
		else
			ws = init_hws_fn(dev, i);
		if (ws == NULL)
			goto hws_fini;
		ws_cookie = cnxk_sso_hws_get_cookie(ws);
		ws_cookie->event_dev = event_dev;
		ws_cookie->configured = 1;
		event_dev->data->ports[i] = ws;
		cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
				    i, setup_hws_fn);
	}

	return 0;
hws_fini:
	for (i = i - 1; i >= 0; i--) {
		rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
		event_dev->data->ports[i] = NULL;
	}
	return -ENOMEM;
}
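
/* Replay queue->port links after reconfiguration. The eventdev layer marks
 * unlinked entries in links_map with 0xdead; every other entry is relinked
 * through the SoC-specific link function.
 */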
void
cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
		       cnxk_sso_link_t link_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP];
	int i, j;

	for (i = 0; i < dev->nb_event_ports; i++) {
		uint16_t nb_hwgrp = 0;

		links_map = event_dev->data->links_map;
		/* Point links_map to this port specific area */
		links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);

		for (j = 0; j < dev->nb_event_queues; j++) {
			if (links_map[j] == 0xdead)
				continue;
			hwgrp[nb_hwgrp] = j;
			nb_hwgrp++;
		}

		link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp);
	}
}
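
/* Validate rte_event_dev_config against device limits and cache the
 * accepted queue/port counts; any previously allocated XAQ pool is
 * released so it can be resized on the next configure step.
 */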
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;
	int rc;

	deq_tmo_ns = conf->dequeue_timeout_ns;

	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;
	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		plt_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	if (!conf->nb_event_queues || !conf->nb_event_ports ||
	    conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		plt_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_dequeue_depth > 1) {
		plt_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		plt_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	if (dev->xaq_pool) {
		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
		if (rc < 0) {
			plt_err("Failed to release XAQ %d", rc);
			return rc;
		}
		rte_mempool_free(dev->xaq_pool);
		dev->xaq_pool = NULL;
	}

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	return 0;
}
void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
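
/* Each event queue maps 1:1 to an SSO HWGRP; 0xFF keeps the default weight
 * and affinity while the 0-255 eventdev priority is scaled to the eight
 * hardware levels (e.g. 255 / 32 == 7, the lowest priority).
 */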
int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
		     const struct rte_event_queue_conf *queue_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
	/* Normalize <0-255> to <0-7> */
	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
					  queue_conf->priority / 32);
}
void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);
}
void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
		       struct rte_event_port_conf *port_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = dev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}
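
/* Bind a previously allocated HWS to every configured HWGRP by handing the
 * per-queue base addresses to the SoC-specific setup callback.
 */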
int
cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		    cnxk_sso_hws_setup_t hws_setup_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP] = {0};
	uint16_t q;

	plt_sso_dbg("Port=%d", port_id);
	if (event_dev->data->ports[port_id] == NULL) {
		plt_err("Invalid port Id %d", port_id);
		return -EINVAL;
	}

	for (q = 0; q < dev->nb_event_queues; q++) {
		grps_base[q] = roc_sso_hwgrp_base_get(&dev->sso, q);
		if (grps_base[q] == 0) {
			plt_err("Failed to get grp[%d] base addr", q);
			return -EINVAL;
		}
	}

	hws_setup_fn(dev, event_dev->data->ports[port_id], grps_base);
	plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
	rte_mb();

	return 0;
}
int
cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
		       uint64_t *tmo_ticks)
{
	RTE_SET_USED(event_dev);
	*tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

	return 0;
}
void
cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	roc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);
}
static void
cnxk_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *event_dev = arg;

	if (event_dev->dev_ops->dev_stop_flush != NULL)
		event_dev->dev_ops->dev_stop_flush(
			event_dev->data->dev_id, event,
			event_dev->data->dev_stop_flush_arg);
}
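
/* Quiesce the device: reset every HWS, then drain each HWGRP through HWS0,
 * handing leftover events to the application's dev_stop_flush callback,
 * and finally enable or disable the GGRPs per the 'enable' flag.
 */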
static void
cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
		 cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uintptr_t hwgrp_base;
	uint16_t i;
	void *ws;

	for (i = 0; i < dev->nb_event_ports; i++) {
		ws = event_dev->data->ports[i];
		reset_fn(dev, ws);
	}

	rte_mb();
	ws = event_dev->data->ports[0];

	for (i = 0; i < dev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
		flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
		/* Enable/Disable SSO GGRP */
		plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
	}
}
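
/* Apply any devargs-provided per-HWGRP QoS split before starting, then
 * flush stale events and enable the GGRPs.
 */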
int
cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
	       cnxk_sso_hws_flush_t flush_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	struct roc_sso_hwgrp_qos qos[dev->qos_queue_cnt];
	int i, rc;

	plt_sso_dbg();
	for (i = 0; i < dev->qos_queue_cnt; i++) {
		qos[i].hwgrp = dev->qos_parse_data[i].queue;
		qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
		qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
		qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
	}
	rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
				      dev->xae_cnt);
	if (rc < 0) {
		plt_err("Failed to configure HWGRP QoS rc = %d", rc);
		return -EINVAL;
	}
	cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, true);
	rte_mb();

	return 0;
}
void
cnxk_sso_stop(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
	      cnxk_sso_hws_flush_t flush_fn)
{
	plt_sso_dbg();
	cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, false);
	rte_mb();
}
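
/* Tear down the device: unlink all queues from every port, free the HWS
 * cookies, release SSO resources and the XAQ/flow-control memory, and
 * reset the software state to post-probe defaults.
 */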
int
cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t all_queues[CNXK_SSO_MAX_HWGRP];
	uint16_t i;
	void *ws;

	if (!dev->configured)
		return 0;

	for (i = 0; i < dev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		ws = event_dev->data->ports[i];
		unlink_fn(dev, ws, all_queues, dev->nb_event_queues);
		rte_free(cnxk_sso_hws_get_cookie(ws));
		event_dev->data->ports[i] = NULL;
	}

	roc_sso_rsrc_fini(&dev->sso);
	rte_mempool_free(dev->xaq_pool);
	rte_memzone_free(rte_memzone_lookup(CNXK_SSO_FC_NAME));

	dev->fc_iova = 0;
	dev->fc_mem = NULL;
	dev->xaq_pool = NULL;
	dev->configured = false;
	dev->is_timeout_deq = 0;
	dev->nb_event_ports = 0;
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);

	return 0;
}
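
/* Parse a single QoS tuple of the form Qx-XAQ-TAQ-IAQ (brackets already
 * stripped by parse_qos_list); e.g. "1-50-50-50" would assign queue 1 50%
 * of each resource (values illustrative). Fields are written byte-wise
 * into struct cnxk_sso_qos in declaration order.
 */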
static void
parse_queue_param(char *value, void *opaque)
{
	struct cnxk_sso_qos queue_qos = {0};
	uint8_t *val = (uint8_t *)&queue_qos;
	struct cnxk_sso_evdev *dev = opaque;
	char *tok = strtok(value, "-");
	struct cnxk_sso_qos *old_ptr;

	if (!strlen(value))
		return;

	while (tok != NULL) {
		*val = atoi(tok);
		tok = strtok(NULL, "-");
		val++;
	}

	if (val != (&queue_qos.iaq_prcnt + 1)) {
		plt_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
		return;
	}

	dev->qos_queue_cnt++;
	old_ptr = dev->qos_parse_data;
	dev->qos_parse_data = rte_realloc(
		dev->qos_parse_data,
		sizeof(struct cnxk_sso_qos) * dev->qos_queue_cnt, 0);
	if (dev->qos_parse_data == NULL) {
		dev->qos_parse_data = old_ptr;
		dev->qos_queue_cnt--;
		return;
	}
	dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}
static void
parse_qos_list(const char *value, void *opaque)
{
	char *s = strdup(value);
	char *start = NULL;
	char *end = NULL;
	char *f = s;

	if (s == NULL)
		return;

	while (*s) {
		if (*s == '[')
			start = s;
		else if (*s == ']')
			end = s;

		if (start && start < end) {
			*end = 0;
			parse_queue_param(start + 1, opaque);
			s = end;
			start = end;
		}
		s++;
	}

	free(f);
}
static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
	RTE_SET_USED(key);

	/* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]: '-' is used as the
	 * separator because ',' isn't allowed in devargs values. Everything
	 * is expressed in percentages; 0 selects the default.
	 */
	parse_qos_list(value, opaque);

	return 0;
}
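
/* Process the PMD devargs. An illustrative (hypothetical) invocation:
 *   -a 0002:0e:00.0,xae_cnt=8192,qos=[1-50-50-50],single_ws=1
 * Keys not handled below are silently ignored.
 */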
static void
cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	uint8_t single_ws = 0;

	if (devargs == NULL)
		return;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, CNXK_SSO_XAE_CNT, &parse_kvargs_value,
			   &dev->xae_cnt);
	rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
			   dev);
	rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag,
			   &single_ws);
	rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_value,
			   &dev->gw_mode);
	dev->dual_ws = !single_ws;
	rte_kvargs_free(kvlist);
}
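
/* Common probe-time init shared by cn9k/cn10k: publish the device pointer
 * via the CNXK_SSO_MZ_NAME memzone so other subsystems can look it up,
 * parse devargs, bring up the RoC SSO device and seed the default limits.
 */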
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
	const struct rte_memzone *mz = NULL;
	struct rte_pci_device *pci_dev;
	struct cnxk_sso_evdev *dev;
	int rc;

	mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
				 SOCKET_ID_ANY, 0);
	if (mz == NULL) {
		plt_err("Failed to create eventdev memzone");
		return -ENOMEM;
	}

	dev = cnxk_sso_pmd_priv(event_dev);
	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
	dev->sso.pci_dev = pci_dev;

	*(uint64_t *)mz->addr = (uint64_t)dev;
	cnxk_sso_parse_devargs(dev, pci_dev->device.devargs);

	/* Initialize the base cnxk_dev object */
	rc = roc_sso_dev_init(&dev->sso);
	if (rc < 0) {
		plt_err("Failed to initialize RoC SSO rc=%d", rc);
		goto error;
	}

	dev->is_timeout_deq = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	cnxk_tim_init(&dev->sso);

	return 0;

error:
	rte_memzone_free(mz);
	return rc;
}
int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	/* For secondary processes, nothing to be done */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cnxk_tim_fini();
	roc_sso_rsrc_fini(&dev->sso);
	roc_sso_dev_fini(&dev->sso);

	return 0;
}
int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}