/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eventdev_pmd_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf_pool_ops.h>

#include "otx2_evdev.h"

static void
otx2_sso_info_get(struct rte_eventdev *event_dev,
		  struct rte_event_dev_info *dev_info)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
	dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
	dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
	dev_info->max_event_queues = dev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = dev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = dev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
				  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
				  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
				  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
				  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
				  RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}
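
/*
 * Added note: max_event_port_dequeue_depth and max_event_port_enqueue_depth
 * are both 1 above, reflecting that an SSO GWS work slot presents software
 * with a single event per GETWORK operation; larger burst depths are not
 * meaningful on this hardware.
 */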

static void
sso_port_link_modify(struct otx2_ssogws *ws, uint8_t queue, uint8_t enable)
{
	uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
	uint64_t val;

	val = queue;
	val |= 0ULL << 12; /* SET 0 */
	val |= 0x8000800080000000; /* Don't modify rest of the masks */
	val |= (uint64_t)enable << 14; /* Enable/Disable Membership. */

	otx2_write64(val, base + SSOW_LF_GWS_GRPMSK_CHG);
}
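
/*
 * Added note (a reading of the field comments above, not from the original
 * source): SSOW_LF_GWS_GRPMSK_CHG takes the group (queue) index in its low
 * bits, the mask-set selector at bit 12 and the membership enable at bit 14,
 * while the 0x8000800080000000 pattern asserts the write-enable bits so the
 * unrelated mask words are left untouched.
 */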

static int
otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
		   const uint8_t queues[], const uint8_t priorities[],
		   uint16_t nb_links)
{
	uint8_t port_id = 0;
	uint16_t link;

	RTE_SET_USED(event_dev);
	RTE_SET_USED(priorities);
	for (link = 0; link < nb_links; link++) {
		struct otx2_ssogws *ws = port;

		port_id = ws->port;
		sso_port_link_modify(ws, queues[link], true);
	}
	sso_func_trace("Port=%d nb_links=%d", port_id, nb_links);

	return (int)nb_links;
}

static int
otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		     uint8_t queues[], uint16_t nb_unlinks)
{
	uint8_t port_id = 0;
	uint16_t unlink;

	RTE_SET_USED(event_dev);
	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		struct otx2_ssogws *ws = port;

		port_id = ws->port;
		sso_port_link_modify(ws, queues[unlink], false);
	}
	sso_func_trace("Port=%d nb_unlinks=%d", port_id, nb_unlinks);

	return (int)nb_unlinks;
}
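
#ifdef OTX2_SSO_USAGE_EXAMPLE
/*
 * Illustrative sketch (not part of the driver): an application reaches
 * otx2_sso_port_link()/otx2_sso_port_unlink() through the public eventdev
 * API. The dev_id/port_id values are whatever the application configured;
 * this helper and its name are hypothetical.
 */
static void
example_link_all_queues(uint8_t dev_id, uint8_t port_id, uint8_t nb_queues)
{
	uint8_t q;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];

	for (q = 0; q < nb_queues; q++)
		queues[q] = q;

	/* NULL priorities applies RTE_EVENT_DEV_PRIORITY_NORMAL to every
	 * link; this PMD ignores per-link priorities anyway (see the
	 * RTE_SET_USED(priorities) above).
	 */
	rte_event_port_link(dev_id, port_id, queues, NULL, nb_queues);
}
#endif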

static int
sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
	      uint16_t nb_lf, uint8_t attach)
{
	if (attach) {
		struct rsrc_attach_req *req;

		req = otx2_mbox_alloc_msg_attach_resources(mbox);
		/* Ask the AF for nb_lf LFs of the requested block. */
		if (type == SSO_LF_GGRP)
			req->sso = nb_lf;
		else
			req->ssow = nb_lf;
		req->modify = true;
		if (otx2_mbox_process(mbox) < 0)
			return -EIO;
	} else {
		struct rsrc_detach_req *req;

		req = otx2_mbox_alloc_msg_detach_resources(mbox);
		/* Detach all LFs of the requested block. */
		if (type == SSO_LF_GGRP)
			req->sso = true;
		else
			req->ssow = true;
		req->partial = true;
		if (otx2_mbox_process(mbox) < 0)
			return -EIO;
	}

	return 0;
}

static int
sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
	   enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
{
	void *rsp;
	int rc;

	if (alloc) {
		if (type == SSO_LF_GGRP) {
			struct sso_lf_alloc_req *req_ggrp;

			req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
			req_ggrp->hwgrps = nb_lf;
		} else {
			struct ssow_lf_alloc_req *req_hws;

			req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
			req_hws->hws = nb_lf;
		}
	} else {
		if (type == SSO_LF_GGRP) {
			struct sso_lf_free_req *req_ggrp;

			req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
			req_ggrp->hwgrps = nb_lf;
		} else {
			struct ssow_lf_free_req *req_hws;

			req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
			req_hws->hws = nb_lf;
		}
	}

	rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
	if (rc < 0)
		return rc;

	if (alloc && type == SSO_LF_GGRP) {
		struct sso_lf_alloc_rsp *rsp_ggrp = rsp;

		dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
		dev->xae_waes = rsp_ggrp->xaq_wq_entries;
		dev->iue = rsp_ggrp->in_unit_entries;
	}

	return rc;
}

static void
otx2_sso_port_release(void *port)
{
	rte_free(port);
}

static void
otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);
}

static void
sso_clr_links(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	int i, j;

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct otx2_ssogws *ws;

		ws = event_dev->data->ports[i];
		for (j = 0; j < dev->nb_event_queues; j++)
			sso_port_link_modify(ws, j, false);
	}
}

static void
sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
{
	ws->tag_op = base + SSOW_LF_GWS_TAG;
	ws->wqp_op = base + SSOW_LF_GWS_WQP;
	ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK;
	ws->swtp_op = base + SSOW_LF_GWS_SWTP;
	ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
	ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}
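
/*
 * Added note: the addresses cached above are the per-workslot fast-path
 * registers; keeping them precomputed in struct otx2_ssogws lets the
 * enqueue/dequeue paths issue loads and stores without rebuilding BAR
 * offsets on every operation.
 */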

static int
sso_configure_ports(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	uint8_t nb_lf;
	int i, rc;

	otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

	nb_lf = dev->nb_event_ports;
	/* Ask AF to attach required LFs. */
	rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
	if (rc < 0) {
		otx2_err("Failed to attach SSO GWS LF");
		return -ENODEV;
	}

	if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
		otx2_err("Failed to init SSO GWS LF");
		return -ENODEV;
	}

	for (i = 0; i < nb_lf; i++) {
		struct otx2_ssogws *ws;
		uintptr_t base;

		/* Free memory prior to re-allocation if needed */
		if (event_dev->data->ports[i] != NULL) {
			ws = event_dev->data->ports[i];
			rte_free(ws);
			ws = NULL;
		}

		/* Allocate event port memory */
		ws = rte_zmalloc_socket("otx2_sso_ws",
					sizeof(struct otx2_ssogws),
					OTX2_ALIGN,
					event_dev->data->socket_id);
		if (ws == NULL) {
			otx2_err("Failed to alloc memory for port=%d", i);
			rc = -ENOMEM;
			break;
		}

		ws->port = i;
		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
		sso_set_port_ops(ws, base);

		event_dev->data->ports[i] = ws;
	}

	if (rc < 0) {
		sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
	}

	return rc;
}

static int
sso_configure_queues(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	uint8_t nb_lf;
	int rc;

	otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);

	nb_lf = dev->nb_event_queues;
	/* Ask AF to attach required LFs. */
	rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
	if (rc < 0) {
		otx2_err("Failed to attach SSO GGRP LF");
		return -ENODEV;
	}

	if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
		sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
		otx2_err("Failed to init SSO GGRP LF");
		return -ENODEV;
	}

	return rc;
}

static int
sso_xaq_allocate(struct otx2_sso_evdev *dev)
{
	const struct rte_memzone *mz;
	struct npa_aura_s *aura;
	static int reconfig_cnt;
	char pool_name[RTE_MEMZONE_NAMESIZE];
	uint32_t xaq_cnt;
	int rc;

	if (dev->xaq_pool)
		rte_mempool_free(dev->xaq_pool);

	/*
	 * Allocate memory for Add work backpressure.
	 */
	mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
	if (mz == NULL)
		mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
						 OTX2_ALIGN +
						 sizeof(struct npa_aura_s),
						 rte_socket_id(),
						 RTE_MEMZONE_IOVA_CONTIG,
						 OTX2_ALIGN);
	if (mz == NULL) {
		otx2_err("Failed to allocate mem for fcmem");
		return -ENOMEM;
	}

	dev->fc_iova = mz->iova;
	dev->fc_mem = mz->addr;

	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
	memset(aura, 0, sizeof(struct npa_aura_s));

	aura->fc_ena = 1;
	aura->fc_addr = dev->fc_iova;
	aura->fc_hyst_bits = 0; /* Store count on all updates */

	/* Taken from HRM 14.3.3(4) */
	xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
	if (dev->xae_cnt)
		xaq_cnt += dev->xae_cnt / dev->xae_waes;
	else
		xaq_cnt += (dev->iue / dev->xae_waes) +
			   (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);

	otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
	/* Setup XAQ based on number of nb queues. */
	snprintf(pool_name, sizeof(pool_name), "otx2_xaq_buf_pool_%d",
		 reconfig_cnt);
	dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
			xaq_cnt, dev->xaq_buf_size, 0, 0,
			rte_socket_id(), 0);

	if (dev->xaq_pool == NULL) {
		otx2_err("Unable to create empty mempool.");
		rte_memzone_free(mz);
		return -ENOMEM;
	}

	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
					rte_mbuf_platform_mempool_ops(), aura);
	if (rc != 0) {
		otx2_err("Unable to set xaqpool ops.");
		goto alloc_fail;
	}

	rc = rte_mempool_populate_default(dev->xaq_pool);
	if (rc < 0) {
		otx2_err("Unable to populate xaqpool.");
		goto alloc_fail;
	}
	reconfig_cnt++;
	/* When SW does addwork (enqueue), it checks whether the XAQ has room
	 * by comparing the count at fc_addr above against the xaq_lmt
	 * calculated below. A minimum headroom of OTX2_SSO_XAQ_SLACK / 2
	 * buffers per queue is kept so the SSO can cache XAQs even before
	 * enqueue is called.
	 */
	dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
				  dev->nb_event_queues);
	dev->nb_xaq_cfg = xaq_cnt;

	return 0;

alloc_fail:
	rte_mempool_free(dev->xaq_pool);
	rte_memzone_free(mz);
	return rc;
}
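
/*
 * Worked example (hypothetical numbers): with 2 event queues, xae_cnt unset,
 * iue = 1024 in-unit entries and xae_waes = 16 work-queue entries per XAQ
 * buffer, the sizing above gives
 *   xaq_cnt = 2 * OTX2_SSO_XAQ_CACHE_CNT + 1024 / 16 +
 *             OTX2_SSO_XAQ_SLACK * 2
 * and xaq_lmt then holds back OTX2_SSO_XAQ_SLACK / 2 buffers per queue as
 * headroom for the SSO's internal XAQ prefetching.
 */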

static int
sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct sso_hw_setconfig *req;

	otx2_sso_dbg("Configuring XAQ for GGRPs");
	req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
	req->npa_pf_func = otx2_npa_pf_func_get();
	req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
	req->hwgrps = dev->nb_event_queues;

	return otx2_mbox_process(mbox);
}
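
/*
 * Added note: this mailbox message points every GGRP at the NPA aura that
 * backs xaq_pool, so the hardware allocates and frees XAQ buffers from the
 * same pool software populated in sso_xaq_allocate().
 */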

static void
sso_lf_teardown(struct otx2_sso_evdev *dev,
		enum otx2_sso_lf_type lf_type)
{
	uint8_t nb_lf;

	switch (lf_type) {
	case SSO_LF_GGRP:
		nb_lf = dev->nb_event_queues;
		break;
	case SSO_LF_GWS:
		nb_lf = dev->nb_event_ports;
		break;
	default:
		return;
	}

	sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
	sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
}

static int
otx2_sso_configure(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;
	int rc;

	sso_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;

	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;

	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		otx2_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	if (conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		otx2_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_dequeue_depth > 1) {
		otx2_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		otx2_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	if (dev->nb_event_queues) {
		/* Tear down any previous queues. */
		sso_lf_teardown(dev, SSO_LF_GGRP);
	}
	if (dev->nb_event_ports) {
		/* Tear down any previous ports. */
		sso_lf_teardown(dev, SSO_LF_GWS);
	}

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	if (sso_configure_ports(event_dev)) {
		otx2_err("Failed to configure event ports");
		return -ENODEV;
	}

	if (sso_configure_queues(event_dev) < 0) {
		otx2_err("Failed to configure event queues");
		rc = -ENODEV;
		goto teardown_hws;
	}

	if (sso_xaq_allocate(dev) < 0) {
		rc = -ENOMEM;
		goto teardown_hwggrp;
	}

	/* Clear any prior port-queue mapping. */
	sso_clr_links(event_dev);
	rc = sso_ggrp_alloc_xaq(dev);
	if (rc < 0) {
		otx2_err("Failed to alloc xaq to ggrp %d", rc);
		goto teardown_hwggrp;
	}

	dev->configured = 1;
	rte_mb();

	return 0;

teardown_hwggrp:
	sso_lf_teardown(dev, SSO_LF_GGRP);
teardown_hws:
	sso_lf_teardown(dev, SSO_LF_GWS);
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;
	return rc;
}
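
/*
 * Added note: the error labels above unwind in reverse order of setup: a
 * queue-configure failure releases the already-attached GWS LFs, while XAQ
 * allocation or GGRP XAQ-assignment failures release both GGRP and GWS LFs.
 */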

static void
otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
		     const struct rte_event_queue_conf *queue_conf)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct sso_grp_priority *req;
	int rc;

	sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);

	req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
	req->grp = queue_id;
	req->weight = 0xFF;
	req->affinity = 0xFF;
	/* Normalize <0-255> to <0-7> */
	req->priority = queue_conf->priority / 32;

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to set priority queue=%d", queue_id);
		return rc;
	}

	return 0;
}
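
/*
 * Added note: queue_conf->priority uses the generic eventdev range of
 * 0 (highest) to 255 (lowest); dividing by 32 folds it onto the 8 hardware
 * group priority levels, e.g. RTE_EVENT_DEV_PRIORITY_NORMAL (128) maps to
 * level 4.
 */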

static void
otx2_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
		       struct rte_event_port_conf *port_conf)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = dev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

static int
otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		    const struct rte_event_port_conf *port_conf)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uintptr_t grps_base[OTX2_SSO_MAX_VHGRP] = {0};
	struct otx2_ssogws *ws;
	uintptr_t base;
	uint64_t val;
	uint16_t q;

	sso_func_trace("Port=%d", port_id);
	RTE_SET_USED(port_conf);

	if (event_dev->data->ports[port_id] == NULL) {
		otx2_err("Invalid port Id %d", port_id);
		return -EINVAL;
	}

	for (q = 0; q < dev->nb_event_queues; q++) {
		grps_base[q] = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 | q << 12);
		if (grps_base[q] == 0) {
			otx2_err("Failed to get grp[%d] base addr", q);
			return -EINVAL;
		}
	}

	/* Set get_work timeout for HWS */
	val = NSEC2USEC(dev->deq_tmo_ns) - 1;

	ws = event_dev->data->ports[port_id];
	base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);

	rte_memcpy(ws->grps_base, grps_base,
		   sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
	ws->fc_mem = dev->fc_mem;
	ws->xaq_lmt = dev->xaq_lmt;
	otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);

	otx2_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);

	return 0;
}

static int
otx2_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
		       uint64_t *tmo_ticks)
{
	RTE_SET_USED(event_dev);
	*tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

	return 0;
}
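
/*
 * Added note, assuming NSEC2TICK(ns, hz) computes ns * hz / 1E9: with a
 * 1 GHz timer, a 100 us request (ns = 100000) converts to 100000 ticks.
 */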

/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops otx2_sso_ops = {
	.dev_infos_get = otx2_sso_info_get,
	.dev_configure = otx2_sso_configure,
	.queue_def_conf = otx2_sso_queue_def_conf,
	.queue_setup = otx2_sso_queue_setup,
	.queue_release = otx2_sso_queue_release,
	.port_def_conf = otx2_sso_port_def_conf,
	.port_setup = otx2_sso_port_setup,
	.port_release = otx2_sso_port_release,
	.port_link = otx2_sso_port_link,
	.port_unlink = otx2_sso_port_unlink,
	.timeout_ticks = otx2_sso_timeout_ticks,
};

#define OTX2_SSO_XAE_CNT	"xae_cnt"

static void
sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;

	if (devargs == NULL)
		return;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
			   &dev->xae_cnt);

	rte_kvargs_free(kvlist);
}
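
/*
 * Usage sketch (hypothetical PCI address): xae_cnt is given as a device
 * argument on the EAL command line and lands in dev->xae_cnt through the
 * handler above, e.g.
 *   -w 0002:0e:00.0,xae_cnt=8192
 * which overrides the in-unit-entry based XAQ sizing in sso_xaq_allocate().
 */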

static int
otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
				       sizeof(struct otx2_sso_evdev),
				       otx2_sso_init);
}

static int
otx2_sso_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
}

static const struct rte_pci_id pci_sso_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_sso = {
	.id_table = pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = otx2_sso_probe,
	.remove = otx2_sso_remove,
};

int
otx2_sso_init(struct rte_eventdev *event_dev)
{
	struct free_rsrcs_rsp *rsrc_cnt;
	struct rte_pci_device *pci_dev;
	struct otx2_sso_evdev *dev;
	int rc;

	event_dev->dev_ops = &otx2_sso_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dev = sso_pmd_priv(event_dev);

	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

	/* Initialize the base otx2_dev object */
	rc = otx2_dev_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Failed to initialize otx2_dev rc=%d", rc);
		goto error;
	}

	/* Get SSO and SSOW MSIX rsrc cnt */
	otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
	rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
	if (rc < 0) {
		otx2_err("Unable to get free rsrc count");
		goto otx2_dev_uninit;
	}
	otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
		     rsrc_cnt->ssow, rsrc_cnt->npa);

	dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
	dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Unable to init NPA lf. It might not be provisioned");
		goto otx2_dev_uninit;
	}

	dev->drv_inited = true;
	dev->is_timeout_deq = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	if (!dev->max_event_ports || !dev->max_event_queues) {
		otx2_err("Not enough eventdev resource queues=%d ports=%d",
			 dev->max_event_queues, dev->max_event_ports);
		rc = -ENODEV;
		goto otx2_npa_lf_uninit;
	}

	sso_parse_devargs(dev, pci_dev->device.devargs);

	otx2_sso_pf_func_set(dev->pf_func);
	otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		     event_dev->data->name, dev->max_event_queues,
		     dev->max_event_ports);

	return 0;

otx2_npa_lf_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	return rc;
}

int
otx2_sso_fini(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct rte_pci_device *pci_dev;

	/* For secondary processes, nothing to be done */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

	if (!dev->drv_inited)
		goto dev_fini;

	dev->drv_inited = false;
	otx2_npa_lf_fini();

dev_fini:
	if (otx2_npa_lf_active(dev)) {
		otx2_info("Common resource in use by other devices");
		return -EAGAIN;
	}

	otx2_dev_fini(pci_dev, dev);

	return 0;
}

RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>");