/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eventdev_pmd_pci.h>

#include "otx2_evdev.h"
16 otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
18 return rte_event_pmd_pci_probe(pci_drv, pci_dev,
19 sizeof(struct otx2_sso_evdev),
24 otx2_sso_remove(struct rte_pci_device *pci_dev)
26 return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
29 static const struct rte_pci_id pci_sso_map[] = {
31 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
32 PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
39 static struct rte_pci_driver pci_sso = {
40 .id_table = pci_sso_map,
41 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
42 .probe = otx2_sso_probe,
43 .remove = otx2_sso_remove,
47 otx2_sso_init(struct rte_eventdev *event_dev)
49 struct free_rsrcs_rsp *rsrc_cnt;
50 struct rte_pci_device *pci_dev;
51 struct otx2_sso_evdev *dev;
54 /* For secondary processes, the primary has done all the work */
55 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
58 dev = sso_pmd_priv(event_dev);
60 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
62 /* Initialize the base otx2_dev object */
63 rc = otx2_dev_init(pci_dev, dev);
65 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
69 /* Get SSO and SSOW MSIX rsrc cnt */
70 otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
71 rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
73 otx2_err("Unable to get free rsrc count");
76 otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
77 rsrc_cnt->ssow, rsrc_cnt->npa);
79 dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
80 dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
81 /* Grab the NPA LF if required */
82 rc = otx2_npa_lf_init(pci_dev, dev);
84 otx2_err("Unable to init NPA lf. It might not be provisioned");
88 dev->drv_inited = true;
89 dev->is_timeout_deq = 0;
90 dev->min_dequeue_timeout_ns = USEC2NSEC(1);
91 dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
92 dev->max_num_events = -1;
93 dev->nb_event_queues = 0;
94 dev->nb_event_ports = 0;
96 if (!dev->max_event_ports || !dev->max_event_queues) {
97 otx2_err("Not enough eventdev resource queues=%d ports=%d",
98 dev->max_event_queues, dev->max_event_ports);
100 goto otx2_npa_lf_uninit;
103 otx2_sso_pf_func_set(dev->pf_func);
104 otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
105 event_dev->data->name, dev->max_event_queues,
106 dev->max_event_ports);
114 otx2_dev_fini(pci_dev, dev);
120 otx2_sso_fini(struct rte_eventdev *event_dev)
122 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
123 struct rte_pci_device *pci_dev;
125 /* For secondary processes, nothing to be done */
126 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
129 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
131 if (!dev->drv_inited)
134 dev->drv_inited = false;
138 if (otx2_npa_lf_active(dev)) {
139 otx2_info("Common resource in use by other devices");
143 otx2_dev_fini(pci_dev, dev);
148 RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
149 RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
150 RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");