--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cnxk_eventdev.h"
+
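+/* Resource sizing note: each SSO hardware work slot (HWS) is assumed to back
+ * one event port and each hardware work group (HWGRP) one event queue, so the
+ * limits below come from the probed HWS/HWGRP counts, with the queue count
+ * clamped to RTE_EVENT_MAX_QUEUES_PER_DEV.
+ */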
+static void
+cn10k_sso_set_rsrc(void *arg)
+{
+	struct cnxk_sso_evdev *dev = arg;
+
+	dev->max_event_ports = dev->sso.max_hws;
+	dev->max_event_queues =
+		dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
+			RTE_EVENT_MAX_QUEUES_PER_DEV :
+			dev->sso.max_hwgrp;
+}
+
+static void
+cn10k_sso_info_get(struct rte_eventdev *event_dev,
+		   struct rte_event_dev_info *dev_info)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
+	cnxk_sso_info_get(dev, dev_info);
+}
+
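+/* Eventdev ops table: only the info query is wired up here; the remaining
+ * ops are presumably filled in by later patches in the series.
+ */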
+static struct rte_eventdev_ops cn10k_sso_dev_ops = {
+	.dev_infos_get = cn10k_sso_info_get,
+};
+
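+/* Per-device init: reject binaries not built for the 64-byte cache lines
+ * used by CN10K, bring up the common platform (ROC) layer, and, in the
+ * primary process only, run the shared cnxk init and size ports/queues
+ * from the discovered SSO resources.
+ */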
+static int
+cn10k_sso_init(struct rte_eventdev *event_dev)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int rc;
+
+	if (RTE_CACHE_LINE_SIZE != 64) {
+		plt_err("Driver not compiled for CN10K");
+		return -EFAULT;
+	}
+
+	rc = roc_plt_init();
+	if (rc < 0) {
+		plt_err("Failed to initialize platform model");
+		return rc;
+	}
+
+	event_dev->dev_ops = &cn10k_sso_dev_ops;
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	rc = cnxk_sso_init(event_dev);
+	if (rc < 0)
+		return rc;
+
+	cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
+	if (!dev->max_event_ports || !dev->max_event_queues) {
+		plt_err("Not enough eventdev resource queues=%d ports=%d",
+			dev->max_event_queues, dev->max_event_ports);
+		cnxk_sso_fini(event_dev);
+		return -ENODEV;
+	}
+
+	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
+		    event_dev->data->name, dev->max_event_queues,
+		    dev->max_event_ports);
+
+	return 0;
+}
+
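+/* PCI probe: lets the eventdev PCI helper allocate the device together with
+ * a cnxk_sso_evdev-sized private area and then invoke cn10k_sso_init on it.
+ */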
+static int
+cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
+				       sizeof(struct cnxk_sso_evdev),
+				       cn10k_sso_init);
+}
+
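+/* PCI IDs accepted by this driver: the RVU SSO/TIM PF and VF on CN10KA and
+ * CN10KAS parts; the zeroed entry terminates the table.
+ */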
+static const struct rte_pci_id cn10k_pci_sso_map[] = {
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
+	CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver cn10k_pci_sso = {
+	.id_table = cn10k_pci_sso_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+	.probe = cn10k_sso_probe,
+	.remove = cnxk_sso_remove,
+};
+
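+/* Usage sketch (illustrative only; the PCI address below is a placeholder):
+ * bind the SSO/TIM function to vfio-pci and pass it to an eventdev
+ * application, e.g.
+ *
+ *   dpdk-devbind.py -b vfio-pci 0002:0e:00.0
+ *   dpdk-test-eventdev -a 0002:0e:00.0 -- --test=order_queue
+ *
+ * after which the device is exposed under the event_cn10k driver.
+ */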
+RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
+RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
+RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cnxk_eventdev.h"
+
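+/* CN9K can pair two hardware work slots per event port (dual workslot mode);
+ * CN9K_DUAL_WS_PAIR_ID is assumed to map a port index and a slot id within
+ * the pair to the underlying HWS index.
+ */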
+#define CN9K_DUAL_WS_NB_WS 2
+#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
+
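+/* Resource sizing: in dual workslot mode each event port consumes two HWS,
+ * halving the port count; queues are clamped to RTE_EVENT_MAX_QUEUES_PER_DEV
+ * as on CN10K.
+ */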
+static void
+cn9k_sso_set_rsrc(void *arg)
+{
+	struct cnxk_sso_evdev *dev = arg;
+
+	if (dev->dual_ws)
+		dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
+	else
+		dev->max_event_ports = dev->sso.max_hws;
+	dev->max_event_queues =
+		dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
+			RTE_EVENT_MAX_QUEUES_PER_DEV :
+			dev->sso.max_hwgrp;
+}
+
+static void
+cn9k_sso_info_get(struct rte_eventdev *event_dev,
+		  struct rte_event_dev_info *dev_info)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
+	cnxk_sso_info_get(dev, dev_info);
+}
+
+static struct rte_eventdev_ops cn9k_sso_dev_ops = {
+	.dev_infos_get = cn9k_sso_info_get,
+};
+
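+/* Per-device init: same flow as CN10K, except the build must target the
+ * 128-byte cache lines used by CN9K class cores.
+ */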
+static int
+cn9k_sso_init(struct rte_eventdev *event_dev)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int rc;
+
+	if (RTE_CACHE_LINE_SIZE != 128) {
+		plt_err("Driver not compiled for CN9K");
+		return -EFAULT;
+	}
+
+	rc = roc_plt_init();
+	if (rc < 0) {
+		plt_err("Failed to initialize platform model");
+		return rc;
+	}
+
+	event_dev->dev_ops = &cn9k_sso_dev_ops;
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	rc = cnxk_sso_init(event_dev);
+	if (rc < 0)
+		return rc;
+
+	cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
+	if (!dev->max_event_ports || !dev->max_event_queues) {
+		plt_err("Not enough eventdev resource queues=%d ports=%d",
+			dev->max_event_queues, dev->max_event_ports);
+		cnxk_sso_fini(event_dev);
+		return -ENODEV;
+	}
+
+	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
+		    event_dev->data->name, dev->max_event_queues,
+		    dev->max_event_ports);
+
+	return 0;
+}
+
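+/* PCI probe: same pattern as CN10K, allocating cnxk_sso_evdev private data
+ * and running cn9k_sso_init.
+ */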
+static int
+cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+	return rte_event_pmd_pci_probe(
+		pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
+}
+
+static const struct rte_pci_id cn9k_pci_sso_map[] = {
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver cn9k_pci_sso = {
+	.id_table = cn9k_pci_sso_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+	.probe = cn9k_sso_probe,
+	.remove = cnxk_sso_remove,
+};
+
+RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
+RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
+RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");