event/cnxk: add platform specific device probe
author Pavan Nikhilesh <pbhagavatula@marvell.com>
Tue, 4 May 2021 00:26:55 +0000 (05:56 +0530)
committer Jerin Jacob <jerinj@marvell.com>
Tue, 4 May 2021 03:14:24 +0000 (05:14 +0200)
Add platform-specific event device probe and remove callbacks, and
add the event device info get function.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
drivers/event/cnxk/cn10k_eventdev.c [new file with mode: 0644]
drivers/event/cnxk/cn9k_eventdev.c [new file with mode: 0644]
drivers/event/cnxk/cnxk_eventdev.h
drivers/event/cnxk/meson.build

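Once either PMD has probed an SSO device, an application can query the
limits reported through the new info get callback with the standard
eventdev API. A minimal sketch, assuming one SSO PF/VF is bound to
vfio-pci and with error handling trimmed (not part of this patch):

#include <stdio.h>

#include <rte_eal.h>
#include <rte_eventdev.h>

int
main(int argc, char **argv)
{
        struct rte_event_dev_info info;

        if (rte_eal_init(argc, argv) < 0)
                return -1;

        /* One event device is created per probed SSO PF/VF. */
        if (rte_event_dev_count() == 0)
                return -1;

        /* Reaches cn9k/cn10k_sso_info_get() via the dev_infos_get op. */
        rte_event_dev_info_get(0, &info);
        printf("driver=%s queues=%u ports=%u\n", info.driver_name,
               info.max_event_queues, info.max_event_ports);

        rte_eal_cleanup();
        return 0;
}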
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
new file mode 100644 (file)
index 0000000..1216aca
--- /dev/null
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cnxk_eventdev.h"
+
+static void
+cn10k_sso_set_rsrc(void *arg)
+{
+       struct cnxk_sso_evdev *dev = arg;
+
+       dev->max_event_ports = dev->sso.max_hws;
+       dev->max_event_queues =
+               dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
+                             RTE_EVENT_MAX_QUEUES_PER_DEV :
+                             dev->sso.max_hwgrp;
+}
+
+static void
+cn10k_sso_info_get(struct rte_eventdev *event_dev,
+                  struct rte_event_dev_info *dev_info)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+       dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
+       cnxk_sso_info_get(dev, dev_info);
+}
+
+static struct rte_eventdev_ops cn10k_sso_dev_ops = {
+       .dev_infos_get = cn10k_sso_info_get,
+};
+
+static int
+cn10k_sso_init(struct rte_eventdev *event_dev)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       int rc;
+
+       if (RTE_CACHE_LINE_SIZE != 64) {
+               plt_err("Driver not compiled for CN9K");
+               return -EFAULT;
+       }
+
+       rc = roc_plt_init();
+       if (rc < 0) {
+               plt_err("Failed to initialize platform model");
+               return rc;
+       }
+
+       event_dev->dev_ops = &cn10k_sso_dev_ops;
+       /* For secondary processes, the primary has done all the work */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
+       rc = cnxk_sso_init(event_dev);
+       if (rc < 0)
+               return rc;
+
+       cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
+       if (!dev->max_event_ports || !dev->max_event_queues) {
+               plt_err("Not enough eventdev resource queues=%d ports=%d",
+                       dev->max_event_queues, dev->max_event_ports);
+               cnxk_sso_fini(event_dev);
+               return -ENODEV;
+       }
+
+       plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
+                   event_dev->data->name, dev->max_event_queues,
+                   dev->max_event_ports);
+
+       return 0;
+}
+
+static int
+cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+       return rte_event_pmd_pci_probe(pci_drv, pci_dev,
+                                      sizeof(struct cnxk_sso_evdev),
+                                      cn10k_sso_init);
+}
+
+static const struct rte_pci_id cn10k_pci_sso_map[] = {
+       CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
+       CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
+       CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
+       CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
+       {
+               .vendor_id = 0,
+       },
+};
+
+static struct rte_pci_driver cn10k_pci_sso = {
+       .id_table = cn10k_pci_sso_map,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+       .probe = cn10k_sso_probe,
+       .remove = cnxk_sso_remove,
+};
+
+RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
+RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
+RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
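The set_rsrc helpers above clamp the number of hardware groups to the
eventdev API limit before exposing them as event queues; the ternary is
equivalent to an RTE_MIN(). A standalone sketch of the same clamping
(the helper name is illustrative, not part of the patch):

#include <rte_common.h>
#include <rte_eventdev.h>

/* Mirrors the max_event_queues computation in cn10k/cn9k_sso_set_rsrc(). */
static inline uint32_t
sso_clamp_event_queues(uint32_t max_hwgrp)
{
        return RTE_MIN(max_hwgrp, (uint32_t)RTE_EVENT_MAX_QUEUES_PER_DEV);
}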
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
new file mode 100644 (file)
index 0000000..988d242
--- /dev/null
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cnxk_eventdev.h"
+
+#define CN9K_DUAL_WS_NB_WS         2
+#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))
+
+static void
+cn9k_sso_set_rsrc(void *arg)
+{
+       struct cnxk_sso_evdev *dev = arg;
+
+       if (dev->dual_ws)
+               dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
+       else
+               dev->max_event_ports = dev->sso.max_hws;
+       dev->max_event_queues =
+               dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
+                             RTE_EVENT_MAX_QUEUES_PER_DEV :
+                             dev->sso.max_hwgrp;
+}
+
+static void
+cn9k_sso_info_get(struct rte_eventdev *event_dev,
+                 struct rte_event_dev_info *dev_info)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+       dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
+       cnxk_sso_info_get(dev, dev_info);
+}
+
+static struct rte_eventdev_ops cn9k_sso_dev_ops = {
+       .dev_infos_get = cn9k_sso_info_get,
+};
+
+static int
+cn9k_sso_init(struct rte_eventdev *event_dev)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       int rc;
+
+       if (RTE_CACHE_LINE_SIZE != 128) {
+               plt_err("Driver not compiled for CN9K");
+               return -EFAULT;
+       }
+
+       rc = roc_plt_init();
+       if (rc < 0) {
+               plt_err("Failed to initialize platform model");
+               return rc;
+       }
+
+       event_dev->dev_ops = &cn9k_sso_dev_ops;
+       /* For secondary processes, the primary has done all the work */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
+       rc = cnxk_sso_init(event_dev);
+       if (rc < 0)
+               return rc;
+
+       cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
+       if (!dev->max_event_ports || !dev->max_event_queues) {
+               plt_err("Not enough eventdev resource queues=%d ports=%d",
+                       dev->max_event_queues, dev->max_event_ports);
+               cnxk_sso_fini(event_dev);
+               return -ENODEV;
+       }
+
+       plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
+                   event_dev->data->name, dev->max_event_queues,
+                   dev->max_event_ports);
+
+       return 0;
+}
+
+static int
+cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+       return rte_event_pmd_pci_probe(
+               pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
+}
+
+static const struct rte_pci_id cn9k_pci_sso_map[] = {
+       {
+               .vendor_id = 0,
+       },
+};
+
+static struct rte_pci_driver cn9k_pci_sso = {
+       .id_table = cn9k_pci_sso_map,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+       .probe = cn9k_sso_probe,
+       .remove = cnxk_sso_remove,
+};
+
+RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
+RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
+RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
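On CN9K, when dual workslot mode is in use each event port is backed by a
pair of hardware workslots, and CN9K_DUAL_WS_PAIR_ID() flattens a
(port, slot-within-pair) tuple into a flat workslot index. A small worked
example (the asserts simply restate the macro arithmetic):

/* Standalone copy of the pairing macros for illustration. */
#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))

/* Event port 3 maps to hardware workslots 6 and 7. */
_Static_assert(CN9K_DUAL_WS_PAIR_ID(3, 0) == 6, "first WS of port 3");
_Static_assert(CN9K_DUAL_WS_PAIR_ID(3, 1) == 7, "second WS of port 3");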
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 9745bfd..6bdf0b3 100644 (file)
@@ -25,6 +25,8 @@ struct cnxk_sso_evdev {
        uint32_t min_dequeue_timeout_ns;
        uint32_t max_dequeue_timeout_ns;
        int32_t max_num_events;
+       /* CN9K */
+       uint8_t dual_ws;
 } __rte_cache_aligned;
 
 static inline struct cnxk_sso_evdev *
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 5754378..1931fd1 100644 (file)
@@ -8,6 +8,10 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
     subdir_done()
 endif
 
-sources = files('cnxk_eventdev.c')
+sources = files(
+        'cn9k_eventdev.c',
+        'cn10k_eventdev.c',
+        'cnxk_eventdev.c',
+)
 
 deps += ['bus_pci', 'common_cnxk']