1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_bus_pci.h>
8 #include <rte_common.h>
10 #include <rte_eventdev_pmd_pci.h>
13 #include "otx2_evdev.h"
/*
 * eventdev dev_infos_get callback: report this SSO device's capabilities
 * and limits to the application.
 *
 * Fixed values below (flows per queue, priority levels, port depths of 1)
 * are hardware/PMD constants; dynamic limits (queues, ports, max events,
 * timeout range) come from the per-device state captured at init time.
 *
 * NOTE(review): this listing has elided lines (embedded numbering gaps) —
 * the function's braces/return are not visible here.
 */
16 otx2_sso_info_get(struct rte_eventdev *event_dev,
17 struct rte_event_dev_info *dev_info)
19 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
21 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
/* Timeout bounds set in otx2_sso_init (USEC2NSEC(1)..USEC2NSEC(0x3FF)). */
22 dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
23 dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
24 dev_info->max_event_queues = dev->max_event_queues;
/* 2^20 flow tags per event queue — hardware constant. */
25 dev_info->max_event_queue_flows = (1ULL << 20);
26 dev_info->max_event_queue_priority_levels = 8;
27 dev_info->max_event_priority_levels = 1;
28 dev_info->max_event_ports = dev->max_event_ports;
/* SSO ports dequeue/enqueue a single event at a time. */
29 dev_info->max_event_port_dequeue_depth = 1;
30 dev_info->max_event_port_enqueue_depth = 1;
31 dev_info->max_num_events = dev->max_num_events;
/* Capability flags advertised by this PMD. */
32 dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
33 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
34 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
35 RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
36 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
37 RTE_EVENT_DEV_CAP_NONSEQ_MODE;
/*
 * Ask the AF (admin function), via mailbox, to attach or detach @nb_lf
 * local functions of @type (GWS or GGRP) to/from this PF/VF.
 * @attach non-zero selects the attach path, zero the detach path.
 *
 * NOTE(review): lines are elided in this listing — request-field setup
 * and the error returns after otx2_mbox_process() are not visible.
 */
41 sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
42 uint16_t nb_lf, uint8_t attach)
/* Attach path: build a resource-attach request. */
45 struct rsrc_attach_req *req;
47 req = otx2_mbox_alloc_msg_attach_resources(mbox);
/* Negative mbox result indicates AF rejected/failed the request. */
59 if (otx2_mbox_process(mbox) < 0)
/* Detach path: build a resource-detach request. */
62 struct rsrc_detach_req *req;
64 req = otx2_mbox_alloc_msg_detach_resources(mbox);
76 if (otx2_mbox_process(mbox) < 0)
/*
 * Allocate or free @nb_lf already-attached LFs of @type via mailbox.
 * @alloc non-zero allocates, zero frees. On a successful GGRP allocation
 * the AF response carries XAQ/IUE sizing which is cached on @dev for
 * later use.
 *
 * NOTE(review): elided listing — branch structure, the @rsp declaration
 * and the error returns are not visible here.
 */
84 sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
85 enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
/* Alloc path, GGRP: request nb_lf hardware work groups. */
94 struct sso_lf_alloc_req *req_ggrp;
95 req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
96 req_ggrp->hwgrps = nb_lf;
/* Alloc path, GWS: request nb_lf hardware work slots. */
101 struct ssow_lf_alloc_req *req_hws;
102 req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
103 req_hws->hws = nb_lf;
/* Free path, GGRP. */
113 struct sso_lf_free_req *req_ggrp;
114 req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
115 req_ggrp->hwgrps = nb_lf;
/* Free path, GWS. */
120 struct ssow_lf_free_req *req_hws;
121 req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
122 req_hws->hws = nb_lf;
/* ~0 timeout: wait indefinitely for the AF response. */
130 rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
/* Cache GGRP sizing info from the AF response on the device. */
134 if (alloc && type == SSO_LF_GGRP) {
135 struct sso_lf_alloc_rsp *rsp_ggrp = rsp;
137 dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
138 dev->xae_waes = rsp_ggrp->xaq_wq_entries;
139 dev->iue = rsp_ggrp->in_unit_entries;
/*
 * Provision one GWS (work-slot) LF per configured event port:
 * first attach the LFs through the AF, then allocate/initialize them.
 * On init failure the just-attached LFs are detached again.
 *
 * NOTE(review): elided listing — success return and error-path returns
 * are not visible here.
 */
146 sso_configure_ports(const struct rte_eventdev *event_dev)
148 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
149 struct otx2_mbox *mbox = dev->mbox;
153 otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);
/* One GWS LF per event port. */
155 nb_lf = dev->nb_event_ports;
156 /* Ask AF to attach required LFs. */
157 rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
159 otx2_err("Failed to attach SSO GWS LF");
/* Init failed: roll back the attach before erroring out. */
163 if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
164 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
165 otx2_err("Failed to init SSO GWS LF");
/*
 * Provision one GGRP (work-group) LF per configured event queue:
 * attach through the AF, then allocate/initialize. Mirrors
 * sso_configure_ports() with GGRP in place of GWS.
 *
 * NOTE(review): elided listing — success return and error-path returns
 * are not visible here.
 */
173 sso_configure_queues(const struct rte_eventdev *event_dev)
175 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
176 struct otx2_mbox *mbox = dev->mbox;
180 otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);
/* One GGRP LF per event queue. */
182 nb_lf = dev->nb_event_queues;
183 /* Ask AF to attach required LFs. */
184 rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
186 otx2_err("Failed to attach SSO GGRP LF");
/* Init failed: roll back the attach before erroring out. */
190 if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
191 sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
192 otx2_err("Failed to init SSO GGRP LF");
/*
 * Undo sso_configure_queues()/sso_configure_ports() for @lf_type:
 * free the LFs and then detach them from this PF/VF. The LF count is
 * taken from the current queue/port configuration.
 *
 * NOTE(review): elided listing — the type dispatch (presumably a
 * switch/if on lf_type selecting the nb_lf source) is not fully visible.
 */
200 sso_lf_teardown(struct otx2_sso_evdev *dev,
201 enum otx2_sso_lf_type lf_type)
/* GGRP: one LF per event queue. */
207 nb_lf = dev->nb_event_queues;
/* GWS: one LF per event port. */
210 nb_lf = dev->nb_event_ports;
/* Free first, then detach — reverse order of configuration. */
216 sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
217 sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
/*
 * eventdev dev_configure callback: validate the requested configuration
 * against device limits, tear down any previous queue/port provisioning,
 * then provision GWS ports and GGRP queues for the new configuration.
 *
 * NOTE(review): elided listing — the return statements after each
 * otx2_err() and the error-label structure are not visible here.
 */
221 otx2_sso_configure(const struct rte_eventdev *event_dev)
223 struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
224 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
229 deq_tmo_ns = conf->dequeue_timeout_ns;
/* Fall back to the device minimum when no timeout was requested
 * (elided condition — presumably deq_tmo_ns == 0; confirm upstream). */
232 deq_tmo_ns = dev->min_dequeue_timeout_ns;
/* Requested timeout must fall inside the advertised range. */
234 if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
235 deq_tmo_ns > dev->max_dequeue_timeout_ns) {
236 otx2_err("Unsupported dequeue timeout requested");
240 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
241 dev->is_timeout_deq = 1;
243 dev->deq_tmo_ns = deq_tmo_ns;
/* Reject configurations exceeding the probed hardware limits. */
245 if (conf->nb_event_ports > dev->max_event_ports ||
246 conf->nb_event_queues > dev->max_event_queues) {
247 otx2_err("Unsupported event queues/ports requested");
/* Port depths >1 are unsupported (info_get advertises max depth 1). */
251 if (conf->nb_event_port_dequeue_depth > 1) {
252 otx2_err("Unsupported event port deq depth requested");
256 if (conf->nb_event_port_enqueue_depth > 1) {
257 otx2_err("Unsupported event port enq depth requested");
/* Reconfiguration: release any previously provisioned LFs first. */
261 if (dev->nb_event_queues) {
262 /* Finit any previous queues. */
263 sso_lf_teardown(dev, SSO_LF_GGRP);
265 if (dev->nb_event_ports) {
266 /* Finit any previous ports. */
267 sso_lf_teardown(dev, SSO_LF_GWS);
270 dev->nb_event_queues = conf->nb_event_queues;
271 dev->nb_event_ports = conf->nb_event_ports;
273 if (sso_configure_ports(event_dev)) {
274 otx2_err("Failed to configure event ports");
278 if (sso_configure_queues(event_dev) < 0) {
279 otx2_err("Failed to configure event queues");
/* Error cleanup (elided label above): undo port provisioning and
 * reset the configured counts so a retry starts clean. */
290 sso_lf_teardown(dev, SSO_LF_GWS);
291 dev->nb_event_queues = 0;
292 dev->nb_event_ports = 0;
297 /* Initialize and register event driver with DPDK Application */
/* eventdev ops table wired up in otx2_sso_init(); further callbacks are
 * elided from this listing. */
298 static struct rte_eventdev_ops otx2_sso_ops = {
299 .dev_infos_get = otx2_sso_info_get,
300 .dev_configure = otx2_sso_configure,
/*
 * PCI probe callback: delegate to the generic eventdev PCI probe helper,
 * sizing the private area for otx2_sso_evdev. The trailing argument
 * (per-device init callback, presumably otx2_sso_init) is elided here.
 */
304 otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
306 return rte_event_pmd_pci_probe(pci_drv, pci_dev,
307 sizeof(struct otx2_sso_evdev),
/*
 * PCI remove callback: generic eventdev PCI remove with otx2_sso_fini
 * as the per-device teardown hook.
 */
312 otx2_sso_remove(struct rte_pci_device *pci_dev)
314 return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
/* PCI IDs this driver binds to: Marvell/Cavium RVU SSO/TIM PF.
 * (Terminating sentinel entry elided from this listing.) */
317 static const struct rte_pci_id pci_sso_map[] = {
319 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
320 PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
/* PCI driver descriptor: needs BAR mapping and IOVA-as-VA mode. */
327 static struct rte_pci_driver pci_sso = {
328 .id_table = pci_sso_map,
329 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
330 .probe = otx2_sso_probe,
331 .remove = otx2_sso_remove,
/*
 * Per-device init (called from probe): set the ops table, then (primary
 * process only) initialize the base otx2 device, query free SSO/SSOW
 * resource counts from the AF, clamp them to PMD maxima, grab the NPA LF
 * and seed the device defaults. Uses goto-label cleanup on failure
 * (labels partially elided from this listing).
 */
335 otx2_sso_init(struct rte_eventdev *event_dev)
337 struct free_rsrcs_rsp *rsrc_cnt;
338 struct rte_pci_device *pci_dev;
339 struct otx2_sso_evdev *dev;
342 event_dev->dev_ops = &otx2_sso_ops;
343 /* For secondary processes, the primary has done all the work */
344 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
347 dev = sso_pmd_priv(event_dev);
/* Recover the PCI device from the generic rte_device handle. */
349 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
351 /* Initialize the base otx2_dev object */
352 rc = otx2_dev_init(pci_dev, dev);
354 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
358 /* Get SSO and SSOW MSIX rsrc cnt */
359 otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
360 rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
362 otx2_err("Unable to get free rsrc count");
363 goto otx2_dev_uninit;
365 otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
366 rsrc_cnt->ssow, rsrc_cnt->npa);
/* Clamp AF-provisioned counts to the PMD's compile-time maxima. */
368 dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
369 dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
370 /* Grab the NPA LF if required */
371 rc = otx2_npa_lf_init(pci_dev, dev);
373 otx2_err("Unable to init NPA lf. It might not be provisioned");
374 goto otx2_dev_uninit;
377 dev->drv_inited = true;
378 dev->is_timeout_deq = 0;
/* Dequeue timeout range: 1 us .. 0x3FF (1023) us, in nanoseconds. */
379 dev->min_dequeue_timeout_ns = USEC2NSEC(1);
380 dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
/* -1: no fixed limit on inflight events (presumably "unlimited" to the
 * application — confirm against eventdev API docs). */
381 dev->max_num_events = -1;
382 dev->nb_event_queues = 0;
383 dev->nb_event_ports = 0;
/* AF gave us no usable ports or queues — cannot operate. */
385 if (!dev->max_event_ports || !dev->max_event_queues) {
386 otx2_err("Not enough eventdev resource queues=%d ports=%d",
387 dev->max_event_queues, dev->max_event_ports);
389 goto otx2_npa_lf_uninit;
392 otx2_sso_pf_func_set(dev->pf_func);
393 otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
394 event_dev->data->name, dev->max_event_queues,
395 dev->max_event_ports);
/* Error path (labels elided above): release the base otx2 device. */
403 otx2_dev_fini(pci_dev, dev);
/*
 * Per-device teardown (called from remove): primary process only.
 * Skips devices never fully initialized, and defers common-resource
 * teardown while the shared NPA LF is still referenced by other devices
 * (early return presumed — elided from this listing).
 */
409 otx2_sso_fini(struct rte_eventdev *event_dev)
411 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
412 struct rte_pci_device *pci_dev;
414 /* For secondary processes, nothing to be done */
415 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
418 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
/* Nothing to tear down if init never completed. */
420 if (!dev->drv_inited)
423 dev->drv_inited = false;
/* NPA LF is shared; skip common teardown while others still use it. */
427 if (otx2_npa_lf_active(dev)) {
428 otx2_info("Common resource in use by other devices");
432 otx2_dev_fini(pci_dev, dev);
/* Register the driver with the PCI bus, export its ID table for
 * dpdk-pmdinfo, and declare the vfio-pci kernel module dependency. */
437 RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
438 RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
439 RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");