1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_bus_pci.h>
8 #include <rte_common.h>
10 #include <rte_eventdev_pmd_pci.h>
13 #include "otx2_evdev.h"
/*
 * otx2_sso_info_get() - eventdev "info_get" driver hook.
 *
 * Fills @dev_info with the device limits probed at init time
 * (dev->max_* fields, dequeue timeout range) plus the fixed per-queue
 * flow/priority limits and the capability flags this PMD implements.
 */
16 otx2_sso_info_get(struct rte_eventdev *event_dev,
17 struct rte_event_dev_info *dev_info)
19 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
21 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
22 dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
23 dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
24 dev_info->max_event_queues = dev->max_event_queues;
/* Up to 1M (2^20) flows per event queue. */
25 dev_info->max_event_queue_flows = (1ULL << 20);
26 dev_info->max_event_queue_priority_levels = 8;
27 dev_info->max_event_priority_levels = 1;
28 dev_info->max_event_ports = dev->max_event_ports;
/* Depth of 1: single-event (non-burst) enqueue/dequeue per port. */
29 dev_info->max_event_port_dequeue_depth = 1;
30 dev_info->max_event_port_enqueue_depth = 1;
31 dev_info->max_num_events = dev->max_num_events;
32 dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
33 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
34 RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
35 RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
36 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
37 RTE_EVENT_DEV_CAP_NONSEQ_MODE;
/*
 * sso_hw_lf_cfg() - attach or detach @nb_lf SSO LFs of @type to/from
 * this PF/VF by issuing resource attach/detach requests to the AF over
 * the shared mailbox.
 *
 * NOTE(review): parts of this function are elided in this view (request
 * field population, per-type branching, return paths).  The visible
 * shape is: attach branch builds an attach request and processes the
 * mailbox; detach branch builds a detach request and does the same.
 */
41 sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
42 uint16_t nb_lf, uint8_t attach)
/* Attach path: ask AF to attach the requested LFs. */
45 struct rsrc_attach_req *req;
47 req = otx2_mbox_alloc_msg_attach_resources(mbox);
59 if (otx2_mbox_process(mbox) < 0)
/* Detach path: release previously attached LFs back to the AF. */
62 struct rsrc_detach_req *req;
64 req = otx2_mbox_alloc_msg_detach_resources(mbox);
76 if (otx2_mbox_process(mbox) < 0)
/*
 * sso_lf_cfg() - initialize (@alloc != 0) or free SSO group (GGRP) or
 * work-slot (HWS) LFs via dedicated SSO/SSOW mailbox messages.
 *
 * On a successful GGRP allocation the AF response carries the XAQ
 * buffer geometry and in-unit entry count, which are cached in @dev
 * (xaq_buf_size / xae_waes / iue) for later use.
 *
 * NOTE(review): branching/return lines are elided in this view.
 */
84 sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
85 enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
/* Alloc path, GGRP: request @nb_lf hardware group LFs. */
94 struct sso_lf_alloc_req *req_ggrp;
95 req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
96 req_ggrp->hwgrps = nb_lf;
/* Alloc path, HWS: request @nb_lf hardware work-slot LFs. */
101 struct ssow_lf_alloc_req *req_hws;
102 req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
103 req_hws->hws = nb_lf;
/* Free path, GGRP: release @nb_lf hardware group LFs. */
113 struct sso_lf_free_req *req_ggrp;
114 req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
115 req_ggrp->hwgrps = nb_lf;
/* Free path, HWS: release @nb_lf hardware work-slot LFs. */
120 struct ssow_lf_free_req *req_hws;
121 req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
122 req_hws->hws = nb_lf;
/* ~0 timeout: wait indefinitely for the AF response. */
130 rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
/* Cache XAQ geometry reported by the AF on GGRP allocation. */
134 if (alloc && type == SSO_LF_GGRP) {
135 struct sso_lf_alloc_rsp *rsp_ggrp = rsp;
137 dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
138 dev->xae_waes = rsp_ggrp->xaq_wq_entries;
139 dev->iue = rsp_ggrp->in_unit_entries;
/*
 * otx2_sso_queue_release() - eventdev "queue_release" hook.
 * Intentional no-op: queue LF teardown is handled elsewhere
 * (sso_lf_teardown() during reconfigure/fini).
 */
146 otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
148 RTE_SET_USED(event_dev);
149 RTE_SET_USED(queue_id);
/*
 * sso_configure_ports() - attach and initialize one SSO GWS (work-slot)
 * LF per configured event port.  If LF initialization fails, the
 * freshly attached LFs are detached again before erroring out.
 *
 * NOTE(review): return statements and later setup are elided in this
 * view.
 */
153 sso_configure_ports(const struct rte_eventdev *event_dev)
155 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
156 struct otx2_mbox *mbox = dev->mbox;
160 otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);
/* One GWS LF per event port. */
162 nb_lf = dev->nb_event_ports;
163 /* Ask AF to attach required LFs. */
164 rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
166 otx2_err("Failed to attach SSO GWS LF");
/* Init failed: undo the attach so resources are not leaked. */
170 if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
171 sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
172 otx2_err("Failed to init SSO GWS LF");
/*
 * sso_configure_queues() - attach and initialize one SSO GGRP (hardware
 * group) LF per configured event queue; mirrors sso_configure_ports().
 * On LF-init failure the attached LFs are detached before erroring out.
 *
 * NOTE(review): return statements and later setup are elided in this
 * view.
 */
180 sso_configure_queues(const struct rte_eventdev *event_dev)
182 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
183 struct otx2_mbox *mbox = dev->mbox;
187 otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);
/* One GGRP LF per event queue. */
189 nb_lf = dev->nb_event_queues;
190 /* Ask AF to attach required LFs. */
191 rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
193 otx2_err("Failed to attach SSO GGRP LF");
/* Init failed: undo the attach so resources are not leaked. */
197 if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
198 sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
199 otx2_err("Failed to init SSO GGRP LF");
/*
 * sso_lf_teardown() - free then detach all LFs of @lf_type.  The LF
 * count is taken from the current configuration: event queues for GGRP,
 * event ports for GWS (the type switch itself is elided in this view).
 */
207 sso_lf_teardown(struct otx2_sso_evdev *dev,
208 enum otx2_sso_lf_type lf_type)
214 nb_lf = dev->nb_event_queues;
217 nb_lf = dev->nb_event_ports;
/* Free the LFs first, then detach them from this PF/VF. */
223 sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
224 sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
/*
 * otx2_sso_configure() - eventdev "dev_configure" driver hook.
 *
 * Validates the requested configuration against probed device limits
 * (dequeue timeout range, queue/port counts, enq/deq depth of 1),
 * tears down LFs from any previous configuration, then attaches and
 * initializes GWS (port) and GGRP (queue) LFs for the new counts.
 * The visible error-unwind path detaches GWS LFs and zeroes the
 * configured counts.
 *
 * NOTE(review): several return statements and an intermediate timeout
 * check are elided in this view.
 */
228 otx2_sso_configure(const struct rte_eventdev *event_dev)
230 struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
231 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
236 deq_tmo_ns = conf->dequeue_timeout_ns;
/* Fall back to the device minimum (guarding condition elided here). */
239 deq_tmo_ns = dev->min_dequeue_timeout_ns;
241 if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
242 deq_tmo_ns > dev->max_dequeue_timeout_ns) {
243 otx2_err("Unsupported dequeue timeout requested");
247 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
248 dev->is_timeout_deq = 1;
250 dev->deq_tmo_ns = deq_tmo_ns;
252 if (conf->nb_event_ports > dev->max_event_ports ||
253 conf->nb_event_queues > dev->max_event_queues) {
254 otx2_err("Unsupported event queues/ports requested");
/* This PMD only supports single-event (depth 1) ports. */
258 if (conf->nb_event_port_dequeue_depth > 1) {
259 otx2_err("Unsupported event port deq depth requested");
263 if (conf->nb_event_port_enqueue_depth > 1) {
264 otx2_err("Unsupported event port enq depth requested");
268 if (dev->nb_event_queues) {
269 /* Finit any previous queues. */
270 sso_lf_teardown(dev, SSO_LF_GGRP);
272 if (dev->nb_event_ports) {
273 /* Finit any previous ports. */
274 sso_lf_teardown(dev, SSO_LF_GWS);
277 dev->nb_event_queues = conf->nb_event_queues;
278 dev->nb_event_ports = conf->nb_event_ports;
280 if (sso_configure_ports(event_dev)) {
281 otx2_err("Failed to configure event ports");
285 if (sso_configure_queues(event_dev) < 0) {
286 otx2_err("Failed to configure event queues");
/* Error unwind: release port LFs and reset configured counts. */
297 sso_lf_teardown(dev, SSO_LF_GWS);
298 dev->nb_event_queues = 0;
299 dev->nb_event_ports = 0;
/*
 * otx2_sso_queue_def_conf() - eventdev "queue_def_conf" hook.
 * Reports default queue config: 1M atomic flows / reorder sequences,
 * all schedule types allowed, normal priority.
 */
305 otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
306 struct rte_event_queue_conf *queue_conf)
308 RTE_SET_USED(event_dev);
309 RTE_SET_USED(queue_id);
311 queue_conf->nb_atomic_flows = (1ULL << 20);
312 queue_conf->nb_atomic_order_sequences = (1ULL << 20);
313 queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
314 queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
/*
 * otx2_sso_queue_setup() - eventdev "queue_setup" hook.
 *
 * Programs the hardware group's scheduling priority via the AF mailbox.
 * The eventdev 0-255 priority is mapped down to the 8 HW levels.
 */
318 otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
319 const struct rte_event_queue_conf *queue_conf)
321 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
322 struct otx2_mbox *mbox = dev->mbox;
323 struct sso_grp_priority *req;
326 sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);
328 req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
/* 0xFF: presumably "no specific affinity" — TODO confirm vs AF spec. */
331 req->affinity = 0xFF;
332 /* Normalize <0-255> to <0-7> */
333 req->priority = queue_conf->priority / 32;
335 rc = otx2_mbox_process(mbox);
337 otx2_err("Failed to set priority queue=%d", queue_id);
344 /* Initialize and register event driver with DPDK Application */
/* Eventdev ops table wired into event_dev->dev_ops at init time. */
345 static struct rte_eventdev_ops otx2_sso_ops = {
346 .dev_infos_get = otx2_sso_info_get,
347 .dev_configure = otx2_sso_configure,
348 .queue_def_conf = otx2_sso_queue_def_conf,
349 .queue_setup = otx2_sso_queue_setup,
350 .queue_release = otx2_sso_queue_release,
/*
 * otx2_sso_probe() - PCI probe callback; delegates to the generic
 * eventdev PCI probe helper, sizing private data for otx2_sso_evdev.
 */
354 otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
356 return rte_event_pmd_pci_probe(pci_drv, pci_dev,
357 sizeof(struct otx2_sso_evdev),
/*
 * otx2_sso_remove() - PCI remove callback; the generic helper invokes
 * otx2_sso_fini() for device-specific teardown.
 */
362 otx2_sso_remove(struct rte_pci_device *pci_dev)
364 return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
/* PCI IDs this driver binds to: Marvell/Cavium OcteonTx2 RVU SSO/TIM PF. */
367 static const struct rte_pci_id pci_sso_map[] = {
369 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
370 PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF)
/* PCI driver descriptor; needs BAR mapping and IOVA-as-VA mode. */
377 static struct rte_pci_driver pci_sso = {
378 .id_table = pci_sso_map,
379 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
380 .probe = otx2_sso_probe,
381 .remove = otx2_sso_remove,
/*
 * otx2_sso_init() - per-device init, run from probe.
 *
 * Primary process only (secondaries just inherit the ops table):
 * initializes the base otx2 device (mailbox to AF), queries the free
 * SSO/SSOW resource counts to derive max queues/ports, grabs the NPA
 * LF, and seeds default timeout/limit state.  Visible error unwind
 * goes through otx2_npa_lf_uninit / otx2_dev_uninit labels ending in
 * otx2_dev_fini().
 *
 * NOTE(review): label definitions and return statements are elided in
 * this view.
 */
385 otx2_sso_init(struct rte_eventdev *event_dev)
387 struct free_rsrcs_rsp *rsrc_cnt;
388 struct rte_pci_device *pci_dev;
389 struct otx2_sso_evdev *dev;
392 event_dev->dev_ops = &otx2_sso_ops;
393 /* For secondary processes, the primary has done all the work */
394 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
397 dev = sso_pmd_priv(event_dev);
399 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
401 /* Initialize the base otx2_dev object */
402 rc = otx2_dev_init(pci_dev, dev);
404 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
408 /* Get SSO and SSOW MSIX rsrc cnt */
409 otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
410 rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
412 otx2_err("Unable to get free rsrc count");
413 goto otx2_dev_uninit;
415 otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
416 rsrc_cnt->ssow, rsrc_cnt->npa);
/* Clamp provisioned HW counts to the PMD's supported maxima. */
418 dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
419 dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
420 /* Grab the NPA LF if required */
421 rc = otx2_npa_lf_init(pci_dev, dev);
423 otx2_err("Unable to init NPA lf. It might not be provisioned");
424 goto otx2_dev_uninit;
427 dev->drv_inited = true;
428 dev->is_timeout_deq = 0;
/* Dequeue timeout range: 1us .. 0x3FF us, expressed in ns. */
429 dev->min_dequeue_timeout_ns = USEC2NSEC(1);
430 dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
/* -1: presumably "no fixed limit" until XAQ sizing — TODO confirm. */
431 dev->max_num_events = -1;
432 dev->nb_event_queues = 0;
433 dev->nb_event_ports = 0;
435 if (!dev->max_event_ports || !dev->max_event_queues) {
436 otx2_err("Not enough eventdev resource queues=%d ports=%d",
437 dev->max_event_queues, dev->max_event_ports);
439 goto otx2_npa_lf_uninit;
442 otx2_sso_pf_func_set(dev->pf_func);
443 otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
444 event_dev->data->name, dev->max_event_queues,
445 dev->max_event_ports);
/* Error unwind tail: release the base otx2 device. */
453 otx2_dev_fini(pci_dev, dev);
/*
 * otx2_sso_fini() - per-device teardown, run from remove.
 *
 * Primary process only.  Skips work if the driver never finished init;
 * defers common-resource teardown while the NPA LF is still in use by
 * sibling devices, otherwise finalizes the base otx2 device.
 *
 * NOTE(review): return statements are elided in this view.
 */
459 otx2_sso_fini(struct rte_eventdev *event_dev)
461 struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
462 struct rte_pci_device *pci_dev;
464 /* For secondary processes, nothing to be done */
465 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
468 pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
470 if (!dev->drv_inited)
473 dev->drv_inited = false;
/* Shared NPA LF still referenced elsewhere: leave it alive. */
477 if (otx2_npa_lf_active(dev)) {
478 otx2_info("Common resource in use by other devices");
482 otx2_dev_fini(pci_dev, dev);
/* Register the PMD, its PCI ID table, and its kernel module dependency. */
487 RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
488 RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
489 RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");