/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eventdev_pmd_pci.h>
#include <rte_mbuf_pool_ops.h>

#include "otx2_evdev.h"
static void
otx2_sso_info_get(struct rte_eventdev *event_dev,
		  struct rte_event_dev_info *dev_info)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX2_PMD);
	dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
	dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
	dev_info->max_event_queues = dev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = dev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = dev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
				  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
				  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
				  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
				  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
				  RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}
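/* The depth-1 limits above reflect the hardware model: a GWS (event port)
 * hands software one work entry per GETWORK operation, so this PMD cannot
 * advertise enqueue/dequeue bursts deeper than a single event.
 */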
static int
sso_hw_lf_cfg(struct otx2_mbox *mbox, enum otx2_sso_lf_type type,
	      uint16_t nb_lf, uint8_t attach)
{
	if (attach) {
		struct rsrc_attach_req *req;

		req = otx2_mbox_alloc_msg_attach_resources(mbox);
		req->modify = true;
		req->sso = (type == SSO_LF_GGRP) ? nb_lf : 0;
		req->ssow = (type == SSO_LF_GWS) ? nb_lf : 0;
		if (otx2_mbox_process(mbox) < 0)
			return -EIO;
	} else {
		struct rsrc_detach_req *req;

		req = otx2_mbox_alloc_msg_detach_resources(mbox);
		req->partial = true;
		req->sso = (type == SSO_LF_GGRP);
		req->ssow = (type == SSO_LF_GWS);
		if (otx2_mbox_process(mbox) < 0)
			return -EIO;
	}

	return 0;
}
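/* Note the division of labor: sso_hw_lf_cfg() only moves LF ownership
 * between the RVU AF and this PF/VF, while sso_lf_cfg() below issues the
 * SSO/SSOW alloc or free mailbox messages for the attached LFs. Callers
 * pair the two: attach then alloc on setup, free then detach on teardown.
 */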
static int
sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
	   enum otx2_sso_lf_type type, uint16_t nb_lf, uint8_t alloc)
{
	void *rsp;
	int rc;

	if (alloc) {
		if (type == SSO_LF_GGRP) {
			struct sso_lf_alloc_req *req_ggrp;

			req_ggrp = otx2_mbox_alloc_msg_sso_lf_alloc(mbox);
			req_ggrp->hwgrps = nb_lf;
		} else {
			struct ssow_lf_alloc_req *req_hws;

			req_hws = otx2_mbox_alloc_msg_ssow_lf_alloc(mbox);
			req_hws->hws = nb_lf;
		}
	} else {
		if (type == SSO_LF_GGRP) {
			struct sso_lf_free_req *req_ggrp;

			req_ggrp = otx2_mbox_alloc_msg_sso_lf_free(mbox);
			req_ggrp->hwgrps = nb_lf;
		} else {
			struct ssow_lf_free_req *req_hws;

			req_hws = otx2_mbox_alloc_msg_ssow_lf_free(mbox);
			req_hws->hws = nb_lf;
		}
	}

	/* Wait indefinitely for the AF response. */
	rc = otx2_mbox_process_msg_tmo(mbox, (void **)&rsp, ~0);
	if (rc < 0)
		return rc;

	if (alloc && type == SSO_LF_GGRP) {
		struct sso_lf_alloc_rsp *rsp_ggrp = rsp;

		dev->xaq_buf_size = rsp_ggrp->xaq_buf_size;
		dev->xae_waes = rsp_ggrp->xaq_wq_entries;
		dev->iue = rsp_ggrp->in_unit_entries;
	}

	return 0;
}
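/* The xaq_buf_size, xae_waes (work entries per XAQ buffer) and iue
 * (in-unit entries) captured from the GGRP alloc response are consumed
 * later by sso_xaq_allocate() to size the external admission queue pool.
 */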
static void
otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);
}
static int
sso_configure_ports(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	uint8_t nb_lf;
	int rc;

	otx2_sso_dbg("Configuring event ports %d", dev->nb_event_ports);

	nb_lf = dev->nb_event_ports;
	/* Ask AF to attach required LFs. */
	rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
	if (rc < 0) {
		otx2_err("Failed to attach SSO GWS LF");
		return -ENODEV;
	}

	if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
		sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
		otx2_err("Failed to init SSO GWS LF");
		return -ENODEV;
	}

	return 0;
}
static int
sso_configure_queues(const struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	uint8_t nb_lf;
	int rc;

	otx2_sso_dbg("Configuring event queues %d", dev->nb_event_queues);

	nb_lf = dev->nb_event_queues;
	/* Ask AF to attach required LFs. */
	rc = sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, true);
	if (rc < 0) {
		otx2_err("Failed to attach SSO GGRP LF");
		return -ENODEV;
	}

	if (sso_lf_cfg(dev, mbox, SSO_LF_GGRP, nb_lf, true) < 0) {
		sso_hw_lf_cfg(mbox, SSO_LF_GGRP, nb_lf, false);
		otx2_err("Failed to init SSO GGRP LF");
		return -ENODEV;
	}

	return 0;
}
static int
sso_xaq_allocate(struct otx2_sso_evdev *dev)
{
	const struct rte_memzone *mz;
	struct npa_aura_s *aura;
	static int reconfig_cnt;
	char pool_name[RTE_MEMZONE_NAMESIZE];
	uint32_t xaq_cnt;
	int rc;

	if (dev->xaq_pool)
		rte_mempool_free(dev->xaq_pool);

	/* Allocate memory for the add-work backpressure (flow control)
	 * counter, followed by the NPA aura context that references it.
	 */
	mz = rte_memzone_lookup(OTX2_SSO_FC_NAME);
	if (mz == NULL)
		mz = rte_memzone_reserve_aligned(OTX2_SSO_FC_NAME,
						 OTX2_ALIGN +
						 sizeof(struct npa_aura_s),
						 rte_socket_id(),
						 RTE_MEMZONE_IOVA_CONTIG,
						 OTX2_ALIGN);
	if (mz == NULL) {
		otx2_err("Failed to allocate mem for fcmem");
		return -ENOMEM;
	}

	dev->fc_iova = mz->iova;
	dev->fc_mem = mz->addr;

	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
	memset(aura, 0, sizeof(struct npa_aura_s));

	aura->fc_ena = 1;
	aura->fc_addr = dev->fc_iova;
	aura->fc_hyst_bits = 0; /* Store count on all updates */

	/* Taken from HRM 14.3.3(4) */
	xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
	xaq_cnt += (dev->iue / dev->xae_waes) +
		   (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);

	otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
	/* Set up the XAQ pool based on the number of event queues. */
	snprintf(pool_name, sizeof(pool_name), "otx2_xaq_buf_pool_%d",
		 reconfig_cnt);
	dev->xaq_pool = (void *)rte_mempool_create_empty(pool_name,
			xaq_cnt, dev->xaq_buf_size, 0, 0,
			rte_socket_id(), 0);

	if (dev->xaq_pool == NULL) {
		otx2_err("Unable to create empty mempool.");
		rte_memzone_free(mz);
		return -ENOMEM;
	}

	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
					rte_mbuf_platform_mempool_ops(), aura);
	if (rc != 0) {
		otx2_err("Unable to set xaqpool ops.");
		goto alloc_fail;
	}

	rc = rte_mempool_populate_default(dev->xaq_pool);
	if (rc < 0) {
		otx2_err("Unable to populate xaqpool.");
		goto alloc_fail;
	}

	reconfig_cnt++;
	/* When SW does addwork (enqueue), check whether there is space in
	 * XAQ by comparing the count at fc_addr above against the xaq_lmt
	 * calculated below. A minimum headroom (OTX2_SSO_XAQ_SLACK / 2) is
	 * kept so the SSO can prefetch XAQ buffers into its per-group cache
	 * even before enqueue is called.
	 */
	dev->xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2 *
				  dev->nb_event_queues);
	dev->nb_xaq_cfg = xaq_cnt;

	return 0;

alloc_fail:
	rte_mempool_free(dev->xaq_pool);
	rte_memzone_free(mz);
	return rc;
}
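/* Worked example of the sizing above (illustrative numbers only): with
 * nb_event_queues = 2, iue = 4096 and xae_waes = 16, the pool holds
 * xaq_cnt = 2 * OTX2_SSO_XAQ_CACHE_CNT + 4096 / 16 +
 * OTX2_SSO_XAQ_SLACK * 2 buffers, and software may keep adding work while
 * the count at fc_mem stays below
 * xaq_lmt = xaq_cnt - (OTX2_SSO_XAQ_SLACK / 2) * 2.
 */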
static int
sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct sso_hw_setconfig *req;

	otx2_sso_dbg("Configuring XAQ for GGRPs");
	req = otx2_mbox_alloc_msg_sso_hw_setconfig(mbox);
	req->npa_pf_func = otx2_npa_pf_func_get();
	req->npa_aura_id = npa_lf_aura_handle_to_aura(dev->xaq_pool->pool_id);
	req->hwgrps = dev->nb_event_queues;

	return otx2_mbox_process(mbox);
}
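/* The aura id sent here ties the NPA-backed mempool created in
 * sso_xaq_allocate() to every hardware group, letting the SSO allocate
 * and free XAQ buffers from that pool on its own.
 */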
static void
sso_lf_teardown(struct otx2_sso_evdev *dev,
		enum otx2_sso_lf_type lf_type)
{
	uint8_t nb_lf;

	if (lf_type == SSO_LF_GGRP)
		nb_lf = dev->nb_event_queues;
	else
		nb_lf = dev->nb_event_ports;

	/* Free the LFs first, then ask the AF to detach them. */
	sso_lf_cfg(dev, dev->mbox, lf_type, nb_lf, false);
	sso_hw_lf_cfg(dev->mbox, lf_type, nb_lf, false);
}
static int
otx2_sso_configure(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;
	int rc;

	sso_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;

	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;

	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		otx2_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	if (conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		otx2_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_dequeue_depth > 1) {
		otx2_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		otx2_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	if (dev->nb_event_queues) {
		/* De-init any previously configured queues. */
		sso_lf_teardown(dev, SSO_LF_GGRP);
	}
	if (dev->nb_event_ports) {
		/* De-init any previously configured ports. */
		sso_lf_teardown(dev, SSO_LF_GWS);
	}

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	if (sso_configure_ports(event_dev)) {
		otx2_err("Failed to configure event ports");
		return -ENODEV;
	}

	if (sso_configure_queues(event_dev) < 0) {
		otx2_err("Failed to configure event queues");
		rc = -ENODEV;
		goto teardown_hws;
	}

	if (sso_xaq_allocate(dev) < 0) {
		rc = -ENOMEM;
		goto teardown_hwggrp;
	}

	rc = sso_ggrp_alloc_xaq(dev);
	if (rc < 0) {
		otx2_err("Failed to alloc xaq to ggrp %d", rc);
		goto teardown_hwggrp;
	}

	dev->configured = 1;
	rte_mb();

	return 0;
teardown_hwggrp:
	sso_lf_teardown(dev, SSO_LF_GGRP);
teardown_hws:
	sso_lf_teardown(dev, SSO_LF_GWS);
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;
	dev->configured = 0;
	return rc;
}
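/* Unwind ordering above: an XAQ failure releases GGRP then GWS LFs via
 * the fall-through labels, a queue-setup failure releases only the GWS
 * LFs already configured, and a port-setup failure has nothing to unwind.
 */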
static void
otx2_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
static int
otx2_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
		     const struct rte_event_queue_conf *queue_conf)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct sso_grp_priority *req;
	int rc;

	sso_func_trace("Queue=%d prio=%d", queue_id, queue_conf->priority);

	req = otx2_mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
	req->grp = queue_id;
	req->weight = 0xFF;
	req->affinity = 0xFF;
	/* Normalize <0-255> to <0-7> */
	req->priority = queue_conf->priority / 32;

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to set priority queue=%d", queue_id);
		return rc;
	}

	return 0;
}
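/* Example of the normalization above: RTE_EVENT_DEV_PRIORITY_HIGHEST (0)
 * maps to hardware priority 0, RTE_EVENT_DEV_PRIORITY_NORMAL (128) to
 * 128 / 32 = 4 and RTE_EVENT_DEV_PRIORITY_LOWEST (255) to 255 / 32 = 7.
 */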
/* Initialize and register the event driver with the DPDK application */
static struct rte_eventdev_ops otx2_sso_ops = {
	.dev_infos_get = otx2_sso_info_get,
	.dev_configure = otx2_sso_configure,
	.queue_def_conf = otx2_sso_queue_def_conf,
	.queue_setup = otx2_sso_queue_setup,
	.queue_release = otx2_sso_queue_release,
};
static int
otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
				       sizeof(struct otx2_sso_evdev),
				       otx2_sso_init);
}

static int
otx2_sso_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, otx2_sso_fini);
}
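/* rte_event_pmd_pci_probe() allocates the rte_eventdev together with
 * sizeof(struct otx2_sso_evdev) bytes of private data and then invokes
 * otx2_sso_init() on it; remove mirrors this through otx2_sso_fini().
 */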
static const struct rte_pci_id pci_sso_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF) },
	{ .vendor_id = 0, }, /* sentinel */
};
static struct rte_pci_driver pci_sso = {
	.id_table = pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = otx2_sso_probe,
	.remove = otx2_sso_remove,
};
int
otx2_sso_init(struct rte_eventdev *event_dev)
{
	struct free_rsrcs_rsp *rsrc_cnt;
	struct rte_pci_device *pci_dev;
	struct otx2_sso_evdev *dev;
	int rc;

	event_dev->dev_ops = &otx2_sso_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dev = sso_pmd_priv(event_dev);

	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

	/* Initialize the base otx2_dev object */
	rc = otx2_dev_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Failed to initialize otx2_dev rc=%d", rc);
		goto error;
	}

	/* Get SSO and SSOW MSIX rsrc cnt */
	otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
	rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
	if (rc < 0) {
		otx2_err("Unable to get free rsrc count");
		goto otx2_dev_uninit;
	}
	otx2_sso_dbg("SSO %d SSOW %d NPA %d provisioned", rsrc_cnt->sso,
		     rsrc_cnt->ssow, rsrc_cnt->npa);

	dev->max_event_ports = RTE_MIN(rsrc_cnt->ssow, OTX2_SSO_MAX_VHWS);
	dev->max_event_queues = RTE_MIN(rsrc_cnt->sso, OTX2_SSO_MAX_VHGRP);
	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc < 0) {
		otx2_err("Unable to init NPA lf. It might not be provisioned");
		goto otx2_dev_uninit;
	}

	dev->drv_inited = true;
	dev->is_timeout_deq = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	if (!dev->max_event_ports || !dev->max_event_queues) {
		otx2_err("Not enough eventdev resource queues=%d ports=%d",
			 dev->max_event_queues, dev->max_event_ports);
		rc = -ENODEV;
		goto otx2_npa_lf_uninit;
	}

	otx2_sso_pf_func_set(dev->pf_func);
	otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		     event_dev->data->name, dev->max_event_queues,
		     dev->max_event_ports);

	return 0;

otx2_npa_lf_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	return rc;
}
int
otx2_sso_fini(struct rte_eventdev *event_dev)
{
	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
	struct rte_pci_device *pci_dev;

	/* For secondary processes, nothing to be done */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);

	if (!dev->drv_inited)
		goto otx2_dev_uninit;

	dev->drv_inited = false;
	otx2_npa_lf_fini();

otx2_dev_uninit:
	if (otx2_npa_lf_active(dev)) {
		otx2_info("Common resource in use by other devices");
		return -EAGAIN;
	}

	otx2_dev_fini(pci_dev, dev);

	return 0;
}
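/* otx2_dev_fini() is deferred while otx2_npa_lf_active() reports that the
 * shared NPA LF is still referenced by sibling otx2 devices; the last
 * user performs the common teardown.
 */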
RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");