/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_eventdev.h"

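/* Report the SSO event device's capabilities and resource limits to the
 * application.
 */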
void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
		  struct rte_event_dev_info *dev_info)
{
	dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
	dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
	dev_info->max_event_queues = dev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = dev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = dev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
				  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
				  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
				  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
				  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
				  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
				  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

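/* Sanity-check the requested eventdev configuration against the limits
 * advertised by the SSO device before it is committed.
 */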
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;

	deq_tmo_ns = conf->dequeue_timeout_ns;

	/* A zero timeout falls back to the device minimum. */
	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;
	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		plt_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	if (!conf->nb_event_queues || !conf->nb_event_ports ||
	    conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		plt_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_dequeue_depth > 1) {
		plt_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		plt_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	return 0;
}

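/* Default queue configuration: all schedule types allowed, normal priority. */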
void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

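/* Default port configuration: single-event enqueue/dequeue depth. */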
void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
		       struct rte_event_port_conf *port_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = dev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

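/* Probe-time initialization: stash the device pointer in a named memzone
 * and bring up the base RoC SSO object.
 */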
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
	const struct rte_memzone *mz = NULL;
	struct rte_pci_device *pci_dev;
	struct cnxk_sso_evdev *dev;
	int rc;

	mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
				 SOCKET_ID_ANY, 0);
	if (mz == NULL) {
		plt_err("Failed to create eventdev memzone");
		return -ENOMEM;
	}

	dev = cnxk_sso_pmd_priv(event_dev);
	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
	dev->sso.pci_dev = pci_dev;

	*(uint64_t *)mz->addr = (uint64_t)dev;

	/* Initialize the base cnxk_dev object */
	rc = roc_sso_dev_init(&dev->sso);
	if (rc < 0) {
		plt_err("Failed to initialize RoC SSO rc=%d", rc);
		goto error;
	}

	dev->is_timeout_deq = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	return 0;

error:
	rte_memzone_free(mz);
	return rc;
}

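/* Tear down RoC SSO resources; only the primary process owns them. */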
int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	/* For secondary processes, nothing to be done */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	roc_sso_rsrc_fini(&dev->sso);
	roc_sso_dev_fini(&dev->sso);

	return 0;
}

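/* PCI remove callback: runs cnxk_sso_fini() through the eventdev PCI helper. */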
int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}