/* SPDX-License-Identifier: BSD-3-Clause */

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <rte_cycles_64.h>

#include <dpaa_ethdev.h>
#include "dpaa_eventdev.h"
#include <dpaa_mempool.h>

/*
 * Clarifications:
 * Eventdev = Virtual Instance for SoC
 * Eventport = Portal Instance
 * Eventqueue = Channel Instance
 * 1 Eventdev can have N Eventqueues
 */

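/*
 * Usage sketch (illustrative only, not part of the driver): an
 * application drives this PMD through the generic eventdev API. The
 * sequence below uses only standard rte_event_* calls; dev_id is
 * whatever device id the application resolved for this eventdev.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &cfg);
 */
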
static int
dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	uint64_t cycles_per_second;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	/* Convert a nanosecond timeout into timer cycles ("ticks") */
	cycles_per_second = rte_get_timer_hz();
	*timeout_ticks = ns * (cycles_per_second / NS_PER_S);

	return 0;
}

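/*
 * Worked example for the conversion above: with a 1 GHz timer,
 * cycles_per_second / NS_PER_S == 1, so ticks equal nanoseconds.
 * Note the integer division truncates for sub-GHz timer rates
 * (e.g. 700 MHz gives 700000000 / 1000000000 == 0 ticks).
 */
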
static void
dpaa_event_dev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	dev_info->driver_name = "event_dpaa";
	dev_info->min_dequeue_timeout_ns =
		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_event_queues =
		DPAA_EVENT_MAX_QUEUES;
	dev_info->max_event_queue_flows =
		DPAA_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports =
		DPAA_EVENT_MAX_EVENT_PORT;
	dev_info->max_event_port_dequeue_depth =
		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	/*
	 * TODO: Need to find out how to fetch this info
	 * from the kernel or somewhere else.
	 */
	dev_info->max_num_events =
		DPAA_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap =
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_DRV_FUNC_TRACE();

	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout method is per-dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use the timeout value given in each dequeue operation,
		 * so invalidate this global timeout value.
		 */
		priv->dequeue_timeout_ns = 0;
	}

	ch_id = rte_malloc("dpaa-channels",
			   sizeof(uint32_t) * priv->nb_event_queues,
			   RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		EVENTDEV_DRV_ERR("Failed to allocate memory for dpaa channels\n");
		return -ENOMEM;
	}
	/* Create requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		EVENTDEV_DRV_ERR("Failed to create internal channel\n");
		rte_free(ch_id);
		return ret;
	}
	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Let's prepare the event ports */
	memset(&priv->ports[0], 0,
	       sizeof(struct dpaa_port) * priv->nb_event_ports);
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		for (i = 0; i < priv->nb_event_ports; i++) {
			priv->ports[i].timeout =
				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
		}
	} else if (priv->dequeue_timeout_ns == 0) {
		for (i = 0; i < priv->nb_event_ports; i++) {
			dpaa_event_dequeue_timeout_ticks(NULL,
				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
				&priv->ports[i].timeout);
		}
	} else {
		for (i = 0; i < priv->nb_event_ports; i++) {
			dpaa_event_dequeue_timeout_ticks(NULL,
				priv->dequeue_timeout_ns,
				&priv->ports[i].timeout);
		}
	}
	/*
	 * TODO: Currently portals are affined to threads. The maximum
	 * number of threads that can be created equals the number of lcores.
	 */
	rte_free(ch_id);
	EVENTDEV_DRV_LOG("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}

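/*
 * Timeout-mode sketch (illustrative): if the application sets
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT, the global timeout is
 * invalidated above and the timeout_ticks argument of each dequeue
 * call is used instead:
 *
 *	cfg.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
 *	rte_event_dev_configure(dev_id, &cfg);
 *	...
 *	nb = rte_event_dequeue_burst(dev_id, port_id, evs,
 *				     RTE_DIM(evs), timeout_ticks);
 */
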
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);
}

static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			  struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
}

static int
dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		       const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_DRV_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		EVENTDEV_DRV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

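/*
 * Queue-setup sketch (illustrative): only the PARALLEL and ATOMIC
 * schedule types pass the check above, so an application would
 * request, for example:
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	rte_event_queue_setup(dev_id, 0, &qconf);
 */
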
static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static int
dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		      const struct rte_event_port_conf *port_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(port_conf);
	dev->data->ports[port_id] = &eventdev->ports[port_id];

	return 0;
}

static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(port);
}

static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(priorities);

	/* First check that the input configuration is valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return i;
}

static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	/* Nothing is linked; report all requested queues as unlinked */
	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	event_port->num_linked_evq = event_port->num_linked_evq - i;

	return i;
}

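/*
 * Link sketch (illustrative): once queues and ports are set up, the
 * application maps queues to a port. This driver ignores the
 * priorities array, so NULL (normal priority) is sufficient:
 *
 *	uint8_t q = 0;
 *
 *	rte_event_port_link(dev_id, port_id, &q, NULL, 1);
 */
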
static const struct rte_eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get = dpaa_event_dev_info_get,
	.dev_configure = dpaa_event_dev_configure,
	.dev_start = dpaa_event_dev_start,
	.dev_stop = dpaa_event_dev_stop,
	.dev_close = dpaa_event_dev_close,
	.queue_def_conf = dpaa_event_queue_def_conf,
	.queue_setup = dpaa_event_queue_setup,
	.queue_release = dpaa_event_queue_release,
	.port_def_conf = dpaa_event_port_default_conf_get,
	.port_setup = dpaa_event_port_setup,
	.port_release = dpaa_event_port_release,
	.port_link = dpaa_event_port_link,
	.port_unlink = dpaa_event_port_unlink,
	.timeout_ticks = dpaa_event_dequeue_timeout_ticks,
};

static int
dpaa_event_dev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		EVENTDEV_DRV_ERR("Failed to create eventdev vdev %s", name);
		return -EFAULT;
	}

	eventdev->dev_ops = &dpaa_eventdev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

	return 0;
}

static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	EVENTDEV_DRV_LOG("Initializing %s", name);

	return dpaa_event_dev_create(name);
}

static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	EVENTDEV_DRV_LOG("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
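
/*
 * Instantiation sketch (illustrative): as a vdev PMD, the device is
 * created from the EAL command line with --vdev=<name>, where <name>
 * is whatever EVENTDEV_NAME_DPAA_PMD expands to, or programmatically
 * with rte_vdev_init(<name>, NULL).
 */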