/* SPDX-License-Identifier: BSD-3-Clause */

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <rte_cycles.h>

#include <dpaa_ethdev.h>
#include "dpaa_eventdev.h"
#include <dpaa_mempool.h>

/*
 * Clarifications:
 * Eventdev   = Virtual instance for the SoC
 * Eventport  = Portal instance
 * Eventqueue = Channel instance
 * One eventdev can have N eventqueues
 */

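/*
 * Concretely (illustrative only, the queue count is assumed): an eventdev
 * configured with nb_event_queues = 4 makes dpaa_event_dev_configure()
 * below allocate 4 QMan pool channels via qman_alloc_pool_range(), one
 * per event queue, while each event port maps onto the software portal
 * of the thread that polls it.
 */
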
static int
dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	uint64_t cycles_per_second;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	cycles_per_second = rte_get_timer_hz();
	*timeout_ticks = ns * (cycles_per_second / NS_PER_S);

	return 0;
}

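/*
 * Worked example (illustrative, clock rate assumed): with a 1.2 GHz
 * timer, cycles_per_second / NS_PER_S = 1200000000 / 1000000000 = 1 in
 * integer arithmetic, so ns = 100000 (100 us) yields 100000 ticks. The
 * truncating division drops the fractional 0.2 cycles/ns, so on such
 * clocks the effective wait is slightly shorter than requested.
 */
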
static void
dpaa_eventq_portal_add(u16 ch_id)
{
	uint32_t sdqcr;

	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
	qman_static_dequeue_add(sdqcr, NULL);
}

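/*
 * Note: QM_SDQCR_CHANNELS_POOL_CONV() maps a pool-channel id onto its bit
 * in the QMan static dequeue command register (SDQCR). Adding that bit
 * makes the calling thread's affine portal include the channel in its
 * static dequeue scan, which is how a linked event queue gets serviced.
 */
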
static uint16_t
dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
			 uint16_t nb_events)
{
	uint16_t i;
	struct rte_mbuf *mbuf;

	RTE_SET_USED(port);

	/* Release all the contexts saved previously */
	for (i = 0; i < nb_events; i++) {
		switch (ev[i].op) {
		case RTE_EVENT_OP_RELEASE:
			qman_dca_index(ev[i].impl_opaque, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
			break;
		default:
			break;
		}
	}

	return nb_events;
}

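/*
 * Background (reconstructed from the code above, not normative): for
 * atomic queues the DQRR entry that delivered an event is held until the
 * application signals RTE_EVENT_OP_RELEASE. qman_dca_index() issues the
 * discrete consumption acknowledgement for that DQRR slot, after which
 * the per-lcore held bitmap and count are updated so the slot can be
 * reused.
 */
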
static uint16_t
dpaa_event_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa_event_enqueue_burst(port, ev, 1);
}

static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i;
	uint64_t wait_time, cur_ticks, start_ticks;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
		wait_time = timeout_ticks;
	else
		wait_time = portal->timeout;

	/* Let's dequeue the frames */
	start_ticks = rte_get_timer_cycles();
	wait_time += start_ticks;
	do {
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames != 0)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time);

	return num_frames;
}

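/*
 * Note on the wait loop above: a port configured with a global timeout
 * ignores the caller's timeout_ticks (portal->timeout wins), while
 * DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID selects the per-dequeue value.
 * The loop busy-polls the portal until frames arrive or the deadline in
 * timer cycles passes; it never sleeps.
 */
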
static uint16_t
dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa_event_dev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	dev_info->driver_name = "event_dpaa";
	dev_info->min_dequeue_timeout_ns =
		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_event_queues =
		DPAA_EVENT_MAX_QUEUES;
	dev_info->max_event_queue_flows =
		DPAA_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports =
		DPAA_EVENT_MAX_EVENT_PORT;
	dev_info->max_event_port_dequeue_depth =
		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	/*
	 * TODO: Need to find out how to fetch this info
	 * from the kernel or elsewhere.
	 */
	dev_info->max_num_events =
		DPAA_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap =
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_DRV_FUNC_TRACE();

	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * The timeout given to each dequeue operation is used
		 * instead, so invalidate the global timeout value.
		 */
		priv->dequeue_timeout_ns = 0;
	}

	ch_id = rte_malloc("dpaa-channels",
			   sizeof(uint32_t) * priv->nb_event_queues,
			   RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		EVENTDEV_DRV_ERR("Failed to allocate memory for dpaa channels\n");
		return -ENOMEM;
	}

	/* Create requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		EVENTDEV_DRV_ERR("Failed to create internal channel\n");
		rte_free(ch_id);
		return ret;
	}
	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Let's prepare the event ports */
	memset(&priv->ports[0], 0,
	       sizeof(struct dpaa_port) * priv->nb_event_ports);
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		for (i = 0; i < priv->nb_event_ports; i++) {
			priv->ports[i].timeout =
				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
		}
	} else if (priv->dequeue_timeout_ns == 0) {
		for (i = 0; i < priv->nb_event_ports; i++) {
			dpaa_event_dequeue_timeout_ticks(NULL,
				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
				&priv->ports[i].timeout);
		}
	} else {
		for (i = 0; i < priv->nb_event_ports; i++) {
			dpaa_event_dequeue_timeout_ticks(NULL,
				priv->dequeue_timeout_ns,
				&priv->ports[i].timeout);
		}
	}

	/*
	 * TODO: Currently portals are affined to threads; at most as many
	 * threads as there are lcores can be created.
	 */
	rte_free(ch_id);
	EVENTDEV_DRV_LOG("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}

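/*
 * Usage sketch (application side, illustrative only): setting
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT in event_dev_cfg marks every
 * port's timeout DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID, so the
 * timeout_ticks argument of rte_event_dequeue_burst() is honoured.
 * Otherwise the global dequeue_timeout_ns (or the driver default
 * DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS when it is 0) is converted to
 * cycles once here and reused for all dequeues on the port.
 */
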
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);
}

static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			  struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
}

static int
dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		       const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_DRV_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		EVENTDEV_DRV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static int
dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		      const struct rte_event_port_conf *port_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(port_conf);
	dev->data->ports[port_id] = &eventdev->ports[port_id];

	return 0;
}

static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(port);
}

static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(priorities);

	/* First check that the input configuration is valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}

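/*
 * Cross-reference: linking only records the channel ids here; the
 * channels are added to the calling thread's portal lazily, on the first
 * dpaa_event_dequeue_burst() for this port (see the is_port_linked check
 * there).
 */
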
static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}

static int
dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
				   const struct rte_eth_dev *eth_dev,
				   uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_DRV_FUNC_TRACE();

	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				EVENTDEV_DRV_ERR(
					"Event queue attach failed:%d\n", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		EVENTDEV_DRV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);

	return ret;

detach_configured_queues:
	for (i = (i - 1); i >= 0; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
				    const struct rte_eth_dev *eth_dev,
				    int32_t rx_queue_id)
{
	int ret, i;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_detach(eth_dev, i);
			if (ret)
				EVENTDEV_DRV_ERR(
					"Event queue detach failed:%d\n", ret);
		}
		return 0;
	}

	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret)
		EVENTDEV_DRV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);

	return ret;
}

static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static struct rte_eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get = dpaa_event_dev_info_get,
	.dev_configure = dpaa_event_dev_configure,
	.dev_start = dpaa_event_dev_start,
	.dev_stop = dpaa_event_dev_stop,
	.dev_close = dpaa_event_dev_close,
	.queue_def_conf = dpaa_event_queue_def_conf,
	.queue_setup = dpaa_event_queue_setup,
	.queue_release = dpaa_event_queue_release,
	.port_def_conf = dpaa_event_port_default_conf_get,
	.port_setup = dpaa_event_port_setup,
	.port_release = dpaa_event_port_release,
	.port_link = dpaa_event_port_link,
	.port_unlink = dpaa_event_port_unlink,
	.timeout_ticks = dpaa_event_dequeue_timeout_ticks,
	.eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
	.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
};

static int
dpaa_event_dev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		EVENTDEV_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa_eventdev_ops;
	eventdev->enqueue = dpaa_event_enqueue;
	eventdev->enqueue_burst = dpaa_event_enqueue_burst;
	eventdev->dequeue = dpaa_event_dequeue;
	eventdev->dequeue_burst = dpaa_event_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	EVENTDEV_DRV_LOG("Initializing %s", name);

	return dpaa_event_dev_create(name);
}

static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	EVENTDEV_DRV_LOG("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);