/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 NXP
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/select.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <dpaa_ethdev.h>
#include "dpaa_eventdev.h"
#include <dpaa_mempool.h>
/*
 * Clarifications:
 * Eventdev = Virtual instance for the SoC
 * Eventport = Portal instance
 * Eventqueue = Channel instance
 * 1 Eventdev can have N Eventqueues
 */
#define DISABLE_INTR_MODE "disable_intr"
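/*
 * Illustrative note (an assumption, not asserted by this file): interrupt
 * mode is the default, and dpaa_event_check_flags() below switches the
 * device to poll-mode dequeue when the vdev is created with this devarg
 * set to 1, e.g. on the EAL command line (vdev name as expanded from
 * EVENTDEV_NAME_DPAA_PMD):
 *
 *	--vdev=<EVENTDEV_NAME_DPAA_PMD>,disable_intr=1
 */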
static int
dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	uint64_t cycles_per_second;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	cycles_per_second = rte_get_timer_hz();
	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;

	return 0;
}
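/*
 * Worked example with hypothetical numbers: if rte_get_timer_hz() reports
 * 700000000 (a 700 MHz timer), a request of ns = 100000 (100 us) converts
 * to (100000 * 700000000) / NS_PER_S = 70000 timeout ticks.
 */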
static int
dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
				      uint64_t *timeout_ticks)
{
	RTE_SET_USED(dev);

	/* In interrupt mode the timeout is kept in microseconds */
	*timeout_ticks = ns / 1000;

	return 0;
}
static void
dpaa_eventq_portal_add(u16 ch_id)
{
	uint32_t sdqcr;

	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
	qman_static_dequeue_add(sdqcr, NULL);
}
static uint16_t
dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
			 uint16_t nb_events)
{
	uint16_t i;
	struct rte_mbuf *mbuf;

	RTE_SET_USED(port);
	/* Release all the contexts saved previously */
	for (i = 0; i < nb_events; i++) {
		switch (ev[i].op) {
		case RTE_EVENT_OP_RELEASE:
			qman_dca_index(ev[i].impl_opaque, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
			break;
		default:
			break;
		}
	}

	return nb_events;
}
static uint16_t
dpaa_event_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa_event_enqueue_burst(port, ev, 1);
}
static void drain_4_bytes(int fd, fd_set *fdset)
{
	if (FD_ISSET(fd, fdset)) {
		/* Drain the 4-byte IRQ count posted on the UIO fd */
		uint32_t junk;
		ssize_t sjunk = read(fd, &junk, sizeof(junk));

		if (sjunk != sizeof(junk))
			DPAA_EVENTDEV_ERR("UIO irq read error");
	}
}
static inline int
dpaa_event_dequeue_wait(uint64_t timeout_ticks)
{
	int fd_qman, nfds;
	int ret;
	fd_set readset;

	/* Go into (and back out of) IRQ mode for each select;
	 * it simplifies exit-path considerations and other
	 * potential nastiness.
	 */
	struct timeval tv = {
		.tv_sec = timeout_ticks / 1000000,
		.tv_usec = timeout_ticks % 1000000
	};

	fd_qman = qman_thread_fd();
	nfds = fd_qman + 1;
	FD_ZERO(&readset);
	FD_SET(fd_qman, &readset);

	qman_irqsource_add(QM_PIRQ_DQRI);

	ret = select(nfds, &readset, NULL, NULL, &tv);
	if (ret < 0)
		return ret;
	/* Calling irqsource_remove() prior to thread_irq()
	 * means thread_irq() will not process whatever caused
	 * the interrupts, however it does ensure that, once
	 * thread_irq() re-enables interrupts, they won't fire
	 * again immediately.
	 */
	qman_irqsource_remove(~0);
	drain_4_bytes(fd_qman, &readset);
	qman_thread_irq();

	return ret;
}
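/*
 * Semantics sketch for callers (derived from the code above): the return
 * value follows select() - positive when the portal fd became readable
 * (work arrived), zero on timeout, negative on error. The interrupt-mode
 * dequeue below only re-polls the portal on a positive return.
 */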
static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	wait_time_ticks += rte_get_timer_cycles();
	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
static uint16_t
dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}
static uint16_t
dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
			      uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i, irq = 0;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (irq)
			irq = 0;
		if (num_frames)
			break;
		if (wait_time_ticks) { /* wait for time */
			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
				irq = 1;
				continue;
			}
			break; /* no event after waiting */
		}
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
static uint16_t
dpaa_event_dequeue_intr(void *port,
			struct rte_event *ev,
			uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
}
static void
dpaa_event_dev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	dev_info->driver_name = "event_dpaa";
	dev_info->min_dequeue_timeout_ns = DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns = DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = DPAA_EVENT_MAX_QUEUES;
	dev_info->max_event_queue_flows = DPAA_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = DPAA_EVENT_MAX_EVENT_PORT;
	dev_info->max_event_port_dequeue_depth =
		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	/*
	 * TODO: Need to find out how to fetch this info
	 * from the kernel or somewhere else.
	 */
	dev_info->max_num_events = DPAA_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap =
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}
static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_INIT_FUNC_TRACE();
	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	ch_id = rte_malloc("dpaa-channels",
			   sizeof(uint32_t) * priv->nb_event_queues,
			   RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		DPAA_EVENTDEV_ERR("Failed to allocate memory for dpaa channels\n");
		return -ENOMEM;
	}
	/* Create requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err = %d\n",
				  priv->nb_event_queues, ret);
		rte_free(ch_id);
		return ret;
	}

	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Lets prepare event ports */
	memset(&priv->ports[0], 0,
	       sizeof(struct dpaa_port) * priv->nb_event_ports);

	/* Check dequeue timeout method is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use timeout value as given in dequeue operation,
		 * so invalidate this timeout value.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	for (i = 0; i < priv->nb_event_ports; i++) {
		if (priv->intr_mode) {
			priv->ports[i].timeout_us =
				priv->dequeue_timeout_ns / 1000;
		} else {
			uint64_t cycles_per_second;

			cycles_per_second = rte_get_timer_hz();
			priv->ports[i].timeout_us =
				(priv->dequeue_timeout_ns * cycles_per_second)
					/ NS_PER_S;
		}
	}

	/*
	 * TODO: Portals are currently affined to threads; at most as many
	 * threads as there are lcores can be created.
	 */
	rte_free(ch_id);
	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}

static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}
static void
dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			  struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
}
static int
dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		       const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}
static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}
static void
dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}
static int
dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		      const struct rte_event_port_conf *port_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);
	dev->data->ports[port_id] = &eventdev->ports[port_id];

	return 0;
}
static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port);
}
static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(priorities);

	/* First check that the input configuration is valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}
static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	if (event_port->num_linked_evq)
		event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}
static int
dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
				   const struct rte_eth_dev *eth_dev,
				   uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}
static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				DPAA_EVENTDEV_ERR(
					"Event Queue attach failed:%d\n", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);

	return ret;

detach_configured_queues:

	for (i = (i - 1); i >= 0 ; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}
static int
dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
				    const struct rte_eth_dev *eth_dev,
				    int32_t rx_queue_id)
{
	int ret = 0, i;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_detach(eth_dev, i);
			if (ret)
				DPAA_EVENTDEV_ERR(
					"Event Queue detach failed:%d\n", ret);
		}

		return 0;
	}

	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);

	return ret;
}
static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
static struct rte_eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get = dpaa_event_dev_info_get,
	.dev_configure = dpaa_event_dev_configure,
	.dev_start = dpaa_event_dev_start,
	.dev_stop = dpaa_event_dev_stop,
	.dev_close = dpaa_event_dev_close,
	.queue_def_conf = dpaa_event_queue_def_conf,
	.queue_setup = dpaa_event_queue_setup,
	.queue_release = dpaa_event_queue_release,
	.port_def_conf = dpaa_event_port_default_conf_get,
	.port_setup = dpaa_event_port_setup,
	.port_release = dpaa_event_port_release,
	.port_link = dpaa_event_port_link,
	.port_unlink = dpaa_event_port_unlink,
	.timeout_ticks = dpaa_event_dequeue_timeout_ticks,
	.eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
	.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
};
static int flag_check_handler(__rte_unused const char *key,
		const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}
static int
dpaa_event_check_flags(const char *params)
{
	struct rte_kvargs *kvlist;

	if (params == NULL || params[0] == '\0')
		return 0;

	kvlist = rte_kvargs_parse(params, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Interrupt mode is disabled when the key-value pair
	 * disable_intr=1 is present.
	 */
	if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
			       flag_check_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
static int
dpaa_event_dev_create(const char *name, const char *params)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}
	priv = eventdev->data->dev_private;

	eventdev->dev_ops = &dpaa_eventdev_ops;
	eventdev->enqueue = dpaa_event_enqueue;
	eventdev->enqueue_burst = dpaa_event_enqueue_burst;

	if (dpaa_event_check_flags(params)) {
		eventdev->dequeue = dpaa_event_dequeue;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
	} else {
		priv->intr_mode = 1;
		eventdev->dev_ops->timeout_ticks =
				dpaa_event_dequeue_timeout_ticks_intr;
		eventdev->dequeue = dpaa_event_dequeue_intr;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
	}

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

	return 0;
fail:
	return -EFAULT;
}
static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	const char *params;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Initializing %s", name);

	params = rte_vdev_device_args(vdev);

	return dpaa_event_dev_create(name, params);
}
static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
		DISABLE_INTR_MODE "=<int>");
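/*
 * Application-side sketch (illustrative only; these are generic eventdev
 * API calls, and the configuration values below are hypothetical). Once
 * the vdev has been probed, a minimal setup would look like:
 *
 *	uint8_t dev_id = 0;	// as reported by rte_event_dev_get_dev_id()
 *	struct rte_event_dev_config cfg;
 *	struct rte_event_dev_info info;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	rte_event_dev_configure(dev_id, &cfg);
 *	rte_event_queue_setup(dev_id, 0, NULL);	// default queue conf
 *	rte_event_port_setup(dev_id, 0, NULL);	// default port conf
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);	// link all queues
 *	rte_event_dev_start(dev_id);
 */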