/* SPDX-License-Identifier: BSD-3-Clause */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_eventdev_pmd_vdev.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include "dpaa2_eventdev.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/*
 * Resource mapping:
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */

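/*
 * Illustrative application bring-up (a sketch, not driver code): this
 * assumes the vdev name "event_dpaa2" registered via
 * EVENTDEV_NAME_DPAA2_PMD and default queue/port configuration; all
 * calls below are standard rte_eventdev APIs.
 *
 *	rte_vdev_init("event_dpaa2", NULL);
 *	uint8_t dev_id = rte_event_dev_get_dev_id("event_dpaa2");
 *	struct rte_event_dev_config cfg = {
 *		.nb_event_queues = 1,
 *		.nb_event_ports = 1,
 *		... remaining fields from rte_event_dev_info_get() ...
 *	};
 *	rte_event_dev_configure(dev_id, &cfg);
 *	rte_event_queue_setup(dev_id, 0, NULL);	(default queue conf)
 *	rte_event_port_setup(dev_id, 0, NULL);	(default port conf)
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);	(link all queues)
 *	rte_event_dev_start(dev_id);
 */
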
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct rte_eventdev *ev_dev =
			((struct dpaa2_io_portal_t *)port)->eventdev;
	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
	uint32_t queue_id = ev[0].queue_id;
	struct evq_info_t *evq_info = &priv->evq_info[queue_id];
	uint32_t fqid;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int ret;

	/* Affine a QBMan software portal to this lcore, if not done yet */
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_events) {
		/* Batch at most MAX_TX_RING_SLOTS (8) frames per iteration */
		frames_to_send = (nb_events >> 3) ?
			MAX_TX_RING_SLOTS : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->impl_opaque) {
				/* Forwarded atomic event: release the held
				 * DQRR entry via DCA on this enqueue.
				 */
				uint8_t dqrr_index = event->impl_opaque - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DPIO->dqrr_size--;
				DPAA2_PER_LCORE_DPIO->dqrr_held &=
					~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid the copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);
			if (!ev_temp) {
				if (!loop)
					return num_tx;
				/* Send only what has been prepared so far */
				frames_to_send = loop;
				PMD_DRV_LOG(ERR, "Unable to allocate memory");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}
		num_tx += frames_to_send;
		nb_events -= frames_to_send;
	}

	return num_tx;
}

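/*
 * Usage note (a sketch, not driver code): the burst enqueue above can
 * return fewer events than requested, e.g. when the rte_malloc() of the
 * event copy fails, so callers are expected to retry the remainder:
 *
 *	uint16_t sent = 0;
 *	while (sent < n)
 *		sent += rte_event_enqueue_burst(dev_id, port_id,
 *						&evs[sent], n - sent);
 */
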
static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;
	int ret, i = 0;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

RETRY:
	ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
			 &epoll_ev, 1, timeout_ticks);
	if (ret < 1) {
		/* Spurious interrupts can make epoll_wait fail with
		 * errno EINTR; retry epoll_wait in that case to avoid
		 * the problem.
		 */
		if (errno == EINTR) {
			PMD_DRV_LOG(DEBUG, "epoll_wait fails\n");
			if (i++ > 10)
				PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
			goto RETRY;
		}
	}
}

static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	/* Copy out the event staged by the enqueue side, then free it */
	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	/* Hold the DQRR entry; it is released by DCA when this event is
	 * enqueued again (impl_opaque carries the index, offset by one so
	 * that zero means "no held context").
	 */
	ev->impl_opaque = dqrr_index + 1;
	DPAA2_PER_LCORE_DPIO->dqrr_size++;
	DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
}

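/*
 * Sketch of the atomic round trip implemented by the handler above and
 * the DCA logic in dpaa2_eventdev_enqueue_burst() (illustrative, not
 * driver code): dequeue parks the DQRR index in ev.impl_opaque, and an
 * application that forwards the same struct rte_event leaves it intact,
 * which lets the next enqueue release the held hardware context:
 *
 *	struct rte_event ev;
 *	if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, timeout)) {
 *		ev.op = RTE_EVENT_OP_FORWARD;	(impl_opaque preserved)
 *		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *	}
 */
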
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0;

	RTE_SET_USED(port);

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;

	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
		if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
			dq = qbman_get_dqrr_from_idx(swp, i);
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_PER_LCORE_DPIO->dqrr_size--;
		}
		i++;
	}
	DPAA2_PER_LCORE_DPIO->dqrr_held = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			/* Wait only once, on the first empty poll */
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}

		fd = qbman_result_DQ_fd(dq);

		/* The FQD context carries the dpaa2_queue whose callback
		 * (parallel or atomic) fills in the event.
		 */
		rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			PMD_DRV_LOG(ERR, "Null Return VQ received\n");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	PMD_DRV_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns = DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns = DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns = DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows = DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = RTE_MAX_LCORE;
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	PMD_DRV_FUNC_TRACE();

	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
				    RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct evq_info_t *evq_info = &priv->evq_info[queue_id];

	PMD_DRV_FUNC_TRACE();

	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;

	return 0;
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->disable_implicit_release = 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port);
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	if (!dpaa2_io_portal[port_id].dpio_dev) {
		dpaa2_io_portal[port_id].dpio_dev =
				dpaa2_get_qbman_swp(port_id);
		/* Check for NULL before taking a reference */
		if (!dpaa2_io_portal[port_id].dpio_dev)
			return -1;
		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
	}

	dpaa2_io_portal[port_id].eventdev = dev;
	dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
	return 0;
}

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	int i;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &priv->evq_info[queues[i]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(
			dpaa2_portal->dpio_dev->dpio, 0,
			dpaa2_portal->dpio_dev->token,
			evq_info->dpcon->dpcon_id);
	}

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	uint8_t channel_index;
	int ret, i, n;

	PMD_DRV_FUNC_TRACE();

	/* Priorities are not configurable on this device */
	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];

		ret = dpio_add_static_dequeue_channel(
			dpaa2_portal->dpio_dev->dpio,
			CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
			evq_info->dpcon->dpcon_id, &channel_index);
		if (ret < 0) {
			PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
				    ret);
			goto err;
		}

		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}

	return (int)nb_links;

err:
	/* Roll back the links made so far */
	for (n = 0; n < i; n++) {
		evq_info = &priv->evq_info[queues[n]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(
			dpaa2_portal->dpio_dev->dpio, 0,
			dpaa2_portal->dpio_dev->token,
			evq_info->dpcon->dpcon_id);
	}
	return ret;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	/* Ticks are expressed in nanoseconds on this device (scale = 1) */
	*timeout_ticks = ns * scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	/* DPAA2 ethdevs get hardware Rx adapter capabilities */
	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

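/*
 * Illustrative Rx adapter usage against the capabilities reported above
 * (a sketch assuming adapter id 0 and ethdev port 0; not driver code):
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev.queue_id = ev_qid,
 *		.ev.sched_type = RTE_SCHED_TYPE_PARALLEL,
 *	};
 *	rte_event_eth_rx_adapter_create(0, dev_id, &port_conf);
 *	rte_event_eth_rx_adapter_queue_add(0, 0, -1, &qconf);
 *
 * With rx_queue_id == -1, dpaa2_eventdev_eth_queue_add() below attaches
 * every Rx queue of the ethdev to the event queue's DPCON.
 */
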
static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon_id, queue_conf);
		if (ret) {
			PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
				    ret);
			goto fail;
		}
	}
	return 0;

fail:
	/* Detach the queues attached before the failure */
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	PMD_DRV_FUNC_TRACE();

	/* rx_queue_id == -1 means: attach all Rx queues of the device */
	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon_id, queue_conf);
	if (ret) {
		PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
				    ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get = dpaa2_eventdev_info_get,
	.dev_configure = dpaa2_eventdev_configure,
	.dev_start = dpaa2_eventdev_start,
	.dev_stop = dpaa2_eventdev_stop,
	.dev_close = dpaa2_eventdev_close,
	.queue_def_conf = dpaa2_eventdev_queue_def_conf,
	.queue_setup = dpaa2_eventdev_queue_setup,
	.queue_release = dpaa2_eventdev_queue_release,
	.port_def_conf = dpaa2_eventdev_port_def_conf,
	.port_setup = dpaa2_eventdev_port_setup,
	.port_release = dpaa2_eventdev_port_release,
	.port_link = dpaa2_eventdev_port_link,
	.port_unlink = dpaa2_eventdev_port_unlink,
	.timeout_ticks = dpaa2_eventdev_timeout_ticks,
	.dump = dpaa2_eventdev_dump,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
};

static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Do settings to get the frame on a DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
			       DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "set_rx_q failed with err code: %d", ret);
			return ret;
		}
	}
	return 0;
}

static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		return -EFAULT;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	/* Grab DPCON/DPCI pairs until either resource runs out */
	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			PMD_DRV_ERR(
				"dpci setup failed with err code: %d", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	return 0;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, "Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);