/* SPDX-License-Identifier: BSD-3-Clause */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/epoll.h>
#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <mc/fsl_dpci.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
/* Clarifications:
 * Eventdev   = SoC Instance
 * Eventport  = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */
/* Dynamic logging identifier for the event device */
int dpaa2_logtype_event;
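
/*
 * Enqueue path: each rte_event is copied into a freshly allocated shadow
 * object and sent as a frame descriptor to the DPCI queue matching its
 * schedule type (parallel or atomic). Atomic events additionally release
 * their held DQRR entry via delayed consume (DCA) on enqueue.
 */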
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct rte_eventdev *ev_dev =
			((struct dpaa2_io_portal_t *)port)->eventdev;
	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
	uint32_t fqid;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int ret;
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	while (nb_events) {
		frames_to_send = (nb_events >> 3) ?
			MAX_TX_RING_SLOTS : nb_events;
		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
			if (event->mbuf->seqn) {
				uint8_t dqrr_index = event->mbuf->seqn - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &=
					~(1 << dqrr_index);
			}
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/* todo - need to align with hw context data */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}
		num_tx += frames_to_send;
		nb_events -= frames_to_send;
	}

	return num_tx;
}
static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}
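
/*
 * Block until the portal raises a dequeue-ready interrupt (DQRI) or the
 * timeout expires. The DPIO portal's event fd is assumed to be armed for
 * DQRI notifications; timeout_ticks is passed straight to epoll_wait(), so
 * it is interpreted in milliseconds here.
 */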
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);
	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
		   &epoll_ev, 1, timeout_ticks);
}
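
/*
 * DQRR entry callbacks, installed on the DPCI Rx queues by
 * dpaa2_eventdev_setup_dpci(). They recover the shadow rte_event stored in
 * the frame descriptor by the enqueue path and free it. Parallel entries
 * are consumed immediately; atomic entries stay held in the DQRR (tracked
 * via mbuf->seqn) until released on a later enqueue or dequeue.
 */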
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
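
/*
 * Dequeue path: first release any DQRR entries still held by this lcore for
 * atomic flows, then pull dequeue responses from the portal and dispatch
 * each one through the callback stored in the queue's FQD context.
 */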
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0;

	RTE_SET_USED(port);
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;
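
	/*
	 * Pull from the DQRR until nb_events responses have been dispatched.
	 * If the ring is initially empty and a timeout was requested, wait
	 * once for the portal interrupt before polling again.
	 */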
	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
}
static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}
static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();
	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}
static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}
static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}
static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}
static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}
static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
				    RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}
static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info =
		&priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;

	return 0;
}
static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->disable_implicit_release = 0;
}
static void
dpaa2_eventdev_port_release(void *port)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port);
}
static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	if (!dpaa2_io_portal[port_id].dpio_dev) {
		dpaa2_io_portal[port_id].dpio_dev =
				dpaa2_get_qbman_swp(port_id);
		/* Check for a valid portal before taking a reference */
		if (!dpaa2_io_portal[port_id].dpio_dev)
			return -1;
		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
	}

	dpaa2_io_portal[port_id].eventdev = dev;
	dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
	return 0;
}
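
/*
 * Linking model: an event port is a DPIO software portal and an event queue
 * is a DPCON channel. Linking registers the DPCON as a static dequeue
 * channel on the port's DPIO and enables its push-dequeue bit; unlinking
 * reverses both steps.
 */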
static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	int i;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &priv->evq_info[queues[i]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
					0, dpaa2_portal->dpio_dev->token,
					evq_info->dpcon->dpcon_id);
	}

	return (int)nb_unlinks;
}
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint8_t channel_index;
	int ret = 0, i, n;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];

		ret = dpio_add_static_dequeue_channel(
			dpaa2_portal->dpio_dev->dpio,
			CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
			evq_info->dpcon->dpcon_id, &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}

	RTE_SET_USED(priorities);

	return (int)nb_links;
err:
	for (n = 0; n < i; n++) {
		evq_info = &priv->evq_info[queues[n]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
					0, dpaa2_portal->dpio_dev->token,
					evq_info->dpcon->dpcon_id);
	}
	return ret;
}
static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000*1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	/* timeout_ticks is consumed by epoll_wait() in milliseconds */
	*timeout_ticks = ns / scale;

	return 0;
}
static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}
static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}
static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon_id, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}
static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon_id, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}
static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}
static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}
static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
static struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get    = dpaa2_eventdev_info_get,
	.dev_configure    = dpaa2_eventdev_configure,
	.dev_start        = dpaa2_eventdev_start,
	.dev_stop         = dpaa2_eventdev_stop,
	.dev_close        = dpaa2_eventdev_close,
	.queue_def_conf   = dpaa2_eventdev_queue_def_conf,
	.queue_setup      = dpaa2_eventdev_queue_setup,
	.queue_release    = dpaa2_eventdev_queue_release,
	.port_def_conf    = dpaa2_eventdev_port_def_conf,
	.port_setup       = dpaa2_eventdev_port_setup,
	.port_release     = dpaa2_eventdev_port_release,
	.port_link        = dpaa2_eventdev_port_link,
	.port_unlink      = dpaa2_eventdev_port_unlink,
	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
	.dump             = dpaa2_eventdev_dump,
	.eth_rx_adapter_caps_get  = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start     = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop      = dpaa2_eventdev_eth_stop,
};
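
/*
 * Each event queue is backed by a DPCI/DPCON pair: the DPCI Rx queues are
 * programmed to deliver their frames into the DPCON channel, and the
 * per-queue user context is set so the dequeue path can recover the
 * dpaa2_queue (and thus the parallel/atomic callback) from the FQD context.
 */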
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Do settings to get the frame on a DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
		DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}

	return 0;
}
static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	/* Pair up DPCON and DPCI devices until either pool runs out */
	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	return 0;
fail:
	return -EFAULT;
}
static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name);
}
static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}
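
/*
 * The PMD is instantiated as a virtual device on the EAL command line,
 * e.g. --vdev with the device-name string behind EVENTDEV_NAME_DPAA2_PMD
 * (defined in dpaa2_eventdev.h).
 */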
static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
RTE_INIT(dpaa2_eventdev_init_log)
{
	dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
	if (dpaa2_logtype_event >= 0)
		rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
}