/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2018 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>
/*
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */

/* Dynamic logging identifier for the event device */
int dpaa2_logtype_event;

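/*
 * Orientation only (not part of the driver): an application reaches this
 * PMD exclusively through the generic eventdev API. A minimal,
 * hypothetical flow, assuming one queue and one port with default
 * configurations, would be:
 *
 *	rte_event_dev_configure(dev_id, &cfg);
 *	rte_event_queue_setup(dev_id, 0, NULL);
 *	rte_event_port_setup(dev_id, 0, NULL);
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *	rte_event_dev_start(dev_id);
 *
 * Each port then resolves to a DPIO portal and each queue to a DPCON
 * channel, per the mapping above.
 */
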
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info;
	uint32_t fqid;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int i, n, ret;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}
	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	evq_info = &dpaa2_portal->evq_info[queue_id];

	while (nb_events) {
		/* Send at most one TX ring's worth of events per iteration */
		frames_to_send = (nb_events >> 3) ?
			MAX_TX_RING_SLOTS : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
				&& event->mbuf->seqn) {
				uint8_t dqrr_index = event->mbuf->seqn - 1;

				/* Release the held atomic context via DCA */
				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);
			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}
		num_tx += frames_to_send;
		nb_events -= frames_to_send;
	}

	return num_tx;
err:
	/* Roll back any channel mappings set up above */
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;
		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

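/*
 * Blocking dequeue support: instead of busy-polling the portal, clear any
 * pending DQRR interrupt status and sleep in epoll_wait() on the DPIO's
 * event fd until work arrives or the timeout expires. Note epoll_wait()
 * takes milliseconds, which is why dpaa2_eventdev_timeout_ticks() scales
 * nanoseconds down to milliseconds.
 */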
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);
	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
		   &epoll_ev, 1, timeout_ticks);
}

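/*
 * Dequeue-side callbacks: each DPCI Rx queue carries its struct
 * dpaa2_queue pointer in the FQD context (set up in
 * dpaa2_eventdev_setup_dpci()), so the dequeue path can recover the queue
 * from a DQRR entry and call one of the two handlers below. The parallel
 * handler consumes the DQRR entry immediately; the atomic handler keeps
 * it held until the event is released or forwarded.
 */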
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	/* Hold the DQRR entry as an atomic context; record it in the mbuf */
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

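/*
 * The seqn recorded above (DQRR index + 1) is what later lets an atomic
 * enqueue issue a DCA (discrete consumption acknowledgement) against the
 * exact DQRR slot, releasing the atomic context in hardware; see the DCA
 * handling in dpaa2_eventdev_enqueue_burst().
 */
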
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0, n;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}

	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
err:
	/* Roll back any channel mappings set up above */
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;

		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* We only support DPIOs up to the number of cores */
	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use the timeout value given in each dequeue operation,
		 * so invalidate the global timeout value here.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
				    RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}

	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->disable_implicit_release = 0;
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	char event_port_name[32];
	struct dpaa2_port *portal;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	sprintf(event_port_name, "event-port-%d", port_id);
	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
	if (!portal) {
		DPAA2_EVENTDEV_ERR("Memory allocation failure");
		return -ENOMEM;
	}

	memset(portal, 0, sizeof(struct dpaa2_port));
	dev->data->ports[port_id] = portal;
	return 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	struct dpaa2_port *portal = port;

	EVENTDEV_INIT_FUNC_TRACE();

	if (!portal)
		return;

	/* TODO: Cleanup is required when ports are in linked state. */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
}

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
		       sizeof(struct dpaa2_eventq));
		dpaa2_portal->evq_info[queues[i]].event_port = port;
		dpaa2_portal->num_linked_evq++;
	}

	return (int)nb_links;
}

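/*
 * Unlink undoes both the software bookkeeping done in port_link() and, if
 * the port was already hardware-linked by the enqueue/dequeue fast path,
 * the static dequeue channel mapping on this lcore's DPIO portal.
 */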
static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_port *dpaa2_portal = port;
	int i;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &dpaa2_portal->evq_info[queues[i]];

		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
			/* todo: dpaa2_portal shall have dpio_dev - no per lcore */
			dpio_dev = DPAA2_PER_LCORE_DPIO;
			swp = DPAA2_PER_LCORE_PORTAL;

			qbman_swp_push_set(swp,
					   evq_info->dpcon->channel_index, 0);
			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
		}
		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
		if (dpaa2_portal->num_linked_evq)
			dpaa2_portal->num_linked_evq--;
	}

	if (!dpaa2_portal->num_linked_evq)
		dpaa2_portal->is_port_linked = false;

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000 * 1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	/*
	 * The ticks are consumed by epoll_wait() in
	 * dpaa2_eventdev_dequeue_wait(), which expects milliseconds,
	 * so convert nanoseconds to milliseconds.
	 */
	*timeout_ticks = ns / scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon_id, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	/* Detach the queues attached so far */
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

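/*
 * An rx_queue_id of -1 is the Rx adapter's "all Rx queues" wildcard,
 * which is served by the add-all/del-all helpers.
 */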
static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon_id, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get = dpaa2_eventdev_info_get,
	.dev_configure = dpaa2_eventdev_configure,
	.dev_start = dpaa2_eventdev_start,
	.dev_stop = dpaa2_eventdev_stop,
	.dev_close = dpaa2_eventdev_close,
	.queue_def_conf = dpaa2_eventdev_queue_def_conf,
	.queue_setup = dpaa2_eventdev_queue_setup,
	.queue_release = dpaa2_eventdev_queue_release,
	.port_def_conf = dpaa2_eventdev_port_def_conf,
	.port_setup = dpaa2_eventdev_port_setup,
	.port_release = dpaa2_eventdev_port_release,
	.port_link = dpaa2_eventdev_port_link,
	.port_unlink = dpaa2_eventdev_port_unlink,
	.timeout_ticks = dpaa2_eventdev_timeout_ticks,
	.dump = dpaa2_eventdev_dump,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
};

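/*
 * Wire a DPCI (software queue pair) to a DPCON (channel): frames enqueued
 * to the DPCI Rx queues are delivered to the DPCON that event ports
 * dequeue from. One DPCI queue serves parallel events and one serves
 * atomic events, each with the matching handler installed as its dequeue
 * callback.
 */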
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Do the settings to get frames delivered on a DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
			       DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}
	return 0;
}

static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

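	/*
	 * Grab DPCON/DPCI pairs until the allocator runs dry; each pair
	 * backs one event queue and fixes max_event_queues.
	 */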
	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);

RTE_INIT(dpaa2_eventdev_init_log)
{
	dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
	if (dpaa2_logtype_event >= 0)
		rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
}