/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017-2019 NXP
 */

#include <stdbool.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_cryptodev.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <dpaa_ethdev.h>
#include <dpaa_sec_event.h>
#include "dpaa_eventdev.h"
#include <dpaa_mempool.h>

/*
 * Eventdev = Virtual instance for the SoC
 * Eventport = Portal instance
 * Eventqueue = Channel instance
 * 1 Eventdev can have N Eventqueues
 */
RTE_LOG_REGISTER(dpaa_logtype_eventdev, pmd.event.dpaa, NOTICE);

#define DISABLE_INTR_MODE "disable_intr"

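/* Convert a dequeue timeout from nanoseconds to rte timer cycles; the
 * poll-mode dequeue loop compares against rte_get_timer_cycles().
 */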
static int
dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	uint64_t cycles_per_second;

	cycles_per_second = rte_get_timer_hz();
	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;

	return 0;
}

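/* Interrupt-mode variant: the timeout is converted to microseconds, which is
 * the unit expected by the select()-based wait in dpaa_event_dequeue_wait().
 */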
static int
dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
				      uint64_t *timeout_ticks)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	*timeout_ticks = ns / 1000;

	return 0;
}

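/* Add a pool channel to the current portal's static dequeue command register
 * (SDQCR) so that frames queued on that channel are dequeued by this portal.
 */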
static void
dpaa_eventq_portal_add(u16 ch_id)
{
	uint32_t sdqcr;

	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
	qman_static_dequeue_add(sdqcr, NULL);
}

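/* Enqueue only consumes RTE_EVENT_OP_RELEASE operations: it releases the
 * atomic DQRR contexts held by this lcore via discrete consumption
 * acknowledgement (DCA). Other operation types are treated as no-ops here.
 */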
static uint16_t
dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
			 uint16_t nb_events)
{
	uint16_t i;
	struct rte_mbuf *mbuf;

	RTE_SET_USED(port);

	/* Release all the contexts saved previously */
	for (i = 0; i < nb_events; i++) {
		switch (ev[i].op) {
		case RTE_EVENT_OP_RELEASE:
			qman_dca_index(ev[i].impl_opaque, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
			break;
		default:
			break;
		}
	}

	return nb_events;
}

static uint16_t
dpaa_event_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa_event_enqueue_burst(port, ev, 1);
}

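/* Drain the 4-byte UIO interrupt count from the qman thread fd once the fd
 * has been signalled, so the next select() starts from a clean state.
 */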
static void drain_4_bytes(int fd, fd_set *fdset)
{
	if (FD_ISSET(fd, fdset)) {
		/* drain 4 bytes */
		uint32_t junk;
		ssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));

		if (sjunk != sizeof(junk))
			DPAA_EVENTDEV_ERR("UIO irq read error");
	}
}

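/* Block until a dequeue interrupt fires or the timeout (in microseconds)
 * expires. IRQ mode is entered and left on every call; the return value is
 * the select() result, i.e. > 0 when the portal fd became readable.
 */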
static inline int
dpaa_event_dequeue_wait(uint64_t timeout_ticks)
{
	int fd_qman, nfds;
	int ret;
	fd_set readset;

	/* Go into (and back out of) IRQ mode for each select,
	 * it simplifies exit-path considerations and other
	 * potential nastiness.
	 */
	struct timeval tv = {
		.tv_sec = timeout_ticks / 1000000,
		.tv_usec = timeout_ticks % 1000000
	};

	fd_qman = qman_thread_fd();
	nfds = fd_qman + 1;
	FD_ZERO(&readset);
	FD_SET(fd_qman, &readset);

	qman_irqsource_add(QM_PIRQ_DQRI);

	ret = select(nfds, &readset, NULL, NULL, &tv);
	if (ret < 0)
		return ret;
	/* Calling irqsource_remove() prior to thread_irq()
	 * means thread_irq() will not process whatever caused
	 * the interrupts, however it does ensure that, once
	 * thread_irq() re-enables interrupts, they won't fire
	 * again immediately.
	 */
	qman_irqsource_remove(~0);
	drain_4_bytes(fd_qman, &readset);
	qman_thread_irq();

	return ret;
}

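/* Poll-mode dequeue: lazily affine the calling thread to a qman portal and
 * link the port's channels, release any held atomic contexts, then busy-poll
 * the portal until frames arrive or the timeout (in timer cycles) expires.
 */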
static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (!portal->timeout_us)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	wait_time_ticks += rte_get_timer_cycles();
	do {
		/* Let's dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}

static uint16_t
dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}

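/* Interrupt-mode dequeue: same flow as the poll-mode variant, but instead of
 * busy-polling it sleeps in dpaa_event_dequeue_wait() until the portal
 * signals a dequeue interrupt or the timeout expires.
 */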
static uint16_t
dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
			      uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i, irq = 0;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (!portal->timeout_us)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	do {
		/* Let's dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (irq)
			irq = 0;
		if (num_frames)
			break;
		if (wait_time_ticks) { /* wait for time */
			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
				irq = 1;
				continue;
			}
			break; /* no event after waiting */
		}
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}

static uint16_t
dpaa_event_dequeue_intr(void *port,
			struct rte_event *ev,
			uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
}

static void
dpaa_event_dev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	dev_info->driver_name = "event_dpaa1";
	dev_info->min_dequeue_timeout_ns =
		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues =
		DPAA_EVENT_MAX_QUEUES;
	dev_info->max_event_queue_flows =
		DPAA_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports =
		DPAA_EVENT_MAX_EVENT_PORT;
	dev_info->max_event_port_dequeue_depth =
		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	/*
	 * TODO: Need to find out how to fetch this info
	 * from the kernel or elsewhere.
	 */
	dev_info->max_num_events =
		DPAA_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap =
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

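/* Store the requested configuration, allocate one qman pool channel per
 * event queue, and pre-compute the per-port dequeue timeout (microseconds
 * in interrupt mode, timer cycles in poll mode).
 */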
static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_INIT_FUNC_TRACE();
	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	ch_id = rte_malloc("dpaa-channels",
			   sizeof(uint32_t) * priv->nb_event_queues,
			   RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		DPAA_EVENTDEV_ERR("Failed to allocate memory for dpaa channels\n");
		return -ENOMEM;
	}
	/* Create requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d\n",
				  priv->nb_event_queues, ret);
		rte_free(ch_id);
		return ret;
	}

	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Let's prepare the event ports */
	memset(&priv->ports[0], 0,
	       sizeof(struct dpaa_port) * priv->nb_event_ports);

	/* Check whether the dequeue timeout is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use the timeout value given with each dequeue operation,
		 * so invalidate this global timeout value.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	for (i = 0; i < priv->nb_event_ports; i++) {
		if (priv->intr_mode) {
			priv->ports[i].timeout_us =
				priv->dequeue_timeout_ns / 1000;
		} else {
			uint64_t cycles_per_second;

			cycles_per_second = rte_get_timer_hz();
			priv->ports[i].timeout_us =
				(priv->dequeue_timeout_ns * cycles_per_second)
					/ NS_PER_S;
		}
	}

	/*
	 * TODO: Currently portals are affined to threads; at most one
	 * thread can be created per lcore.
	 */
	rte_free(ch_id);
	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}

static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}

static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			  struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
}

static int
dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		       const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static int
dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		      const struct rte_event_port_conf *port_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);
	dev->data->ports[port_id] = &eventdev->ports[port_id];

	return 0;
}

static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port);
}

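/* Record the channel of each linked event queue in the port. The channels
 * are only programmed into the portal's SDQCR on the first dequeue from
 * this port (see dpaa_event_dequeue_burst()).
 */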
static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	/* First check that the input configuration is valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}

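/* Drop the port's references to the given event queues and adjust the
 * linked-queue count accordingly.
 */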
static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	if (event_port->num_linked_evq)
		event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}

static int
dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
				   const struct rte_eth_dev *eth_dev,
				   uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

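/* Attach one ethdev Rx queue (or all of them when rx_queue_id is -1) to the
 * pool channel backing the target event queue; on partial failure the queues
 * attached so far are detached again.
 */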
static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				DPAA_EVENTDEV_ERR(
					"Event Queue attach failed:%d\n", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
	return ret;

detach_configured_queues:

	for (i = (i - 1); i >= 0; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
				    const struct rte_eth_dev *eth_dev,
				    int32_t rx_queue_id)
{
	int ret, i;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_detach(eth_dev, i);
			if (ret)
				DPAA_EVENTDEV_ERR(
					"Event Queue detach failed:%d\n", ret);
		}
		return 0;
	}

	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
	return ret;
}

static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

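/* The crypto adapter is only supported on top of the dpaa_sec driver. */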
static int
dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			      const struct rte_cryptodev *cdev,
			      uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strncmp(name, "dpaa_sec-", 9))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
	else
		return -1;

	return 0;
}

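/* Attach every queue pair of the crypto device to the channel backing the
 * event queue named in the event; roll back already-attached queue pairs on
 * failure.
 */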
static int
dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	u16 ch_id = priv->evq_info[ev_qid].ch_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa_sec_eventq_attach(cryptodev, i,
					     ch_id, ev);
		if (ret) {
			DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d\n",
					  ret);
			goto fail;
		}
	}
	return 0;

fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa_sec_eventq_detach(cryptodev, i);

	return ret;
}

static int
dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id,
		const struct rte_event *ev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	u16 ch_id = priv->evq_info[ev_qid].ch_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa_eventdev_crypto_queue_add_all(dev,
				cryptodev, ev);

	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
				     ch_id, ev);
	if (ret) {
		DPAA_EVENTDEV_ERR(
			"dpaa_sec_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA_EVENTDEV_ERR(
				"dpaa_sec_eventq_detach failed:ret %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA_EVENTDEV_ERR(
			"dpaa_sec_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
			  const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa_eventdev_tx_adapter_create(uint8_t id,
				const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	/* Nothing to do. Simply return. */
	return 0;
}

static int
dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
			      const struct rte_eth_dev *eth_dev,
			      uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
	return 0;
}

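/* Tx adapter enqueue for events that all target the same ethdev Tx queue:
 * gather the mbufs and send them with a single rte_eth_tx_burst() call.
 */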
static uint16_t
dpaa_eventdev_txa_enqueue_same_dest(void *port,
				    struct rte_event ev[],
				    uint16_t nb_events)
{
	struct rte_mbuf *m[DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
	uint8_t qid, i;

	RTE_SET_USED(port);

	m0 = (struct rte_mbuf *)ev[0].mbuf;
	qid = rte_event_eth_tx_adapter_txq_get(m0);

	for (i = 0; i < nb_events; i++)
		m[i] = (struct rte_mbuf *)ev[i].mbuf;

	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
}

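/* Generic Tx adapter enqueue: look up the Tx queue for each event's mbuf and
 * transmit the mbufs one at a time.
 */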
static uint16_t
dpaa_eventdev_txa_enqueue(void *port,
			  struct rte_event ev[],
			  uint16_t nb_events)
{
	struct rte_mbuf *m;
	uint8_t qid, i;

	RTE_SET_USED(port);

	for (i = 0; i < nb_events; i++) {
		m = (struct rte_mbuf *)ev[i].mbuf;
		qid = rte_event_eth_tx_adapter_txq_get(m);
		rte_eth_tx_burst(m->port, qid, &m, 1);
	}

	return nb_events;
}

static struct rte_eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get = dpaa_event_dev_info_get,
	.dev_configure = dpaa_event_dev_configure,
	.dev_start = dpaa_event_dev_start,
	.dev_stop = dpaa_event_dev_stop,
	.dev_close = dpaa_event_dev_close,
	.queue_def_conf = dpaa_event_queue_def_conf,
	.queue_setup = dpaa_event_queue_setup,
	.queue_release = dpaa_event_queue_release,
	.port_def_conf = dpaa_event_port_default_conf_get,
	.port_setup = dpaa_event_port_setup,
	.port_release = dpaa_event_port_release,
	.port_link = dpaa_event_port_link,
	.port_unlink = dpaa_event_port_unlink,
	.timeout_ticks = dpaa_event_dequeue_timeout_ticks,
	.eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
	.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
	.eth_tx_adapter_caps_get = dpaa_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create = dpaa_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get = dpaa_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add = dpaa_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del = dpaa_eventdev_crypto_queue_del,
	.crypto_adapter_start = dpaa_eventdev_crypto_start,
	.crypto_adapter_stop = dpaa_eventdev_crypto_stop,
};

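/* Devargs handling: interrupt mode is the default and is turned off only
 * when the "disable_intr=1" key-value pair is passed to the vdev.
 */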
static int flag_check_handler(__rte_unused const char *key,
		const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
dpaa_event_check_flags(const char *params)
{
	struct rte_kvargs *kvlist;

	if (params == NULL || params[0] == '\0')
		return 0;

	kvlist = rte_kvargs_parse(params, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Interrupt mode is disabled when the key-value pair disable_intr=1 is present */
	if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
			       flag_check_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

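/* Create the eventdev vdev, pick poll-mode or interrupt-mode dequeue handlers
 * based on the devargs, and record the maximum number of event queues.
 */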
static int
dpaa_event_dev_create(const char *name, const char *params)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}
	priv = eventdev->data->dev_private;

	eventdev->dev_ops = &dpaa_eventdev_ops;
	eventdev->enqueue = dpaa_event_enqueue;
	eventdev->enqueue_burst = dpaa_event_enqueue_burst;

	if (dpaa_event_check_flags(params)) {
		eventdev->dequeue = dpaa_event_dequeue;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
	} else {
		priv->intr_mode = 1;
		eventdev->dev_ops->timeout_ticks =
				dpaa_event_dequeue_timeout_ticks_intr;
		eventdev->dequeue = dpaa_event_dequeue_intr;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
	}
	eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest = dpaa_eventdev_txa_enqueue_same_dest;

	RTE_LOG(INFO, PMD, "%s eventdev added", name);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	const char *params;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Initializing %s", name);

	params = rte_vdev_device_args(vdev);

	return dpaa_event_dev_create(name, params);
}

static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
		DISABLE_INTR_MODE "=<int>");