1 /* SPDX-License-Identifier: BSD-3-Clause
11 #include <sys/epoll.h>
13 #include <rte_atomic.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_debug.h>
19 #include <rte_lcore.h>
21 #include <rte_malloc.h>
22 #include <rte_memcpy.h>
23 #include <rte_memory.h>
24 #include <rte_memzone.h>
26 #include <rte_eventdev.h>
27 #include <rte_eventdev_pmd_vdev.h>
28 #include <rte_ethdev.h>
29 #include <rte_event_eth_rx_adapter.h>
30 #include <rte_cryptodev.h>
31 #include <rte_dpaa_bus.h>
32 #include <rte_dpaa_logs.h>
33 #include <rte_cycles.h>
34 #include <rte_kvargs.h>
36 #include <dpaa_ethdev.h>
37 #include <dpaa_sec_event.h>
38 #include "dpaa_eventdev.h"
39 #include <dpaa_mempool.h>
/*
 * Eventdev = Virtual Instance for SoC
 * Eventport = Portal Instance
 * Eventqueue = Channel Instance
 * 1 Eventdev can have N Eventqueue
 */
49 #define DISABLE_INTR_MODE "disable_intr"
52 dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
53 uint64_t *timeout_ticks)
55 EVENTDEV_INIT_FUNC_TRACE();
59 uint64_t cycles_per_second;
61 cycles_per_second = rte_get_timer_hz();
62 *timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
68 dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
69 uint64_t *timeout_ticks)
73 *timeout_ticks = ns/1000;
78 dpaa_eventq_portal_add(u16 ch_id)
82 sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
83 qman_static_dequeue_add(sdqcr, NULL);
87 dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
91 struct rte_mbuf *mbuf;
94 /*Release all the contexts saved previously*/
95 for (i = 0; i < nb_events; i++) {
97 case RTE_EVENT_OP_RELEASE:
98 qman_dca_index(ev[i].impl_opaque, 0);
99 mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
100 mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
101 DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
102 DPAA_PER_LCORE_DQRR_SIZE--;
113 dpaa_event_enqueue(void *port, const struct rte_event *ev)
115 return dpaa_event_enqueue_burst(port, ev, 1);
/* Drain the 4-byte UIO interrupt count from @fd if select() marked it
 * readable, so the next select() does not return immediately.
 */
static void drain_4_bytes(int fd, fd_set *fdset)
{
	if (FD_ISSET(fd, fdset)) {
		/* drain 4 bytes */
		uint32_t junk;
		/* Read from the fd we tested, not a fresh
		 * qman_thread_fd() lookup (same fd at the only call
		 * site, but this keeps the helper self-consistent).
		 */
		ssize_t sjunk = read(fd, &junk, sizeof(junk));

		if (sjunk != sizeof(junk))
			DPAA_EVENTDEV_ERR("UIO irq read error");
	}
}
130 dpaa_event_dequeue_wait(uint64_t timeout_ticks)
136 /* Go into (and back out of) IRQ mode for each select,
137 * it simplifies exit-path considerations and other
138 * potential nastiness.
140 struct timeval tv = {
141 .tv_sec = timeout_ticks / 1000000,
142 .tv_usec = timeout_ticks % 1000000
145 fd_qman = qman_thread_fd();
148 FD_SET(fd_qman, &readset);
150 qman_irqsource_add(QM_PIRQ_DQRI);
152 ret = select(nfds, &readset, NULL, NULL, &tv);
155 /* Calling irqsource_remove() prior to thread_irq()
156 * means thread_irq() will not process whatever caused
157 * the interrupts, however it does ensure that, once
158 * thread_irq() re-enables interrupts, they won't fire
161 qman_irqsource_remove(~0);
162 drain_4_bytes(fd_qman, &readset);
169 dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
170 uint16_t nb_events, uint64_t timeout_ticks)
175 u32 num_frames, i, irq = 0;
176 uint64_t cur_ticks = 0, wait_time_ticks = 0;
177 struct dpaa_port *portal = (struct dpaa_port *)port;
178 struct rte_mbuf *mbuf;
180 if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
181 /* Affine current thread context to a qman portal */
182 ret = rte_dpaa_portal_init((void *)0);
184 DPAA_EVENTDEV_ERR("Unable to initialize portal");
189 if (unlikely(!portal->is_port_linked)) {
191 * Affine event queue for current thread context
194 for (i = 0; i < portal->num_linked_evq; i++) {
195 ch_id = portal->evq_info[i].ch_id;
196 dpaa_eventq_portal_add(ch_id);
198 portal->is_port_linked = true;
201 /* Check if there are atomic contexts to be released */
203 while (DPAA_PER_LCORE_DQRR_SIZE) {
204 if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
205 qman_dca_index(i, 0);
206 mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
207 mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
208 DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
209 DPAA_PER_LCORE_DQRR_SIZE--;
213 DPAA_PER_LCORE_DQRR_HELD = 0;
216 wait_time_ticks = timeout_ticks;
218 wait_time_ticks = portal->timeout_us;
220 wait_time_ticks += rte_get_timer_cycles();
222 /* Lets dequeue the frames */
223 num_frames = qman_portal_dequeue(ev, nb_events, buffers);
228 cur_ticks = rte_get_timer_cycles();
229 } while (cur_ticks < wait_time_ticks);
235 dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
237 return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
241 dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
242 uint16_t nb_events, uint64_t timeout_ticks)
247 u32 num_frames, i, irq = 0;
248 uint64_t cur_ticks = 0, wait_time_ticks = 0;
249 struct dpaa_port *portal = (struct dpaa_port *)port;
250 struct rte_mbuf *mbuf;
252 if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
253 /* Affine current thread context to a qman portal */
254 ret = rte_dpaa_portal_init((void *)0);
256 DPAA_EVENTDEV_ERR("Unable to initialize portal");
261 if (unlikely(!portal->is_port_linked)) {
263 * Affine event queue for current thread context
266 for (i = 0; i < portal->num_linked_evq; i++) {
267 ch_id = portal->evq_info[i].ch_id;
268 dpaa_eventq_portal_add(ch_id);
270 portal->is_port_linked = true;
273 /* Check if there are atomic contexts to be released */
275 while (DPAA_PER_LCORE_DQRR_SIZE) {
276 if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
277 qman_dca_index(i, 0);
278 mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
279 mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
280 DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
281 DPAA_PER_LCORE_DQRR_SIZE--;
285 DPAA_PER_LCORE_DQRR_HELD = 0;
288 wait_time_ticks = timeout_ticks;
290 wait_time_ticks = portal->timeout_us;
293 /* Lets dequeue the frames */
294 num_frames = qman_portal_dequeue(ev, nb_events, buffers);
299 if (wait_time_ticks) { /* wait for time */
300 if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
304 break; /* no event after waiting */
306 cur_ticks = rte_get_timer_cycles();
307 } while (cur_ticks < wait_time_ticks);
313 dpaa_event_dequeue_intr(void *port,
314 struct rte_event *ev,
315 uint64_t timeout_ticks)
317 return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
321 dpaa_event_dev_info_get(struct rte_eventdev *dev,
322 struct rte_event_dev_info *dev_info)
324 EVENTDEV_INIT_FUNC_TRACE();
327 dev_info->driver_name = "event_dpaa1";
328 dev_info->min_dequeue_timeout_ns =
329 DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
330 dev_info->max_dequeue_timeout_ns =
331 DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
332 dev_info->dequeue_timeout_ns =
333 DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
334 dev_info->max_event_queues =
335 DPAA_EVENT_MAX_QUEUES;
336 dev_info->max_event_queue_flows =
337 DPAA_EVENT_MAX_QUEUE_FLOWS;
338 dev_info->max_event_queue_priority_levels =
339 DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
340 dev_info->max_event_priority_levels =
341 DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
342 dev_info->max_event_ports =
343 DPAA_EVENT_MAX_EVENT_PORT;
344 dev_info->max_event_port_dequeue_depth =
345 DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
346 dev_info->max_event_port_enqueue_depth =
347 DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
349 * TODO: Need to find out that how to fetch this info
350 * from kernel or somewhere else.
352 dev_info->max_num_events =
353 DPAA_EVENT_MAX_NUM_EVENTS;
354 dev_info->event_dev_cap =
355 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
356 RTE_EVENT_DEV_CAP_BURST_MODE |
357 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
358 RTE_EVENT_DEV_CAP_NONSEQ_MODE;
362 dpaa_event_dev_configure(const struct rte_eventdev *dev)
364 struct dpaa_eventdev *priv = dev->data->dev_private;
365 struct rte_event_dev_config *conf = &dev->data->dev_conf;
369 EVENTDEV_INIT_FUNC_TRACE();
370 priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
371 priv->nb_events_limit = conf->nb_events_limit;
372 priv->nb_event_queues = conf->nb_event_queues;
373 priv->nb_event_ports = conf->nb_event_ports;
374 priv->nb_event_queue_flows = conf->nb_event_queue_flows;
375 priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
376 priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
377 priv->event_dev_cfg = conf->event_dev_cfg;
379 ch_id = rte_malloc("dpaa-channels",
380 sizeof(uint32_t) * priv->nb_event_queues,
381 RTE_CACHE_LINE_SIZE);
383 DPAA_EVENTDEV_ERR("Fail to allocate memory for dpaa channels\n");
386 /* Create requested event queues within the given event device */
387 ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
389 DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d\n",
390 priv->nb_event_queues, ret);
394 for (i = 0; i < priv->nb_event_queues; i++)
395 priv->evq_info[i].ch_id = (u16)ch_id[i];
397 /* Lets prepare event ports */
398 memset(&priv->ports[0], 0,
399 sizeof(struct dpaa_port) * priv->nb_event_ports);
401 /* Check dequeue timeout method is per dequeue or global */
402 if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
404 * Use timeout value as given in dequeue operation.
405 * So invalidating this timeout value.
407 priv->dequeue_timeout_ns = 0;
409 } else if (conf->dequeue_timeout_ns == 0) {
410 priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
412 priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
415 for (i = 0; i < priv->nb_event_ports; i++) {
416 if (priv->intr_mode) {
417 priv->ports[i].timeout_us =
418 priv->dequeue_timeout_ns/1000;
420 uint64_t cycles_per_second;
422 cycles_per_second = rte_get_timer_hz();
423 priv->ports[i].timeout_us =
424 (priv->dequeue_timeout_ns * cycles_per_second)
430 * TODO: Currently portals are affined with threads. Maximum threads
431 * can be created equals to number of lcore.
434 DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);
/* Device start: no hardware action required for DPAA; trace and succeed. */
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}
/* Device stop: no hardware action required for DPAA; trace only. */
static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}
/* Device close: no per-device resources to tear down here; trace and
 * succeed.
 */
static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}
465 dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
466 struct rte_event_queue_conf *queue_conf)
468 EVENTDEV_INIT_FUNC_TRACE();
471 RTE_SET_USED(queue_id);
473 memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
474 queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
475 queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
476 queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
480 dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
481 const struct rte_event_queue_conf *queue_conf)
483 struct dpaa_eventdev *priv = dev->data->dev_private;
484 struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
486 EVENTDEV_INIT_FUNC_TRACE();
488 switch (queue_conf->schedule_type) {
489 case RTE_SCHED_TYPE_PARALLEL:
490 case RTE_SCHED_TYPE_ATOMIC:
492 case RTE_SCHED_TYPE_ORDERED:
493 DPAA_EVENTDEV_ERR("Schedule type is not supported.");
496 evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
497 evq_info->event_queue_id = queue_id;
503 dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
505 EVENTDEV_INIT_FUNC_TRACE();
508 RTE_SET_USED(queue_id);
512 dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
513 struct rte_event_port_conf *port_conf)
515 EVENTDEV_INIT_FUNC_TRACE();
518 RTE_SET_USED(port_id);
520 port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
521 port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
522 port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
526 dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
527 const struct rte_event_port_conf *port_conf)
529 struct dpaa_eventdev *eventdev = dev->data->dev_private;
531 EVENTDEV_INIT_FUNC_TRACE();
533 RTE_SET_USED(port_conf);
534 dev->data->ports[port_id] = &eventdev->ports[port_id];
/* Port release: ports live in the device private area; nothing to free. */
static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(port);
}
548 dpaa_event_port_link(struct rte_eventdev *dev, void *port,
549 const uint8_t queues[], const uint8_t priorities[],
552 struct dpaa_eventdev *priv = dev->data->dev_private;
553 struct dpaa_port *event_port = (struct dpaa_port *)port;
554 struct dpaa_eventq *event_queue;
559 RTE_SET_USED(priorities);
561 /* First check that input configuration are valid */
562 for (i = 0; i < nb_links; i++) {
563 eventq_id = queues[i];
564 event_queue = &priv->evq_info[eventq_id];
565 if ((event_queue->event_queue_cfg
566 & RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
567 && (event_queue->event_port)) {
572 for (i = 0; i < nb_links; i++) {
573 eventq_id = queues[i];
574 event_queue = &priv->evq_info[eventq_id];
575 event_port->evq_info[i].event_queue_id = eventq_id;
576 event_port->evq_info[i].ch_id = event_queue->ch_id;
577 event_queue->event_port = port;
580 event_port->num_linked_evq = event_port->num_linked_evq + i;
586 dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
587 uint8_t queues[], uint16_t nb_links)
591 struct dpaa_eventq *event_queue;
592 struct dpaa_eventdev *priv = dev->data->dev_private;
593 struct dpaa_port *event_port = (struct dpaa_port *)port;
595 if (!event_port->num_linked_evq)
598 for (i = 0; i < nb_links; i++) {
599 eventq_id = queues[i];
600 event_port->evq_info[eventq_id].event_queue_id = -1;
601 event_port->evq_info[eventq_id].ch_id = 0;
602 event_queue = &priv->evq_info[eventq_id];
603 event_queue->event_port = NULL;
606 if (event_port->num_linked_evq)
607 event_port->num_linked_evq = event_port->num_linked_evq - i;
613 dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
614 const struct rte_eth_dev *eth_dev,
617 const char *ethdev_driver = eth_dev->device->driver->name;
619 EVENTDEV_INIT_FUNC_TRACE();
623 if (!strcmp(ethdev_driver, "net_dpaa"))
624 *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
626 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
632 dpaa_event_eth_rx_adapter_queue_add(
633 const struct rte_eventdev *dev,
634 const struct rte_eth_dev *eth_dev,
636 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
638 struct dpaa_eventdev *eventdev = dev->data->dev_private;
639 uint8_t ev_qid = queue_conf->ev.queue_id;
640 u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
641 struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
644 EVENTDEV_INIT_FUNC_TRACE();
646 if (rx_queue_id == -1) {
647 for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
648 ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
652 "Event Queue attach failed:%d\n", ret);
653 goto detach_configured_queues;
659 ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
661 DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
664 detach_configured_queues:
666 for (i = (i - 1); i >= 0 ; i--)
667 dpaa_eth_eventq_detach(eth_dev, i);
673 dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
674 const struct rte_eth_dev *eth_dev,
678 struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
680 EVENTDEV_INIT_FUNC_TRACE();
683 if (rx_queue_id == -1) {
684 for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
685 ret = dpaa_eth_eventq_detach(eth_dev, i);
688 "Event Queue detach failed:%d\n", ret);
694 ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
696 DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
/* Rx adapter start: attachment already arms the queues; nothing to do. */
static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
/* Rx adapter stop: nothing to quiesce beyond queue detach; no-op. */
static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
725 dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
726 const struct rte_cryptodev *cdev,
729 const char *name = cdev->data->name;
731 EVENTDEV_INIT_FUNC_TRACE();
735 if (!strncmp(name, "dpaa_sec-", 9))
736 *caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
744 dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
745 const struct rte_cryptodev *cryptodev,
746 const struct rte_event *ev)
748 struct dpaa_eventdev *priv = dev->data->dev_private;
749 uint8_t ev_qid = ev->queue_id;
750 u16 ch_id = priv->evq_info[ev_qid].ch_id;
753 EVENTDEV_INIT_FUNC_TRACE();
755 for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
756 ret = dpaa_sec_eventq_attach(cryptodev, i,
759 DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d\n",
766 for (i = (i - 1); i >= 0 ; i--)
767 dpaa_sec_eventq_detach(cryptodev, i);
773 dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
774 const struct rte_cryptodev *cryptodev,
776 const struct rte_event *ev)
778 struct dpaa_eventdev *priv = dev->data->dev_private;
779 uint8_t ev_qid = ev->queue_id;
780 u16 ch_id = priv->evq_info[ev_qid].ch_id;
783 EVENTDEV_INIT_FUNC_TRACE();
785 if (rx_queue_id == -1)
786 return dpaa_eventdev_crypto_queue_add_all(dev,
789 ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
793 "dpaa_sec_eventq_attach failed: ret: %d\n", ret);
800 dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
801 const struct rte_cryptodev *cdev)
805 EVENTDEV_INIT_FUNC_TRACE();
809 for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
810 ret = dpaa_sec_eventq_detach(cdev, i);
813 "dpaa_sec_eventq_detach failed:ret %d\n", ret);
/* Detach one cryptodev queue pair (or all, when rx_queue_id == -1) from
 * its event channel.
 */
static int
dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
			     const struct rte_cryptodev *cryptodev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA_EVENTDEV_ERR(
			"dpaa_sec_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Crypto adapter start: attachment already arms the queues; no-op. */
static int
dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}
/* Crypto adapter stop: nothing to quiesce beyond queue detach; no-op. */
static int
dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
			  const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}
867 static struct rte_eventdev_ops dpaa_eventdev_ops = {
868 .dev_infos_get = dpaa_event_dev_info_get,
869 .dev_configure = dpaa_event_dev_configure,
870 .dev_start = dpaa_event_dev_start,
871 .dev_stop = dpaa_event_dev_stop,
872 .dev_close = dpaa_event_dev_close,
873 .queue_def_conf = dpaa_event_queue_def_conf,
874 .queue_setup = dpaa_event_queue_setup,
875 .queue_release = dpaa_event_queue_release,
876 .port_def_conf = dpaa_event_port_default_conf_get,
877 .port_setup = dpaa_event_port_setup,
878 .port_release = dpaa_event_port_release,
879 .port_link = dpaa_event_port_link,
880 .port_unlink = dpaa_event_port_unlink,
881 .timeout_ticks = dpaa_event_dequeue_timeout_ticks,
882 .eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
883 .eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
884 .eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
885 .eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
886 .eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
887 .crypto_adapter_caps_get = dpaa_eventdev_crypto_caps_get,
888 .crypto_adapter_queue_pair_add = dpaa_eventdev_crypto_queue_add,
889 .crypto_adapter_queue_pair_del = dpaa_eventdev_crypto_queue_del,
890 .crypto_adapter_start = dpaa_eventdev_crypto_start,
891 .crypto_adapter_stop = dpaa_eventdev_crypto_stop,
894 static int flag_check_handler(__rte_unused const char *key,
895 const char *value, __rte_unused void *opaque)
897 if (strcmp(value, "1"))
904 dpaa_event_check_flags(const char *params)
906 struct rte_kvargs *kvlist;
908 if (params == NULL || params[0] == '\0')
911 kvlist = rte_kvargs_parse(params, NULL);
915 if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
916 rte_kvargs_free(kvlist);
919 /* INTR MODE is disabled when there's key-value pair: disable_intr = 1*/
920 if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
921 flag_check_handler, NULL) < 0) {
922 rte_kvargs_free(kvlist);
925 rte_kvargs_free(kvlist);
931 dpaa_event_dev_create(const char *name, const char *params)
933 struct rte_eventdev *eventdev;
934 struct dpaa_eventdev *priv;
936 eventdev = rte_event_pmd_vdev_init(name,
937 sizeof(struct dpaa_eventdev),
939 if (eventdev == NULL) {
940 DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
943 priv = eventdev->data->dev_private;
945 eventdev->dev_ops = &dpaa_eventdev_ops;
946 eventdev->enqueue = dpaa_event_enqueue;
947 eventdev->enqueue_burst = dpaa_event_enqueue_burst;
949 if (dpaa_event_check_flags(params)) {
950 eventdev->dequeue = dpaa_event_dequeue;
951 eventdev->dequeue_burst = dpaa_event_dequeue_burst;
954 eventdev->dev_ops->timeout_ticks =
955 dpaa_event_dequeue_timeout_ticks_intr;
956 eventdev->dequeue = dpaa_event_dequeue_intr;
957 eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
960 RTE_LOG(INFO, PMD, "%s eventdev added", name);
962 /* For secondary processes, the primary has done all the work */
963 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
966 priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
/* vdev probe entry point: extract name/args and create the device. */
static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	const char *params;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Initializing %s", name);

	params = rte_vdev_device_args(vdev);

	return dpaa_event_dev_create(name, params);
}
/* vdev remove entry point: tear down the eventdev instance. */
static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}
998 static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
999 .probe = dpaa_event_dev_probe,
1000 .remove = dpaa_event_dev_remove
1003 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
1004 RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
1005 DISABLE_INTR_MODE "=<int>");