/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019 NXP
 */

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/epoll.h>
#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>
#include <rte_cryptodev.h>
#include <rte_event_eth_rx_adapter.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <dpaa2_sec_event.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>
/* Clarifications:
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */

/* Dynamic logging identifier for the event PMD */
int dpaa2_logtype_event;

#define DPAA2_EV_TX_RETRY_COUNT 10000
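
/*
 * Enqueue path: each rte_event is copied into a freshly allocated event
 * object whose address is carried in a QBMAN frame descriptor (FD). The FD
 * is enqueued to the DPCI Rx queue matching the event sched_type (atomic or
 * parallel); from there the hardware delivers it to the DPCON channel that
 * dequeuing ports listen on.
 */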
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info;
	uint32_t fqid, retry_count;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int i, n, ret;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}
	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	evq_info = &dpaa2_portal->evq_info[queue_id];

	while (nb_events) {
		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC &&
			    event->mbuf->seqn) {
				uint8_t dqrr_index = event->mbuf->seqn - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &=
					~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid the data memcpy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
					sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_events -= loop;
					return num_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}
		num_tx += loop;
		nb_events -= loop;
	}

	return num_tx;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;
		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
					dpio_dev->token,
					evq_info->dpcon->dpcon_id);
	}
	return 0;
}
static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}
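
/*
 * Wait for dequeue activity on the per-lcore DPIO portal: clear the DQRI
 * interrupt status, then block in epoll until data arrives or the timeout
 * (interpreted in milliseconds) expires.
 */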
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);
	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
		   &epoll_ev, 1, timeout_ticks);
}
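
/*
 * DQRR entry processing callbacks, installed per DPCI Rx queue. The parallel
 * variant consumes the DQRR entry immediately; the atomic variant keeps the
 * entry held and records its index in mbuf->seqn so the context is released
 * only when the event is enqueued again or explicitly consumed on the next
 * dequeue.
 */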
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
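
/*
 * Dequeue path: first release any DQRR entries still held for atomic
 * contexts, then pull dequeue results from the portal and dispatch each
 * frame to the callback stored in the FQ context (process_parallel or
 * process_atomic above).
 */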
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0, n;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}

	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;

		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
					dpio_dev->token,
					evq_info->dpcon->dpcon_id);
	}
	return 0;
}
static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}
static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* we only support dpio up to the number of cores */
	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}
static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per-dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * The timeout value supplied with each dequeue operation is
		 * used, so invalidate the global timeout value here.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}
static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}
static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}
static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}
static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->disable_implicit_release = 0;
}
static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	char event_port_name[32];
	struct dpaa2_port *portal;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	snprintf(event_port_name, sizeof(event_port_name),
		 "event-port-%d", port_id);
	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
	if (!portal) {
		DPAA2_EVENTDEV_ERR("Memory allocation failure");
		return -ENOMEM;
	}

	memset(portal, 0, sizeof(struct dpaa2_port));
	dev->data->ports[port_id] = portal;
	return 0;
}
static void
dpaa2_eventdev_port_release(void *port)
{
	struct dpaa2_port *portal = port;

	EVENTDEV_INIT_FUNC_TRACE();

	if (portal == NULL)
		return;

	/* TODO: Cleanup is required when ports are in linked state. */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
}
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
		       sizeof(struct dpaa2_eventq));
		dpaa2_portal->evq_info[queues[i]].event_port = port;
		dpaa2_portal->num_linked_evq++;
	}

	return (int)nb_links;
}
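
/*
 * Note that linking is lazy: port_link() above only records the queue
 * association; the actual portal-to-channel mapping is programmed on the
 * first enqueue/dequeue via is_port_linked (see the skip_linking paths).
 * Unlinking below tears that mapping down eagerly when a portal is affined.
 */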
static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_port *dpaa2_portal = port;
	int i;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queues);

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &dpaa2_portal->evq_info[queues[i]];

		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
			/* todo dpaa2_portal shall have dpio_dev-no per lcore*/
			dpio_dev = DPAA2_PER_LCORE_DPIO;
			swp = DPAA2_PER_LCORE_PORTAL;

			qbman_swp_push_set(swp,
					   evq_info->dpcon->channel_index, 0);
			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
					dpio_dev->token,
					evq_info->dpcon->dpcon_id);
		}
		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
		if (dpaa2_portal->num_linked_evq)
			dpaa2_portal->num_linked_evq--;
	}

	if (!dpaa2_portal->num_linked_evq)
		dpaa2_portal->is_port_linked = false;

	return (int)nb_unlinks;
}
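
/*
 * Dequeue timeout ticks are interpreted as milliseconds (the granularity of
 * the epoll_wait() call in dpaa2_eventdev_dequeue_wait()), hence the
 * nanosecond-to-millisecond conversion below.
 */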
static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000 * 1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns / scale;

	return 0;
}
static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}
static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}
static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon_id, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0 ; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}
static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon_id, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}
static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}
static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}
static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			       const struct rte_cryptodev *cdev,
			       uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strncmp(name, "dpsec-", 6))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
	else
		return -1;

	return 0;
}
static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_attach(cryptodev, i,
					      dpcon_id, ev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_attach failed: ret %d\n",
				ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0 ; i--)
		dpaa2_sec_eventq_detach(cryptodev, i);

	return ret;
}
static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_add_all(dev,
				cryptodev, ev);

	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
				      dpcon_id, ev);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}
static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
				    const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_detach failed: ret %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
				const struct rte_cryptodev *cryptodev,
				int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}
static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}
static struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get = dpaa2_eventdev_info_get,
	.dev_configure = dpaa2_eventdev_configure,
	.dev_start = dpaa2_eventdev_start,
	.dev_stop = dpaa2_eventdev_stop,
	.dev_close = dpaa2_eventdev_close,
	.queue_def_conf = dpaa2_eventdev_queue_def_conf,
	.queue_setup = dpaa2_eventdev_queue_setup,
	.queue_release = dpaa2_eventdev_queue_release,
	.port_def_conf = dpaa2_eventdev_port_def_conf,
	.port_setup = dpaa2_eventdev_port_setup,
	.port_release = dpaa2_eventdev_port_release,
	.port_link = dpaa2_eventdev_port_link,
	.port_unlink = dpaa2_eventdev_port_unlink,
	.timeout_ticks = dpaa2_eventdev_timeout_ticks,
	.dump = dpaa2_eventdev_dump,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
	.crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
	.crypto_adapter_start = dpaa2_eventdev_crypto_start,
	.crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
};
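
/*
 * A DPCI (software queue) instance backs each event queue: its Rx queues are
 * configured to deliver frames to the paired DPCON object, and the per-queue
 * callbacks stored in the user context demultiplex atomic vs parallel events
 * on dequeue.
 */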
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Configure the DPCI Rx queues to deliver frames to the DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
		DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}
	return 0;
}
static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

	return 0;
fail:
	return -EFAULT;
}
static int
dpaa2_eventdev_destroy(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	int i;

	eventdev = rte_event_pmd_get_named_dev(name);
	if (eventdev == NULL) {
		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
		return -1;
	}

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	for (i = 0; i < priv->max_event_queues; i++) {
		if (priv->evq_info[i].dpcon)
			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);

		if (priv->evq_info[i].dpci)
			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
	}

	priv->max_event_queues = 0;

	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
	return 0;
}
static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name);
}
static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	dpaa2_eventdev_destroy(name);

	return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
RTE_INIT(dpaa2_eventdev_init_log)
{
	dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
	if (dpaa2_logtype_event >= 0)
		rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
}