/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019 NXP
 */

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>
#include <rte_cryptodev.h>
#include <rte_event_eth_rx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <dpaa2_sec_event.h>
#endif
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>
/*
 * Clarifications:
 * Eventdev = SoC instance
 * Eventport = DPIO instance
 * Eventqueue = DPCON instance
 * One eventdev can have N eventqueues
 * The soft event flow is a DPCI instance
 */
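/*
 * Illustrative only: a minimal sketch of how an application drives this
 * PMD through the generic eventdev API (default configs assumed to come
 * from the _def_conf callbacks below; this snippet is not part of the
 * driver):
 *
 *	rte_event_dev_configure(dev_id, &dev_conf);
 *	rte_event_queue_setup(dev_id, 0, &queue_conf);  // one DPCON
 *	rte_event_port_setup(dev_id, 0, &port_conf);    // one DPIO
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);  // link all queues
 *	nb = rte_event_dequeue_burst(dev_id, 0, evs, 16, timeout_ticks);
 */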
/* Dynamic logging identifier for the event device */
int dpaa2_logtype_event;
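/*
 * Datapath overview (descriptive, based on the code below): on enqueue,
 * each rte_event is copied into a malloc'd shadow object and the frame
 * descriptor is enqueued to a DPCI Rx queue whose destination is the
 * event queue's DPCON; on dequeue, the portal's DQRR entries are walked
 * and a per-queue callback rebuilds the rte_event from the frame.
 */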
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
                             uint16_t nb_events)
{
        struct dpaa2_port *dpaa2_portal = port;
        struct dpaa2_dpio_dev *dpio_dev;
        uint32_t queue_id = ev[0].queue_id;
        struct dpaa2_eventq *evq_info;
        uint32_t fqid;
        struct qbman_swp *swp;
        struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
        uint32_t loop, frames_to_send;
        struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
        uint16_t num_tx = 0;
        int i, n, ret;
        uint8_t channel_index;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                /* Affine current thread context to a qman portal */
                ret = dpaa2_affine_qbman_swp();
                if (ret < 0) {
                        DPAA2_EVENTDEV_ERR("Failure in affining portal");
                        return 0;
                }
        }
        /* TODO: dpaa2_portal should carry its own dpio_dev instead of the
         * per-thread variable.
         */
        dpio_dev = DPAA2_PER_LCORE_DPIO;
        swp = DPAA2_PER_LCORE_PORTAL;

        if (likely(dpaa2_portal->is_port_linked))
                goto skip_linking;

        /* Create mapping between portal and channel to receive packets */
        for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
                evq_info = &dpaa2_portal->evq_info[i];
                if (!evq_info->event_port)
                        continue;

                ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
                                                      CMD_PRI_LOW,
                                                      dpio_dev->token,
                                                      evq_info->dpcon->dpcon_id,
                                                      &channel_index);
                if (ret < 0) {
                        DPAA2_EVENTDEV_ERR(
                                "Static dequeue config failed: err(%d)", ret);
                        goto err;
                }

                qbman_swp_push_set(swp, channel_index, 1);
                evq_info->dpcon->channel_index = channel_index;
        }
        dpaa2_portal->is_port_linked = true;

skip_linking:
        evq_info = &dpaa2_portal->evq_info[queue_id];

        while (nb_events) {
                frames_to_send = (nb_events > dpaa2_eqcr_size) ?
                        dpaa2_eqcr_size : nb_events;

                for (loop = 0; loop < frames_to_send; loop++) {
                        const struct rte_event *event = &ev[num_tx + loop];

                        if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
                                fqid = evq_info->dpci->rx_queue[
                                        DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
                        else
                                fqid = evq_info->dpci->rx_queue[
                                        DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

                        /* Prepare enqueue descriptor */
                        qbman_eq_desc_clear(&eqdesc[loop]);
                        qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
                        qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
                        qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

                        if (event->sched_type == RTE_SCHED_TYPE_ATOMIC &&
                            event->mbuf->seqn) {
                                uint8_t dqrr_index = event->mbuf->seqn - 1;

                                /* Release the held DQRR entry via DCA */
                                qbman_eq_desc_set_dca(&eqdesc[loop], 1,
                                                      dqrr_index, 0);
                                DPAA2_PER_LCORE_DQRR_SIZE--;
                                DPAA2_PER_LCORE_DQRR_HELD &=
                                        ~(1 << dqrr_index);
                        }

                        memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

                        /* TODO: align with HW context data to avoid
                         * this copy.
                         */
                        struct rte_event *ev_temp = rte_malloc(NULL,
                                sizeof(struct rte_event), 0);

                        if (!ev_temp) {
                                if (!loop)
                                        return num_tx;
                                frames_to_send = loop;
                                DPAA2_EVENTDEV_ERR(
                                        "Unable to allocate event object");
                                goto send_partial;
                        }
                        rte_memcpy(ev_temp, event, sizeof(struct rte_event));
                        DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
                        DPAA2_SET_FD_LEN((&fd_arr[loop]),
                                         sizeof(struct rte_event));
                }
send_partial:
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qbman_swp_enqueue_multiple_desc(swp,
                                        &eqdesc[loop], &fd_arr[loop],
                                        frames_to_send - loop);
                }
                num_tx += frames_to_send;
                nb_events -= frames_to_send;
        }

        return num_tx;
err:
        /* Unwind any channel mappings configured before the failure */
        for (n = 0; n < i; n++) {
                evq_info = &dpaa2_portal->evq_info[n];
                if (!evq_info->event_port)
                        continue;
                qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
                dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
                                        dpio_dev->token,
                                        evq_info->dpcon->dpcon_id);
        }
        return 0;
}
static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
        return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
        struct epoll_event epoll_ev;

        qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
                                         QBMAN_SWP_INTERRUPT_DQRI);
        epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
                   &epoll_ev, 1, timeout_ticks);
}
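/*
 * The two callbacks below translate a received frame back into an
 * rte_event. They are installed on the DPCI Rx queues in
 * dpaa2_eventdev_setup_dpci(): parallel frames are consumed from the
 * DQRR immediately, while atomic frames keep their DQRR entry held
 * until the application releases or forwards the event.
 */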
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
                                            const struct qbman_fd *fd,
                                            const struct qbman_result *dq,
                                            struct dpaa2_queue *rxq,
                                            struct rte_event *ev)
{
        struct rte_event *ev_temp =
                (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

        RTE_SET_USED(rxq);

        rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
        rte_free(ev_temp);

        qbman_swp_dqrr_consume(swp, dq);
}
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
                                          const struct qbman_fd *fd,
                                          const struct qbman_result *dq,
                                          struct dpaa2_queue *rxq,
                                          struct rte_event *ev)
{
        struct rte_event *ev_temp =
                (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
        uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

        RTE_SET_USED(swp);
        RTE_SET_USED(rxq);

        rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
        rte_free(ev_temp);

        ev->mbuf->seqn = dqrr_index + 1;
        DPAA2_PER_LCORE_DQRR_SIZE++;
        DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
        DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
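/*
 * Atomic-context bookkeeping (as used above and in the enqueue path):
 * seqn stores dqrr_index + 1 so that a seqn of 0 can mean "no held
 * context", and the per-lcore HELD bitmap plus SIZE counter track which
 * DQRR slots the application still owns until release via DCA.
 */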
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
                             uint16_t nb_events, uint64_t timeout_ticks)
{
        const struct qbman_result *dq;
        struct dpaa2_dpio_dev *dpio_dev = NULL;
        struct dpaa2_port *dpaa2_portal = port;
        struct dpaa2_eventq *evq_info;
        struct qbman_swp *swp;
        const struct qbman_fd *fd;
        struct dpaa2_queue *rxq;
        int num_pkts = 0, ret, i = 0, n;
        uint8_t channel_index;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                /* Affine current thread context to a qman portal */
                ret = dpaa2_affine_qbman_swp();
                if (ret < 0) {
                        DPAA2_EVENTDEV_ERR("Failure in affining portal");
                        return 0;
                }
        }

        dpio_dev = DPAA2_PER_LCORE_DPIO;
        swp = DPAA2_PER_LCORE_PORTAL;

        if (likely(dpaa2_portal->is_port_linked))
                goto skip_linking;

        /* Create mapping between portal and channel to receive packets */
        for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
                evq_info = &dpaa2_portal->evq_info[i];
                if (!evq_info->event_port)
                        continue;

                ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
                                                      CMD_PRI_LOW,
                                                      dpio_dev->token,
                                                      evq_info->dpcon->dpcon_id,
                                                      &channel_index);
                if (ret < 0) {
                        DPAA2_EVENTDEV_ERR(
                                "Static dequeue config failed: err(%d)", ret);
                        goto err;
                }

                qbman_swp_push_set(swp, channel_index, 1);
                evq_info->dpcon->channel_index = channel_index;
        }
        dpaa2_portal->is_port_linked = true;

skip_linking:
        /* Check if there are atomic contexts to be released */
        while (DPAA2_PER_LCORE_DQRR_SIZE) {
                if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
                        qbman_swp_dqrr_idx_consume(swp, i);
                        DPAA2_PER_LCORE_DQRR_SIZE--;
                        DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
                                DPAA2_INVALID_MBUF_SEQN;
                }
                i++;
        }
        DPAA2_PER_LCORE_DQRR_HELD = 0;

        do {
                dq = qbman_swp_dqrr_next(swp);
                if (!dq) {
                        if (!num_pkts && timeout_ticks) {
                                dpaa2_eventdev_dequeue_wait(timeout_ticks);
                                timeout_ticks = 0;
                                continue;
                        }
                        return num_pkts;
                }
                qbman_swp_prefetch_dqrr_next(swp);

                fd = qbman_result_DQ_fd(dq);
                rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
                if (rxq) {
                        rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
                } else {
                        qbman_swp_dqrr_consume(swp, dq);
                        DPAA2_EVENTDEV_ERR("Null Return VQ received");
                        return num_pkts;
                }

                num_pkts++;
        } while (num_pkts < nb_events);

        return num_pkts;
err:
        /* Unwind any channel mappings configured before the failure */
        for (n = 0; n < i; n++) {
                evq_info = &dpaa2_portal->evq_info[n];
                if (!evq_info->event_port)
                        continue;

                qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
                dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
                                        dpio_dev->token,
                                        evq_info->dpcon->dpcon_id);
        }
        return 0;
}
static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
                       uint64_t timeout_ticks)
{
        return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}
static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
                        struct rte_event_dev_info *dev_info)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;

        EVENTDEV_INIT_FUNC_TRACE();

        memset(dev_info, 0, sizeof(struct rte_event_dev_info));
        dev_info->min_dequeue_timeout_ns =
                DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
        dev_info->max_dequeue_timeout_ns =
                DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
        dev_info->dequeue_timeout_ns =
                DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
        dev_info->max_event_queues = priv->max_event_queues;
        dev_info->max_event_queue_flows =
                DPAA2_EVENT_MAX_QUEUE_FLOWS;
        dev_info->max_event_queue_priority_levels =
                DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
        dev_info->max_event_priority_levels =
                DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
        dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
        /* A DPIO is needed per port, so cap ports at the core count */
        if (dev_info->max_event_ports > rte_lcore_count())
                dev_info->max_event_ports = rte_lcore_count();
        dev_info->max_event_port_dequeue_depth =
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        dev_info->max_event_port_enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
        dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                RTE_EVENT_DEV_CAP_BURST_MODE |
                RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}
static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct rte_event_dev_config *conf = &dev->data->dev_conf;

        EVENTDEV_INIT_FUNC_TRACE();

        priv->nb_event_queues = conf->nb_event_queues;
        priv->nb_event_ports = conf->nb_event_ports;
        priv->nb_event_queue_flows = conf->nb_event_queue_flows;
        priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
        priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
        priv->event_dev_cfg = conf->event_dev_cfg;

        /* Check whether the dequeue timeout is per dequeue or global */
        if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
                /*
                 * The timeout value is supplied with each dequeue
                 * operation, so invalidate the global timeout value.
                 */
                priv->dequeue_timeout_ns = 0;
        } else if (conf->dequeue_timeout_ns == 0) {
                priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
        } else {
                priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
        }

        DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
                             dev->data->dev_id);
        return 0;
}
static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        return 0;
}
static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
                              struct rte_event_queue_conf *queue_conf)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
        queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
                           const struct rte_event_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

        EVENTDEV_INIT_FUNC_TRACE();

        switch (queue_conf->schedule_type) {
        case RTE_SCHED_TYPE_PARALLEL:
        case RTE_SCHED_TYPE_ATOMIC:
                break;
        case RTE_SCHED_TYPE_ORDERED:
                DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
                return -1;
        }
        evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
        evq_info->event_queue_id = queue_id;

        return 0;
}
static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);
}
static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
                             struct rte_event_port_conf *port_conf)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(port_id);

        port_conf->new_event_threshold =
                DPAA2_EVENT_MAX_NUM_EVENTS;
        port_conf->dequeue_depth =
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        port_conf->enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
        port_conf->disable_implicit_release = 0;
}
static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
                          const struct rte_event_port_conf *port_conf)
{
        char event_port_name[32];
        struct dpaa2_port *portal;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(port_conf);

        sprintf(event_port_name, "event-port-%d", port_id);
        portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
        if (!portal) {
                DPAA2_EVENTDEV_ERR("Memory allocation failure");
                return -ENOMEM;
        }

        memset(portal, 0, sizeof(struct dpaa2_port));
        dev->data->ports[port_id] = portal;

        return 0;
}
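/*
 * Design note: port setup only allocates and zeroes the portal object.
 * The hardware mapping of portal to DPCON channels is deferred to the
 * first enqueue/dequeue on the port (see the skip_linking paths above),
 * since the calling lcore's DPIO is not known until the datapath runs.
 */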
static void
dpaa2_eventdev_port_release(void *port)
{
        struct dpaa2_port *portal = port;

        EVENTDEV_INIT_FUNC_TRACE();

        if (!portal)
                return;

        /* TODO: Cleanup is required when ports are in linked state. */
        if (portal->is_port_linked)
                DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

        rte_free(portal);
}
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
                         const uint8_t queues[], const uint8_t priorities[],
                         uint16_t nb_links)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct dpaa2_port *dpaa2_portal = port;
        struct dpaa2_eventq *evq_info;
        int i;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(priorities);

        for (i = 0; i < nb_links; i++) {
                evq_info = &priv->evq_info[queues[i]];
                memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
                       sizeof(struct dpaa2_eventq));
                dpaa2_portal->evq_info[queues[i]].event_port = port;
                dpaa2_portal->num_linked_evq++;
        }

        return (int)nb_links;
}
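/*
 * Linking only records the queue-to-port association in the portal's
 * evq_info[]; the corresponding DPIO static dequeue channels are pushed
 * to hardware lazily on the first datapath call, and torn down eagerly
 * in the unlink path below.
 */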
static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
                           uint8_t queues[], uint16_t nb_unlinks)
{
        struct dpaa2_port *dpaa2_portal = port;
        int i;
        struct dpaa2_dpio_dev *dpio_dev = NULL;
        struct dpaa2_eventq *evq_info;
        struct qbman_swp *swp;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queues);

        for (i = 0; i < nb_unlinks; i++) {
                evq_info = &dpaa2_portal->evq_info[queues[i]];

                if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
                        /* TODO: dpaa2_portal should carry its own dpio_dev
                         * instead of the per-lcore variable.
                         */
                        dpio_dev = DPAA2_PER_LCORE_DPIO;
                        swp = DPAA2_PER_LCORE_PORTAL;

                        qbman_swp_push_set(swp,
                                           evq_info->dpcon->channel_index, 0);
                        dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
                                                dpio_dev->token,
                                                evq_info->dpcon->dpcon_id);
                }
                memset(evq_info, 0, sizeof(struct dpaa2_eventq));
                if (dpaa2_portal->num_linked_evq)
                        dpaa2_portal->num_linked_evq--;
        }

        if (!dpaa2_portal->num_linked_evq)
                dpaa2_portal->is_port_linked = false;

        return (int)nb_unlinks;
}
static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
                             uint64_t *timeout_ticks)
{
        uint32_t scale = 1000 * 1000;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        *timeout_ticks = ns / scale;

        return 0;
}
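/*
 * One tick therefore equals one millisecond: the tick value computed
 * above is handed unchanged to epoll_wait() in
 * dpaa2_eventdev_dequeue_wait(), which takes its timeout in ms.
 */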
static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(f);
}
static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
                            const struct rte_eth_dev *eth_dev,
                            uint32_t *caps)
{
        const char *ethdev_driver = eth_dev->device->driver->name;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        if (!strcmp(ethdev_driver, "net_dpaa2"))
                *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

        return 0;
}
static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
                const struct rte_eth_dev *eth_dev,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = queue_conf->ev.queue_id;
        uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
        int i, ret;

        EVENTDEV_INIT_FUNC_TRACE();

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                ret = dpaa2_eth_eventq_attach(eth_dev, i,
                                              dpcon_id, queue_conf);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "Event queue attach failed: err(%d)", ret);
                        goto fail;
                }
        }
        return 0;
fail:
        /* Detach the queues attached before the failure */
        for (i = (i - 1); i >= 0 ; i--)
                dpaa2_eth_eventq_detach(eth_dev, i);

        return ret;
}
static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
                const struct rte_eth_dev *eth_dev,
                int32_t rx_queue_id,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = queue_conf->ev.queue_id;
        uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
        int ret;

        EVENTDEV_INIT_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_eth_queue_add_all(dev,
                                eth_dev, queue_conf);

        ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
                                      dpcon_id, queue_conf);
        if (ret) {
                DPAA2_EVENTDEV_ERR(
                        "Event queue attach failed: err(%d)", ret);
                return ret;
        }
        return 0;
}
static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
                                 const struct rte_eth_dev *eth_dev)
{
        int i, ret;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                ret = dpaa2_eth_eventq_detach(eth_dev, i);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "Event queue detach failed: err(%d)", ret);
                        return ret;
                }
        }

        return 0;
}
static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev,
                             int32_t rx_queue_id)
{
        int ret;

        EVENTDEV_INIT_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

        ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
        if (ret) {
                DPAA2_EVENTDEV_ERR(
                        "Event queue detach failed: err(%d)", ret);
                return ret;
        }

        return 0;
}
static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
                         const struct rte_eth_dev *eth_dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(eth_dev);

        return 0;
}
static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
                        const struct rte_eth_dev *eth_dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(eth_dev);

        return 0;
}
#ifdef RTE_LIBRTE_SECURITY
static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
                               const struct rte_cryptodev *cdev,
                               uint32_t *caps)
{
        const char *name = cdev->data->name;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        if (!strncmp(name, "dpsec-", 6))
                *caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
        else
                return -1;

        return 0;
}
static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
                const struct rte_cryptodev *cryptodev,
                const struct rte_event *ev)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = ev->queue_id;
        uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
        int i, ret;

        EVENTDEV_INIT_FUNC_TRACE();

        for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
                ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon_id, ev);
                if (ret) {
                        DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
                                           ret);
                        goto fail;
                }
        }
        return 0;
fail:
        /* Detach the queue pairs attached before the failure */
        for (i = (i - 1); i >= 0 ; i--)
                dpaa2_sec_eventq_detach(cryptodev, i);

        return ret;
}
static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
                const struct rte_cryptodev *cryptodev,
                int32_t rx_queue_id,
                const struct rte_event *ev)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = ev->queue_id;
        uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
        int ret;

        EVENTDEV_INIT_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_crypto_queue_add_all(dev,
                                cryptodev, ev);

        ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
                                      dpcon_id, ev);
        if (ret) {
                DPAA2_EVENTDEV_ERR(
                        "dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
                return ret;
        }
        return 0;
}
static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
                                    const struct rte_cryptodev *cdev)
{
        int i, ret;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
                ret = dpaa2_sec_eventq_detach(cdev, i);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "dpaa2_sec_eventq_detach failed:ret %d\n", ret);
                        return ret;
                }
        }

        return 0;
}
static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
                                const struct rte_cryptodev *cryptodev,
                                int32_t rx_queue_id)
{
        int ret;

        EVENTDEV_INIT_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

        ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
        if (ret) {
                DPAA2_EVENTDEV_ERR(
                        "dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
                return ret;
        }

        return 0;
}
static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
                            const struct rte_cryptodev *cryptodev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(cryptodev);

        return 0;
}
static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
                           const struct rte_cryptodev *cryptodev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(cryptodev);

        return 0;
}
#endif
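/*
 * The crypto adapter path above mirrors the Rx adapter path: each SEC
 * queue pair is bound (via dpaa2_sec_eventq_attach) to the DPCON backing
 * the chosen event queue, so completed crypto operations surface as
 * events on that queue.
 */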
static struct rte_eventdev_ops dpaa2_eventdev_ops = {
        .dev_infos_get = dpaa2_eventdev_info_get,
        .dev_configure = dpaa2_eventdev_configure,
        .dev_start = dpaa2_eventdev_start,
        .dev_stop = dpaa2_eventdev_stop,
        .dev_close = dpaa2_eventdev_close,
        .queue_def_conf = dpaa2_eventdev_queue_def_conf,
        .queue_setup = dpaa2_eventdev_queue_setup,
        .queue_release = dpaa2_eventdev_queue_release,
        .port_def_conf = dpaa2_eventdev_port_def_conf,
        .port_setup = dpaa2_eventdev_port_setup,
        .port_release = dpaa2_eventdev_port_release,
        .port_link = dpaa2_eventdev_port_link,
        .port_unlink = dpaa2_eventdev_port_unlink,
        .timeout_ticks = dpaa2_eventdev_timeout_ticks,
        .dump = dpaa2_eventdev_dump,
        .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
        .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
        .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
        .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
        .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
#ifdef RTE_LIBRTE_SECURITY
        .crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
        .crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
        .crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
        .crypto_adapter_start = dpaa2_eventdev_crypto_start,
        .crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
#endif
};
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
                          struct dpaa2_dpcon_dev *dpcon_dev)
{
        struct dpci_rx_queue_cfg rx_queue_cfg;
        int ret, i;

        /* Apply the settings to get the frame on a DPCON object */
        rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
                               DPCI_QUEUE_OPT_USER_CTX;
        rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
        rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
        rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

        dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
                dpaa2_eventdev_process_parallel;
        dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
                dpaa2_eventdev_process_atomic;

        for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
                rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
                ret = dpci_set_rx_queue(&dpci_dev->dpci,
                                        CMD_PRI_LOW,
                                        dpci_dev->token, i,
                                        &rx_queue_cfg);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "DPCI Rx queue setup failed: err(%d)",
                                ret);
                        return ret;
                }
        }

        return 0;
}
static int
dpaa2_eventdev_create(const char *name)
{
        struct rte_eventdev *eventdev;
        struct dpaa2_eventdev *priv;
        struct dpaa2_dpcon_dev *dpcon_dev = NULL;
        struct dpaa2_dpci_dev *dpci_dev = NULL;
        int ret;

        eventdev = rte_event_pmd_vdev_init(name,
                                           sizeof(struct dpaa2_eventdev),
                                           rte_socket_id());
        if (eventdev == NULL) {
                DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
                goto fail;
        }

        eventdev->dev_ops = &dpaa2_eventdev_ops;
        eventdev->enqueue = dpaa2_eventdev_enqueue;
        eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->dequeue = dpaa2_eventdev_dequeue;
        eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        priv = eventdev->data->dev_private;
        priv->max_event_queues = 0;

        do {
                dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
                if (!dpcon_dev)
                        break;
                priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

                dpci_dev = rte_dpaa2_alloc_dpci_dev();
                if (!dpci_dev) {
                        rte_dpaa2_free_dpcon_dev(dpcon_dev);
                        break;
                }
                priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

                ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "DPCI setup failed: err(%d)", ret);
                        return ret;
                }
                priv->max_event_queues++;
        } while (dpcon_dev && dpci_dev);

        RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

        return 0;
fail:
        return -EFAULT;
}
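/*
 * Event queues are provisioned greedily: the loop above pairs one DPCON
 * with one DPCI per queue and stops as soon as either resource pool runs
 * out, so max_event_queues reflects whatever the MC firmware made
 * available.
 */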
static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
        const char *name;

        name = rte_vdev_device_name(vdev);
        DPAA2_EVENTDEV_INFO("Initializing %s", name);
        return dpaa2_eventdev_create(name);
}
static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
        const char *name;

        name = rte_vdev_device_name(vdev);
        DPAA2_EVENTDEV_INFO("Closing %s", name);

        return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
        .probe = dpaa2_eventdev_probe,
        .remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
RTE_INIT(dpaa2_eventdev_init_log)
{
        dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
        if (dpaa2_logtype_event >= 0)
                rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
}