/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019-2021 NXP
 */
#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>
#include <ethdev_driver.h>
#include <cryptodev_pmd.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <dpaa2_sec_event.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>
/*
 * Resource mapping:
 * Eventdev   = SoC instance
 * Eventport  = DPIO instance
 * Eventqueue = DPCON instance
 * One eventdev can have N eventqueues
 * Soft event flow is a DPCI instance
 */
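/*
 * Number of times an enqueue is retried when the QBMAN enqueue ring is
 * busy before dpaa2_eventdev_enqueue_burst() gives up on the remaining
 * events.
 */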
#define DPAA2_EV_TX_RETRY_COUNT 10000
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
                 uint16_t nb_events)
{
    struct dpaa2_port *dpaa2_portal = port;
    struct dpaa2_dpio_dev *dpio_dev;
    uint32_t queue_id = ev[0].queue_id;
    struct dpaa2_eventq *evq_info;
    uint32_t fqid, retry_count;
    struct qbman_swp *swp;
    struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
    uint32_t loop, frames_to_send;
    struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
    uint16_t num_tx = 0;
    int i, n, ret;
    uint8_t channel_index;
    if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
        /* Affine current thread context to a qman portal */
        ret = dpaa2_affine_qbman_swp();
        if (ret < 0) {
            DPAA2_EVENTDEV_ERR(
                "Failed to allocate IO portal, tid: %d\n",
                rte_gettid());
            return 0;
        }
    }
    /* TODO: dpaa2_portal should carry its own dpio_dev instead of the
     * per-thread variable.
     */
    dpio_dev = DPAA2_PER_LCORE_DPIO;
    swp = DPAA2_PER_LCORE_PORTAL;
    if (likely(dpaa2_portal->is_port_linked))
        goto skip_linking;

    /* Create mapping between portal and channel to receive packets */
    for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
        evq_info = &dpaa2_portal->evq_info[i];
        if (!evq_info->event_port)
            continue;

        ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
                              CMD_PRI_LOW,
                              dpio_dev->token,
                              evq_info->dpcon->dpcon_id,
                              &channel_index);
        if (ret < 0) {
            DPAA2_EVENTDEV_ERR(
                "Static dequeue config failed: err(%d)", ret);
            goto err;
        }

        qbman_swp_push_set(swp, channel_index, 1);
        evq_info->dpcon->channel_index = channel_index;
    }
    dpaa2_portal->is_port_linked = true;
skip_linking:
    evq_info = &dpaa2_portal->evq_info[queue_id];

    while (nb_events) {
        frames_to_send = (nb_events > dpaa2_eqcr_size) ?
            dpaa2_eqcr_size : nb_events;
        for (loop = 0; loop < frames_to_send; loop++) {
            const struct rte_event *event = &ev[num_tx + loop];

            if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
                fqid = evq_info->dpci->rx_queue[
                    DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
            else
                fqid = evq_info->dpci->rx_queue[
                    DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
            /* Prepare enqueue descriptor */
            qbman_eq_desc_clear(&eqdesc[loop]);
            qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
            qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
            qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
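            /*
             * If this atomic event still holds a DQRR entry
             * (dpaa2_seqn() stores DQRR index + 1; zero means no held
             * context), request a discrete consumption acknowledgement
             * (DCA) so that QBMAN consumes the entry, and releases the
             * atomic flow context, as part of this enqueue.
             */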
            if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
                    && *dpaa2_seqn(event->mbuf)) {
                uint8_t dqrr_index =
                    *dpaa2_seqn(event->mbuf) - 1;

                qbman_eq_desc_set_dca(&eqdesc[loop], 1,
                              dqrr_index, 0);
                DPAA2_PER_LCORE_DQRR_SIZE--;
                DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
            }
            memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

            /*
             * TODO: align with the HW context data to avoid this
             * copy.
             */
            struct rte_event *ev_temp = rte_malloc(NULL,
                sizeof(struct rte_event), 0);

            if (!ev_temp) {
                if (!loop)
                    return num_tx;
                frames_to_send = loop;
                DPAA2_EVENTDEV_ERR(
                    "Unable to allocate event object");
                goto send_partial;
            }
            rte_memcpy(ev_temp, event, sizeof(struct rte_event));
            DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
            DPAA2_SET_FD_LEN((&fd_arr[loop]),
                     sizeof(struct rte_event));
        }
send_partial:
        loop = 0;
        retry_count = 0;
        while (loop < frames_to_send) {
            ret = qbman_swp_enqueue_multiple_desc(swp,
                    &eqdesc[loop], &fd_arr[loop],
                    frames_to_send - loop);
            if (unlikely(ret < 0)) {
                retry_count++;
                if (retry_count > DPAA2_EV_TX_RETRY_COUNT)
                    return num_tx + loop;
            } else {
                loop += ret;
                retry_count = 0;
            }
        }
        num_tx += loop;
        nb_events -= loop;
    }

    return num_tx;

err:
    for (n = 0; n < i; n++) {
        evq_info = &dpaa2_portal->evq_info[n];
        if (!evq_info->event_port)
            continue;
        qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
        dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
                    dpio_dev->token,
                    evq_info->dpcon->dpcon_id);
    }
    return 0;
}
static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
    return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
    struct epoll_event epoll_ev;

    qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
                     QBMAN_SWP_INTERRUPT_DQRI);
    epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
           &epoll_ev, 1, timeout_ticks);
}
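/*
 * Dequeue callbacks for the two DPCI Rx queues (installed by
 * dpaa2_eventdev_setup_dpci()): each rebuilds the rte_event that the
 * enqueue path stashed behind the frame descriptor and frees the
 * temporary copy.
 */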
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
                        const struct qbman_fd *fd,
                        const struct qbman_result *dq,
                        struct dpaa2_queue *rxq,
                        struct rte_event *ev)
{
    struct rte_event *ev_temp =
        (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

    RTE_SET_USED(rxq);

    rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
    rte_free(ev_temp);

    qbman_swp_dqrr_consume(swp, dq);
}
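/*
 * The atomic variant below leaves the DQRR entry unconsumed and records
 * (index + 1) in dpaa2_seqn(), so the atomic flow context stays held by
 * this lcore until the event is enqueued again (released via DCA) or
 * explicitly consumed at the start of the next dequeue.
 */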
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
                      const struct qbman_fd *fd,
                      const struct qbman_result *dq,
                      struct dpaa2_queue *rxq,
                      struct rte_event *ev)
{
    struct rte_event *ev_temp =
        (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
    uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

    RTE_SET_USED(swp);
    RTE_SET_USED(rxq);

    rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
    rte_free(ev_temp);

    *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
    DPAA2_PER_LCORE_DQRR_SIZE++;
    DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
    DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
                 uint16_t nb_events, uint64_t timeout_ticks)
{
    const struct qbman_result *dq;
    struct dpaa2_dpio_dev *dpio_dev = NULL;
    struct dpaa2_port *dpaa2_portal = port;
    struct dpaa2_eventq *evq_info;
    struct qbman_swp *swp;
    const struct qbman_fd *fd;
    struct dpaa2_queue *rxq;
    int num_pkts = 0, ret, i = 0, n;
    uint8_t channel_index;
    if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
        /* Affine current thread context to a qman portal */
        ret = dpaa2_affine_qbman_swp();
        if (ret < 0) {
            DPAA2_EVENTDEV_ERR(
                "Failed to allocate IO portal, tid: %d\n",
                rte_gettid());
            return 0;
        }
    }

    dpio_dev = DPAA2_PER_LCORE_DPIO;
    swp = DPAA2_PER_LCORE_PORTAL;
    if (likely(dpaa2_portal->is_port_linked))
        goto skip_linking;

    /* Create mapping between portal and channel to receive packets */
    for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
        evq_info = &dpaa2_portal->evq_info[i];
        if (!evq_info->event_port)
            continue;

        ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
                              CMD_PRI_LOW,
                              dpio_dev->token,
                              evq_info->dpcon->dpcon_id,
                              &channel_index);
        if (ret < 0) {
            DPAA2_EVENTDEV_ERR(
                "Static dequeue config failed: err(%d)", ret);
            goto err;
        }

        qbman_swp_push_set(swp, channel_index, 1);
        evq_info->dpcon->channel_index = channel_index;
    }
    dpaa2_portal->is_port_linked = true;
skip_linking:
    /* Check if there are atomic contexts to be released */
    while (DPAA2_PER_LCORE_DQRR_SIZE) {
        if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
            qbman_swp_dqrr_idx_consume(swp, i);
            DPAA2_PER_LCORE_DQRR_SIZE--;
            *dpaa2_seqn(DPAA2_PER_LCORE_DQRR_MBUF(i)) =
                DPAA2_INVALID_MBUF_SEQN;
        }
        i++;
    }
    DPAA2_PER_LCORE_DQRR_HELD = 0;
    do {
        dq = qbman_swp_dqrr_next(swp);
        if (!dq) {
            if (!num_pkts && timeout_ticks) {
                dpaa2_eventdev_dequeue_wait(timeout_ticks);
                timeout_ticks = 0;
                continue;
            }
            return num_pkts;
        }
        qbman_swp_prefetch_dqrr_next(swp);

        fd = qbman_result_DQ_fd(dq);
        rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
        if (rxq) {
            rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
        } else {
            qbman_swp_dqrr_consume(swp, dq);
            DPAA2_EVENTDEV_ERR("Null Return VQ received");
            return 0;
        }

        num_pkts++;
    } while (num_pkts < nb_events);
    return num_pkts;

err:
    for (n = 0; n < i; n++) {
        evq_info = &dpaa2_portal->evq_info[n];
        if (!evq_info->event_port)
            continue;

        qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
        dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
                        dpio_dev->token,
                        evq_info->dpcon->dpcon_id);
    }
    return 0;
}
static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
               uint64_t timeout_ticks)
{
    return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}
static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
            struct rte_event_dev_info *dev_info)
{
    struct dpaa2_eventdev *priv = dev->data->dev_private;

    EVENTDEV_INIT_FUNC_TRACE();

    memset(dev_info, 0, sizeof(struct rte_event_dev_info));
    dev_info->min_dequeue_timeout_ns =
        DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
    dev_info->max_dequeue_timeout_ns =
        DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
    dev_info->dequeue_timeout_ns =
        DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
    dev_info->max_event_queues = priv->max_event_queues;
    dev_info->max_event_queue_flows =
        DPAA2_EVENT_MAX_QUEUE_FLOWS;
    dev_info->max_event_queue_priority_levels =
        DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
    dev_info->max_event_priority_levels =
        DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
    dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
    /* DPIO ports are supported only up to the number of cores */
    if (dev_info->max_event_ports > rte_lcore_count())
        dev_info->max_event_ports = rte_lcore_count();
    dev_info->max_event_port_dequeue_depth =
        DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
    dev_info->max_event_port_enqueue_depth =
        DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
    dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
    dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
        RTE_EVENT_DEV_CAP_BURST_MODE |
        RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
        RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
        RTE_EVENT_DEV_CAP_NONSEQ_MODE |
        RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
        RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
        RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
}
static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
    struct dpaa2_eventdev *priv = dev->data->dev_private;
    struct rte_event_dev_config *conf = &dev->data->dev_conf;

    EVENTDEV_INIT_FUNC_TRACE();

    priv->nb_event_queues = conf->nb_event_queues;
    priv->nb_event_ports = conf->nb_event_ports;
    priv->nb_event_queue_flows = conf->nb_event_queue_flows;
    priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
    priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
    priv->event_dev_cfg = conf->event_dev_cfg;

    /* Check whether the dequeue timeout is per dequeue or global */
    if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
        /*
         * The timeout value is supplied with each dequeue
         * operation, so invalidate the global value here.
         */
        priv->dequeue_timeout_ns = 0;
    } else if (conf->dequeue_timeout_ns == 0) {
        priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
    } else {
        priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
    }

    DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
                 dev->data->dev_id);
    return 0;
}
static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);

    return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);

    return 0;
}
static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
                  struct rte_event_queue_conf *queue_conf)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
    RTE_SET_USED(queue_id);

    queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
    queue_conf->nb_atomic_order_sequences =
        DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
    queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
    queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
               const struct rte_event_queue_conf *queue_conf)
{
    struct dpaa2_eventdev *priv = dev->data->dev_private;
    struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

    EVENTDEV_INIT_FUNC_TRACE();

    switch (queue_conf->schedule_type) {
    case RTE_SCHED_TYPE_PARALLEL:
    case RTE_SCHED_TYPE_ATOMIC:
    case RTE_SCHED_TYPE_ORDERED:
        break;
    default:
        DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
        return -1;
    }
    evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
    evq_info->event_queue_id = queue_id;

    return 0;
}
static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
    RTE_SET_USED(queue_id);
}
static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
                 struct rte_event_port_conf *port_conf)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
    RTE_SET_USED(port_id);

    port_conf->new_event_threshold =
        DPAA2_EVENT_MAX_NUM_EVENTS;
    port_conf->dequeue_depth =
        DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
    port_conf->enqueue_depth =
        DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
    port_conf->event_port_cfg = 0;
}
static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
              const struct rte_event_port_conf *port_conf)
{
    char event_port_name[32];
    struct dpaa2_port *portal;

    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(port_conf);

    snprintf(event_port_name, sizeof(event_port_name),
         "event-port-%d", port_id);
    portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
    if (!portal) {
        DPAA2_EVENTDEV_ERR("Memory allocation failure");
        return -ENOMEM;
    }

    memset(portal, 0, sizeof(struct dpaa2_port));
    dev->data->ports[port_id] = portal;

    return 0;
}
static void
dpaa2_eventdev_port_release(void *port)
{
    struct dpaa2_port *portal = port;

    EVENTDEV_INIT_FUNC_TRACE();

    if (portal == NULL)
        return;

    /* TODO: Cleanup is required when ports are in linked state. */
    if (portal->is_port_linked)
        DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

    rte_free(portal);
}
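/*
 * Linking only records the queue-to-port association in the portal's
 * evq_info[]; the hardware mapping (static dequeue channel plus push
 * dequeue setup) is programmed lazily on the first enqueue or dequeue
 * on the port, guarded by is_port_linked.
 */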
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
             const uint8_t queues[], const uint8_t priorities[],
             uint16_t nb_links)
{
    struct dpaa2_eventdev *priv = dev->data->dev_private;
    struct dpaa2_port *dpaa2_portal = port;
    struct dpaa2_eventq *evq_info;
    uint16_t i;

    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(priorities);

    for (i = 0; i < nb_links; i++) {
        evq_info = &priv->evq_info[queues[i]];
        memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
               sizeof(struct dpaa2_eventq));
        dpaa2_portal->evq_info[queues[i]].event_port = port;
        dpaa2_portal->num_linked_evq++;
    }

    return (int)nb_links;
}
static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
               uint8_t queues[], uint16_t nb_unlinks)
{
    struct dpaa2_port *dpaa2_portal = port;
    int i;
    struct dpaa2_dpio_dev *dpio_dev = NULL;
    struct dpaa2_eventq *evq_info;
    struct qbman_swp *swp;

    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);

    for (i = 0; i < nb_unlinks; i++) {
        evq_info = &dpaa2_portal->evq_info[queues[i]];

        if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
            /* TODO: dpaa2_portal should carry its own dpio_dev
             * instead of the per-lcore variable.
             */
            dpio_dev = DPAA2_PER_LCORE_DPIO;
            swp = DPAA2_PER_LCORE_PORTAL;

            qbman_swp_push_set(swp,
                    evq_info->dpcon->channel_index, 0);
            dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
                        dpio_dev->token,
                        evq_info->dpcon->dpcon_id);
        }
        memset(evq_info, 0, sizeof(struct dpaa2_eventq));
        if (dpaa2_portal->num_linked_evq)
            dpaa2_portal->num_linked_evq--;
    }

    if (!dpaa2_portal->num_linked_evq)
        dpaa2_portal->is_port_linked = false;

    return (int)nb_unlinks;
}
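/*
 * Convert a dequeue timeout from nanoseconds to PMD ticks. A tick is a
 * millisecond here (ns / 1000000), matching the unit that
 * dpaa2_eventdev_dequeue_wait() hands to epoll_wait(); e.g. a 100 ms
 * timeout arrives as ns = 100000000 and becomes 100 ticks.
 */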
static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
                 uint64_t *timeout_ticks)
{
    uint32_t scale = 1000 * 1000;

    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
    *timeout_ticks = ns / scale;

    return 0;
}
static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
    RTE_SET_USED(f);
}
static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
                const struct rte_eth_dev *eth_dev,
                uint32_t *caps)
{
    const char *ethdev_driver = eth_dev->device->driver->name;

    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);

    if (!strcmp(ethdev_driver, "net_dpaa2"))
        *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
    else
        *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

    return 0;
}
static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
        const struct rte_eth_dev *eth_dev,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
    struct dpaa2_eventdev *priv = dev->data->dev_private;
    uint8_t ev_qid = queue_conf->ev.queue_id;
    struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
    int i, ret;

    EVENTDEV_INIT_FUNC_TRACE();

    for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
        ret = dpaa2_eth_eventq_attach(eth_dev, i,
                          dpcon, queue_conf);
        if (ret) {
            DPAA2_EVENTDEV_ERR(
                "Event queue attach failed: err(%d)", ret);
            goto fail;
        }
    }
    return 0;

fail:
    for (i = (i - 1); i >= 0; i--)
        dpaa2_eth_eventq_detach(eth_dev, i);

    return ret;
}
static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
        const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
    struct dpaa2_eventdev *priv = dev->data->dev_private;
    uint8_t ev_qid = queue_conf->ev.queue_id;
    struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
    int ret;

    EVENTDEV_INIT_FUNC_TRACE();

    if (rx_queue_id == -1)
        return dpaa2_eventdev_eth_queue_add_all(dev,
                eth_dev, queue_conf);

    ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
                      dpcon, queue_conf);
    if (ret) {
        DPAA2_EVENTDEV_ERR(
            "Event queue attach failed: err(%d)", ret);
        return ret;
    }
    return 0;
}
static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
                 const struct rte_eth_dev *eth_dev)
{
    int i, ret;

    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);

    for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
        ret = dpaa2_eth_eventq_detach(eth_dev, i);
        if (ret) {
            DPAA2_EVENTDEV_ERR(
                "Event queue detach failed: err(%d)", ret);
            return ret;
        }
    }

    return 0;
}
static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
                 const struct rte_eth_dev *eth_dev,
                 int32_t rx_queue_id)
{
    int ret;

    EVENTDEV_INIT_FUNC_TRACE();

    if (rx_queue_id == -1)
        return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

    ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
    if (ret) {
        DPAA2_EVENTDEV_ERR(
            "Event queue detach failed: err(%d)", ret);
        return ret;
    }

    return 0;
}
static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
             const struct rte_eth_dev *eth_dev)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
    RTE_SET_USED(eth_dev);

    return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
            const struct rte_eth_dev *eth_dev)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
    RTE_SET_USED(eth_dev);

    return 0;
}
static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
                   const struct rte_cryptodev *cdev,
                   uint32_t *caps)
{
    const char *name = cdev->data->name;

    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);

    if (!strncmp(name, "dpsec-", 6))
        *caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
    else
        return -1;

    return 0;
}
static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
        const struct rte_cryptodev *cryptodev,
        const struct rte_event *ev)
{
    struct dpaa2_eventdev *priv = dev->data->dev_private;
    uint8_t ev_qid = ev->queue_id;
    struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
    int i, ret;

    EVENTDEV_INIT_FUNC_TRACE();

    for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
        ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
        if (ret) {
            DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
                       ret);
            goto fail;
        }
    }
    return 0;

fail:
    for (i = (i - 1); i >= 0; i--)
        dpaa2_sec_eventq_detach(cryptodev, i);

    return ret;
}
static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
        const struct rte_cryptodev *cryptodev,
        int32_t rx_queue_id,
        const struct rte_event *ev)
{
    struct dpaa2_eventdev *priv = dev->data->dev_private;
    uint8_t ev_qid = ev->queue_id;
    struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
    int ret;

    EVENTDEV_INIT_FUNC_TRACE();

    if (rx_queue_id == -1)
        return dpaa2_eventdev_crypto_queue_add_all(dev,
                cryptodev, ev);

    ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
                      dpcon, ev);
    if (ret) {
        DPAA2_EVENTDEV_ERR(
            "dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
        return ret;
    }
    return 0;
}
static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
                    const struct rte_cryptodev *cdev)
{
    int i, ret;

    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);

    for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
        ret = dpaa2_sec_eventq_detach(cdev, i);
        if (ret) {
            DPAA2_EVENTDEV_ERR(
                "dpaa2_sec_eventq_detach failed: ret %d\n", ret);
            return ret;
        }
    }

    return 0;
}
static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
                const struct rte_cryptodev *cryptodev,
                int32_t rx_queue_id)
{
    int ret;

    EVENTDEV_INIT_FUNC_TRACE();

    if (rx_queue_id == -1)
        return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

    ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
    if (ret) {
        DPAA2_EVENTDEV_ERR(
            "dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
        return ret;
    }

    return 0;
}
static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
                const struct rte_cryptodev *cryptodev)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
    RTE_SET_USED(cryptodev);

    return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
               const struct rte_cryptodev *cryptodev)
{
    EVENTDEV_INIT_FUNC_TRACE();

    RTE_SET_USED(dev);
    RTE_SET_USED(cryptodev);

    return 0;
}
static int
dpaa2_eventdev_tx_adapter_create(uint8_t id,
                 const struct rte_eventdev *dev)
{
    RTE_SET_USED(id);
    RTE_SET_USED(dev);

    /* Nothing to do. Simply return. */
    return 0;
}
static int
dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
                   const struct rte_eth_dev *eth_dev,
                   uint32_t *caps)
{
    RTE_SET_USED(dev);
    RTE_SET_USED(eth_dev);

    *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
    return 0;
}
static uint16_t
dpaa2_eventdev_txa_enqueue_same_dest(void *port,
                     struct rte_event ev[],
                     uint16_t nb_events)
{
    struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
    uint8_t qid, i;

    RTE_SET_USED(port);

    m0 = (struct rte_mbuf *)ev[0].mbuf;
    qid = rte_event_eth_tx_adapter_txq_get(m0);

    for (i = 0; i < nb_events; i++)
        m[i] = (struct rte_mbuf *)ev[i].mbuf;

    return rte_eth_tx_burst(m0->port, qid, m, nb_events);
}
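/*
 * General case: events in the burst may target different ports and Tx
 * queues, so the queue is resolved per mbuf and the batch is handed to
 * dpaa2_dev_tx_multi_txq_ordered(), which transmits across multiple Tx
 * queues while handling any ordered-flow context.
 */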
static uint16_t
dpaa2_eventdev_txa_enqueue(void *port,
               struct rte_event ev[],
               uint16_t nb_events)
{
    void *txq[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
    struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
    uint8_t qid, i;

    RTE_SET_USED(port);

    for (i = 0; i < nb_events; i++) {
        m[i] = (struct rte_mbuf *)ev[i].mbuf;
        qid = rte_event_eth_tx_adapter_txq_get(m[i]);
        txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
    }

    dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);

    return nb_events;
}
static struct eventdev_ops dpaa2_eventdev_ops = {
    .dev_infos_get = dpaa2_eventdev_info_get,
    .dev_configure = dpaa2_eventdev_configure,
    .dev_start = dpaa2_eventdev_start,
    .dev_stop = dpaa2_eventdev_stop,
    .dev_close = dpaa2_eventdev_close,
    .queue_def_conf = dpaa2_eventdev_queue_def_conf,
    .queue_setup = dpaa2_eventdev_queue_setup,
    .queue_release = dpaa2_eventdev_queue_release,
    .port_def_conf = dpaa2_eventdev_port_def_conf,
    .port_setup = dpaa2_eventdev_port_setup,
    .port_release = dpaa2_eventdev_port_release,
    .port_link = dpaa2_eventdev_port_link,
    .port_unlink = dpaa2_eventdev_port_unlink,
    .timeout_ticks = dpaa2_eventdev_timeout_ticks,
    .dump = dpaa2_eventdev_dump,
    .dev_selftest = test_eventdev_dpaa2,
    .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
    .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
    .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
    .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
    .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
    .eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps,
    .eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create,
    .crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
    .crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
    .crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
    .crypto_adapter_start = dpaa2_eventdev_crypto_start,
    .crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
};
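/*
 * Soft (software-injected) events travel through a DPCI instance:
 * dpaa2_eventdev_enqueue_burst() places them on one of the two DPCI Rx
 * queues configured below (one per parallel/atomic scheduling type),
 * and the DPCON destination set here delivers them back to whichever
 * portal is listening on the event queue's channel.
 */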
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
              struct dpaa2_dpcon_dev *dpcon_dev)
{
    struct dpci_rx_queue_cfg rx_queue_cfg;
    int ret, i;

    /* Configure the DPCI Rx queues to deliver frames to a DPCON object */
    rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
                   DPCI_QUEUE_OPT_USER_CTX;
    rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
    rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
    rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

    dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
        dpaa2_eventdev_process_parallel;
    dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
        dpaa2_eventdev_process_atomic;

    for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
        rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
        ret = dpci_set_rx_queue(&dpci_dev->dpci,
                    CMD_PRI_LOW,
                    dpci_dev->token, i,
                    &rx_queue_cfg);
        if (ret) {
            DPAA2_EVENTDEV_ERR(
                "DPCI Rx queue setup failed: err(%d)",
                ret);
            return ret;
        }
    }
    return 0;
}
static int
dpaa2_eventdev_create(const char *name)
{
    struct rte_eventdev *eventdev;
    struct dpaa2_eventdev *priv;
    struct dpaa2_dpcon_dev *dpcon_dev = NULL;
    struct dpaa2_dpci_dev *dpci_dev = NULL;
    int ret;

    eventdev = rte_event_pmd_vdev_init(name,
                       sizeof(struct dpaa2_eventdev),
                       rte_socket_id());
    if (eventdev == NULL) {
        DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
        goto fail;
    }

    eventdev->dev_ops = &dpaa2_eventdev_ops;
    eventdev->enqueue = dpaa2_eventdev_enqueue;
    eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
    eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
    eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
    eventdev->dequeue = dpaa2_eventdev_dequeue;
    eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
    eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue;
    eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest;
    /* For secondary processes, the primary has done all the work */
    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        goto done;

    priv = eventdev->data->dev_private;
    priv->max_event_queues = 0;

    do {
        dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
        if (!dpcon_dev)
            break;
        priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

        dpci_dev = rte_dpaa2_alloc_dpci_dev();
        if (!dpci_dev) {
            rte_dpaa2_free_dpcon_dev(dpcon_dev);
            break;
        }
        priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

        ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
        if (ret) {
            DPAA2_EVENTDEV_ERR(
                "DPCI setup failed: err(%d)", ret);
            return ret;
        }
        priv->max_event_queues++;
    } while (dpcon_dev && dpci_dev);
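    /*
     * Each iteration above pairs one DPCON (hardware channel) with one
     * DPCI (soft event queues) to form a single event queue, so
     * max_event_queues ends up as the smaller of the two resource
     * counts.
     */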
    RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

done:
    event_dev_probing_finish(eventdev);
    return 0;
fail:
    return -EFAULT;
}
static int
dpaa2_eventdev_destroy(const char *name)
{
    struct rte_eventdev *eventdev;
    struct dpaa2_eventdev *priv;
    int i;

    eventdev = rte_event_pmd_get_named_dev(name);
    if (eventdev == NULL) {
        RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
        return -1;
    }

    /* For secondary processes, the primary has done all the work */
    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return 0;

    priv = eventdev->data->dev_private;
    for (i = 0; i < priv->max_event_queues; i++) {
        if (priv->evq_info[i].dpcon)
            rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);

        if (priv->evq_info[i].dpci)
            rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
    }

    priv->max_event_queues = 0;

    RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
    return 0;
}
static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
    const char *name;

    name = rte_vdev_device_name(vdev);
    DPAA2_EVENTDEV_INFO("Initializing %s", name);
    return dpaa2_eventdev_create(name);
}
static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
    const char *name;

    name = rte_vdev_device_name(vdev);
    DPAA2_EVENTDEV_INFO("Closing %s", name);

    dpaa2_eventdev_destroy(name);

    return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
    .probe = dpaa2_eventdev_probe,
    .remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_event, NOTICE);
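/*
 * Illustrative usage (assuming EVENTDEV_NAME_DPAA2_PMD resolves to
 * "event_dpaa2", as defined in dpaa2_eventdev.h): pass
 * --vdev=event_dpaa2 on the EAL command line to instantiate this PMD.
 */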