/*
 *   BSD LICENSE
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of NXP nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_vdev.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include "dpaa2_eventdev.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * A soft event flow is a DPCI Instance
 */
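
/*
 * Enqueue a burst of events: each event is copied into a frame
 * descriptor and enqueued to the DPCI frame queue matching its
 * scheduling type (atomic or parallel), from which QBMan delivers it
 * to the DPCON channel backing the event queue.
 */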
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct rte_eventdev *ev_dev =
			((struct dpaa2_io_portal_t *)port)->eventdev;
	struct dpaa2_eventdev *priv = ev_dev->data->dev_private;
	uint32_t queue_id = ev[0].queue_id;
	struct evq_info_t *evq_info = &priv->evq_info[queue_id];
	uint32_t fqid;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int ret;

	/* Affine the current lcore to a QBMan software portal if needed */
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;
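
	/*
	 * Send in bursts; the (nb_events >> 3) test assumes
	 * MAX_TX_RING_SLOTS is 8, i.e. a full burst is used whenever at
	 * least eight events remain.
	 */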
	while (nb_events) {
		frames_to_send = (nb_events >> 3) ?
			MAX_TX_RING_SLOTS : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			/* Select the DPCI FQ matching the sched type */
			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
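
			/*
			 * impl_opaque carries a held DQRR index + 1 from a
			 * previous atomic dequeue; request DQRR consume on
			 * enqueue (DCA) so the atomic context is released.
			 */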
			if (event->impl_opaque) {
				uint8_t dqrr_index = event->impl_opaque - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DPIO->dqrr_size--;
				DPAA2_PER_LCORE_DPIO->dqrr_held &=
					~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid the copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_eqdesc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}
		num_tx += frames_to_send;
		nb_events -= frames_to_send;
	}

	return num_tx;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}
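
/*
 * Atomic variant: the DQRR entry is not consumed here. Its index is
 * stored in ev->impl_opaque (offset by one) so the context stays held
 * until the event is re-enqueued with DCA or released on the next
 * dequeue.
 */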
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	ev->impl_opaque = dqrr_index + 1;
	DPAA2_PER_LCORE_DPIO->dqrr_size++;
	DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
}
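
/*
 * Dequeue a burst of events by polling the portal's DQRR: first
 * release any atomic contexts still held from the previous dequeue,
 * then pull entries until the DQRR is empty or nb_events have been
 * delivered.
 */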
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0;

	RTE_SET_USED(timeout_ticks);

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			PMD_DRV_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}

	swp = DPAA2_PER_LCORE_PORTAL;

	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
		if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
			dq = qbman_get_dqrr_from_idx(swp, i);
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_PER_LCORE_DPIO->dqrr_size--;
		}
		i++;
	}
	DPAA2_PER_LCORE_DPIO->dqrr_held = 0;
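
	/* Poll until nb_events are dequeued or the DQRR runs dry */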
	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq)
			return num_pkts;

		fd = qbman_result_DQ_fd(dq);

		/* The FQ context holds the queue and its process callback */
		rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			PMD_DRV_LOG(ERR, PMD, "Null Return VQ received\n");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	PMD_DRV_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = RTE_MAX_LCORE;
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	PMD_DRV_FUNC_TRACE();

	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	PMD_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
				      RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct evq_info_t *evq_info = &priv->evq_info[queue_id];

	PMD_DRV_FUNC_TRACE();

	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;

	return 0;
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port);
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	if (!dpaa2_io_portal[port_id].dpio_dev) {
		dpaa2_io_portal[port_id].dpio_dev =
			dpaa2_get_qbman_swp(port_id);
		/* Check the allocation before taking a reference on it */
		if (!dpaa2_io_portal[port_id].dpio_dev)
			return -1;
		rte_atomic16_inc(&dpaa2_io_portal[port_id].dpio_dev->ref_count);
	}

	dpaa2_io_portal[port_id].eventdev = dev;
	dev->data->ports[port_id] = &dpaa2_io_portal[port_id];
	return 0;
}

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	int i;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &priv->evq_info[queues[i]];
		/* Stop push-mode dequeues from this channel, then drop it */
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
					0, dpaa2_portal->dpio_dev->token,
					evq_info->dpcon->dpcon_id);
		evq_info->link = 0;
	}

	return (int)nb_unlinks;
}
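
/*
 * Link event queues to a port: each queue's DPCON is added as a static
 * dequeue channel on the port's DPIO, so QBMan pushes work from that
 * channel straight into this portal's DQRR.
 */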
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_io_portal_t *dpaa2_portal = port;
	struct evq_info_t *evq_info;
	uint8_t channel_index;
	int ret, i, n;

	PMD_DRV_FUNC_TRACE();

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		if (evq_info->link)
			continue;

		ret = dpio_add_static_dequeue_channel(
			dpaa2_portal->dpio_dev->dpio,
			CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
			evq_info->dpcon->dpcon_id, &channel_index);
		if (ret < 0) {
			PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
				    ret);
			goto err;
		}

		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
		evq_info->link = 1;
	}

	RTE_SET_USED(priorities);

	return (int)nb_links;
err:
	/* Roll back the links that were already established */
	for (n = 0; n < i; n++) {
		evq_info = &priv->evq_info[queues[n]];
		qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
				   evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
					0, dpaa2_portal->dpio_dev->token,
					evq_info->dpcon->dpcon_id);
		evq_info->link = 0;
	}
	return ret;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1;

	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns * scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	PMD_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get = dpaa2_eventdev_info_get,
	.dev_configure = dpaa2_eventdev_configure,
	.dev_start = dpaa2_eventdev_start,
	.dev_stop = dpaa2_eventdev_stop,
	.dev_close = dpaa2_eventdev_close,
	.queue_def_conf = dpaa2_eventdev_queue_def_conf,
	.queue_setup = dpaa2_eventdev_queue_setup,
	.queue_release = dpaa2_eventdev_queue_release,
	.port_def_conf = dpaa2_eventdev_port_def_conf,
	.port_setup = dpaa2_eventdev_port_setup,
	.port_release = dpaa2_eventdev_port_release,
	.port_link = dpaa2_eventdev_port_link,
	.port_unlink = dpaa2_eventdev_port_unlink,
	.timeout_ticks = dpaa2_eventdev_timeout_ticks,
	.dump = dpaa2_eventdev_dump,
};

static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Configure the DPCI Rx queues to deliver frames to the DPCON */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
			       DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			PMD_DRV_LOG(ERR, PMD,
				    "set_rx_q failed with err code: %d", ret);
			return ret;
		}
	}

	return 0;
}
static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->schedule = NULL;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			PMD_DRV_LOG(ERR, PMD,
				    "dpci setup failed with err code: %d",
				    ret);
			return ret;
		}

		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, PMD, "Initializing %s\n", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_DRV_LOG(INFO, PMD, "Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove,
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);