X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Fdpaa2%2Fdpaa2_eventdev.c;h=1d3ad8ffd659a82d5359a58f6df769f6e0725ed7;hb=918fd2f1466b0e3b21a033df7012a77a83665582;hp=8d168b0287f9a108449363f846acc3f59c486c57;hpb=e7b3b13ba930254ce07f84a57e578325e79fe5fa;p=dpdk.git diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c index 8d168b0287..1d3ad8ffd6 100644 --- a/drivers/event/dpaa2/dpaa2_eventdev.c +++ b/drivers/event/dpaa2/dpaa2_eventdev.c @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright 2017 NXP - * + * Copyright 2017,2019 NXP */ #include @@ -26,18 +24,17 @@ #include #include #include -#include +#include #include #include +#include #include #include #include #include #include -#ifdef RTE_LIBRTE_SECURITY #include -#endif #include "dpaa2_eventdev.h" #include "dpaa2_eventdev_logs.h" #include @@ -51,8 +48,7 @@ * Soft Event Flow is DPCI Instance */ -/* Dynamic logging identified for mempool */ -int dpaa2_logtype_event; +#define DPAA2_EV_TX_RETRY_COUNT 10000 static uint16_t dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[], @@ -63,7 +59,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[], struct dpaa2_dpio_dev *dpio_dev; uint32_t queue_id = ev[0].queue_id; struct dpaa2_eventq *evq_info; - uint32_t fqid; + uint32_t fqid, retry_count; struct qbman_swp *swp; struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; uint32_t loop, frames_to_send; @@ -76,7 +72,9 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[], /* Affine current thread context to a qman portal */ ret = dpaa2_affine_qbman_swp(); if (ret < 0) { - DPAA2_EVENTDEV_ERR("Failure in affining portal"); + DPAA2_EVENTDEV_ERR( + "Failed to allocate IO portal, tid: %d\n", + rte_gettid()); return 0; } } @@ -133,8 +131,9 @@ skip_linking: qbman_eq_desc_set_response(&eqdesc[loop], 0, 0); if (event->sched_type == RTE_SCHED_TYPE_ATOMIC - && event->mbuf->seqn) { - uint8_t dqrr_index = event->mbuf->seqn - 1; + && *dpaa2_seqn(event->mbuf)) { + uint8_t dqrr_index = + *dpaa2_seqn(event->mbuf) - 1; qbman_eq_desc_set_dca(&eqdesc[loop], 1, dqrr_index, 0); @@ -166,13 +165,25 @@ skip_linking: } send_partial: loop = 0; + retry_count = 0; while (loop < frames_to_send) { - loop += qbman_swp_enqueue_multiple_desc(swp, + ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop], &fd_arr[loop], frames_to_send - loop); + if (unlikely(ret < 0)) { + retry_count++; + if (retry_count > DPAA2_EV_TX_RETRY_COUNT) { + num_tx += loop; + nb_events -= loop; + return num_tx + loop; + } + } else { + loop += ret; + retry_count = 0; + } } - num_tx += frames_to_send; - nb_events -= frames_to_send; + num_tx += loop; + nb_events -= loop; } return num_tx; @@ -239,7 +250,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp, rte_memcpy(ev, ev_temp, sizeof(struct rte_event)); rte_free(ev_temp); - ev->mbuf->seqn = dqrr_index + 1; + *dpaa2_seqn(ev->mbuf) = dqrr_index + 1; DPAA2_PER_LCORE_DQRR_SIZE++; DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf; @@ -263,7 +274,9 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[], /* Affine current thread context to a qman portal */ ret = dpaa2_affine_qbman_swp(); if (ret < 0) { - DPAA2_EVENTDEV_ERR("Failure in affining portal"); + DPAA2_EVENTDEV_ERR( + "Failed to allocate IO portal, tid: %d\n", + rte_gettid()); return 0; } } @@ -302,7 +315,7 @@ skip_linking: if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) { qbman_swp_dqrr_idx_consume(swp, i); 
DPAA2_PER_LCORE_DQRR_SIZE--; - DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn = + *dpaa2_seqn(DPAA2_PER_LCORE_DQRR_MBUF(i)) = DPAA2_INVALID_MBUF_SEQN; } i++; @@ -381,7 +394,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev, dev_info->max_event_priority_levels = DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS; dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO); - /* we only support dpio upto number of cores*/ + /* we only support dpio up to number of cores */ if (dev_info->max_event_ports > rte_lcore_count()) dev_info->max_event_ports = rte_lcore_count(); dev_info->max_event_port_dequeue_depth = @@ -393,7 +406,9 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev, RTE_EVENT_DEV_CAP_BURST_MODE| RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK | RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT | - RTE_EVENT_DEV_CAP_NONSEQ_MODE; + RTE_EVENT_DEV_CAP_NONSEQ_MODE | + RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES | + RTE_EVENT_DEV_CAP_CARRY_FLOW_ID; } @@ -467,11 +482,11 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id, RTE_SET_USED(dev); RTE_SET_USED(queue_id); - RTE_SET_USED(queue_conf); queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS; - queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC | - RTE_SCHED_TYPE_PARALLEL; + queue_conf->nb_atomic_order_sequences = + DPAA2_EVENT_QUEUE_ORDER_SEQUENCES; + queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL; queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL; } @@ -487,8 +502,9 @@ dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id, switch (queue_conf->schedule_type) { case RTE_SCHED_TYPE_PARALLEL: case RTE_SCHED_TYPE_ATOMIC: - break; case RTE_SCHED_TYPE_ORDERED: + break; + default: DPAA2_EVENTDEV_ERR("Schedule type is not supported."); return -1; } @@ -522,7 +538,7 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id, DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH; port_conf->enqueue_depth = DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH; - port_conf->disable_implicit_release = 0; + port_conf->event_port_cfg = 0; } static int @@ -555,14 +571,14 @@ dpaa2_eventdev_port_release(void *port) EVENTDEV_INIT_FUNC_TRACE(); + if (portal == NULL) + return; + /* TODO: Cleanup is required when ports are in linked state. 
*/ if (portal->is_port_linked) DPAA2_EVENTDEV_WARN("Event port must be unlinked before release"); - if (portal) - rte_free(portal); - - portal = NULL; + rte_free(portal); } static int @@ -640,7 +656,7 @@ dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, EVENTDEV_INIT_FUNC_TRACE(); RTE_SET_USED(dev); - *timeout_ticks = ns * scale; + *timeout_ticks = ns / scale; return 0; } @@ -680,14 +696,14 @@ dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev, { struct dpaa2_eventdev *priv = dev->data->dev_private; uint8_t ev_qid = queue_conf->ev.queue_id; - uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id; + struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon; int i, ret; EVENTDEV_INIT_FUNC_TRACE(); for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { ret = dpaa2_eth_eventq_attach(eth_dev, i, - dpcon_id, queue_conf); + dpcon, queue_conf); if (ret) { DPAA2_EVENTDEV_ERR( "Event queue attach failed: err(%d)", ret); @@ -710,7 +726,7 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev, { struct dpaa2_eventdev *priv = dev->data->dev_private; uint8_t ev_qid = queue_conf->ev.queue_id; - uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id; + struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon; int ret; EVENTDEV_INIT_FUNC_TRACE(); @@ -720,7 +736,7 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev, eth_dev, queue_conf); ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id, - dpcon_id, queue_conf); + dpcon, queue_conf); if (ret) { DPAA2_EVENTDEV_ERR( "Event queue attach failed: err(%d)", ret); @@ -797,7 +813,6 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev, return 0; } -#ifdef RTE_LIBRTE_SECURITY static int dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev, @@ -824,14 +839,13 @@ dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev, { struct dpaa2_eventdev *priv = dev->data->dev_private; uint8_t ev_qid = ev->queue_id; - uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id; + struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon; int i, ret; EVENTDEV_INIT_FUNC_TRACE(); for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) { - ret = dpaa2_sec_eventq_attach(cryptodev, i, - dpcon_id, ev); + ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev); if (ret) { DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n", ret); @@ -854,7 +868,7 @@ dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev, { struct dpaa2_eventdev *priv = dev->data->dev_private; uint8_t ev_qid = ev->queue_id; - uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id; + struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon; int ret; EVENTDEV_INIT_FUNC_TRACE(); @@ -864,7 +878,7 @@ dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev, cryptodev, ev); ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id, - dpcon_id, ev); + dpcon, ev); if (ret) { DPAA2_EVENTDEV_ERR( "dpaa2_sec_eventq_attach failed: ret: %d\n", ret); @@ -940,9 +954,68 @@ dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev, return 0; } -#endif -static struct rte_eventdev_ops dpaa2_eventdev_ops = { +static int +dpaa2_eventdev_tx_adapter_create(uint8_t id, + const struct rte_eventdev *dev) +{ + RTE_SET_USED(id); + RTE_SET_USED(dev); + + /* Nothing to do. Simply return. 
*/ + return 0; +} + +static int +dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev, + const struct rte_eth_dev *eth_dev, + uint32_t *caps) +{ + RTE_SET_USED(dev); + RTE_SET_USED(eth_dev); + + *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT; + return 0; +} + +static uint16_t +dpaa2_eventdev_txa_enqueue_same_dest(void *port, + struct rte_event ev[], + uint16_t nb_events) +{ + struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0; + uint8_t qid, i; + + RTE_SET_USED(port); + + m0 = (struct rte_mbuf *)ev[0].mbuf; + qid = rte_event_eth_tx_adapter_txq_get(m0); + + for (i = 0; i < nb_events; i++) + m[i] = (struct rte_mbuf *)ev[i].mbuf; + + return rte_eth_tx_burst(m0->port, qid, m, nb_events); +} + +static uint16_t +dpaa2_eventdev_txa_enqueue(void *port, + struct rte_event ev[], + uint16_t nb_events) +{ + struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf; + uint8_t qid, i; + + RTE_SET_USED(port); + + for (i = 0; i < nb_events; i++) { + qid = rte_event_eth_tx_adapter_txq_get(m); + rte_eth_tx_burst(m->port, qid, &m, 1); + } + + return nb_events; +} + +static struct eventdev_ops dpaa2_eventdev_ops = { .dev_infos_get = dpaa2_eventdev_info_get, .dev_configure = dpaa2_eventdev_configure, .dev_start = dpaa2_eventdev_start, @@ -958,18 +1031,19 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = { .port_unlink = dpaa2_eventdev_port_unlink, .timeout_ticks = dpaa2_eventdev_timeout_ticks, .dump = dpaa2_eventdev_dump, - .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get, - .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add, - .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del, - .eth_rx_adapter_start = dpaa2_eventdev_eth_start, - .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop, -#ifdef RTE_LIBRTE_SECURITY + .dev_selftest = test_eventdev_dpaa2, + .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get, + .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add, + .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del, + .eth_rx_adapter_start = dpaa2_eventdev_eth_start, + .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop, + .eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps, + .eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create, .crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get, .crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add, .crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del, .crypto_adapter_start = dpaa2_eventdev_crypto_start, .crypto_adapter_stop = dpaa2_eventdev_crypto_stop, -#endif }; static int @@ -1031,10 +1105,12 @@ dpaa2_eventdev_create(const char *name) eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst; eventdev->dequeue = dpaa2_eventdev_dequeue; eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst; + eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue; + eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest; /* For secondary processes, the primary has done all the work */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; + goto done; priv = eventdev->data->dev_private; priv->max_event_queues = 0; @@ -1063,11 +1139,46 @@ dpaa2_eventdev_create(const char *name) RTE_LOG(INFO, PMD, "%s eventdev created\n", name); +done: + event_dev_probing_finish(eventdev); return 0; fail: return -EFAULT; } +static int +dpaa2_eventdev_destroy(const char *name) +{ + struct rte_eventdev *eventdev; + struct dpaa2_eventdev *priv; + int i; + + eventdev = rte_event_pmd_get_named_dev(name); + if (eventdev == NULL) { + RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name); + return -1; + } + + 
/* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + priv = eventdev->data->dev_private; + for (i = 0; i < priv->max_event_queues; i++) { + if (priv->evq_info[i].dpcon) + rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon); + + if (priv->evq_info[i].dpci) + rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci); + + } + priv->max_event_queues = 0; + + RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name); + return 0; +} + + static int dpaa2_eventdev_probe(struct rte_vdev_device *vdev) { @@ -1086,6 +1197,8 @@ dpaa2_eventdev_remove(struct rte_vdev_device *vdev) name = rte_vdev_device_name(vdev); DPAA2_EVENTDEV_INFO("Closing %s", name); + dpaa2_eventdev_destroy(name); + return rte_event_pmd_vdev_uninit(name); } @@ -1095,10 +1208,4 @@ static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = { }; RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd); - -RTE_INIT(dpaa2_eventdev_init_log) -{ - dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2"); - if (dpaa2_logtype_event >= 0) - rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE); -} +RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_event, NOTICE);
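
Note: the core behavioural change in dpaa2_eventdev_enqueue_burst() above is that a failing qbman_swp_enqueue_multiple_desc() call is no longer retried indefinitely; the loop gives up after DPAA2_EV_TX_RETRY_COUNT consecutive failures and reports only the events actually queued. Below is a minimal, standalone sketch of that bounded-retry pattern, assuming a hypothetical hw_enqueue() stand-in for the QBMan portal call and plain integers in place of the frame descriptors; it illustrates the control flow only and is not the driver code itself.

#include <stdio.h>
#include <stdint.h>

#define EV_TX_RETRY_COUNT 10000   /* mirrors DPAA2_EV_TX_RETRY_COUNT from the patch */

/* Hypothetical stand-in for qbman_swp_enqueue_multiple_desc():
 * returns the number of frames accepted, or a negative value when the
 * portal is busy and nothing could be enqueued this call.
 */
static int hw_enqueue(const uint32_t *frames, int count)
{
	static int busy_calls;

	(void)frames;

	/* Pretend the portal is busy for the first few calls. */
	if (busy_calls++ < 3)
		return -1;

	/* Accept at most two frames per call to exercise the partial path. */
	return count > 2 ? 2 : count;
}

/* Bounded-retry enqueue: keep calling hw_enqueue() until every frame is
 * queued, but bail out after EV_TX_RETRY_COUNT consecutive failures so a
 * stuck portal cannot hang the caller.  Returns the number of frames
 * actually queued.
 */
static int enqueue_burst(const uint32_t *frames, int nb_frames)
{
	int loop = 0, retry_count = 0;

	while (loop < nb_frames) {
		int ret = hw_enqueue(&frames[loop], nb_frames - loop);

		if (ret < 0) {
			if (++retry_count > EV_TX_RETRY_COUNT)
				return loop;    /* give up, report partial count */
		} else {
			loop += ret;            /* progress made, reset the budget */
			retry_count = 0;
		}
	}
	return loop;
}

int main(void)
{
	uint32_t frames[8] = { 0 };
	int sent = enqueue_burst(frames, 8);

	printf("queued %d of 8 frames\n", sent);
	return 0;
}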