diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 1e247e4f48..a7d57edce7 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- *   Copyright 2017 NXP
+ *   Copyright 2017-2019 NXP
  */
 
 #include <assert.h>
@@ -27,12 +27,15 @@
 #include <rte_eventdev_pmd_vdev.h>
 #include <rte_ethdev.h>
 #include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_cryptodev.h>
 #include <rte_dpaa_bus.h>
 #include <rte_dpaa_logs.h>
 #include <rte_cycles.h>
 #include <rte_kvargs.h>
 
 #include <dpaa_ethdev.h>
+#include <dpaa_sec_event.h>
 #include "dpaa_eventdev.h"
 #include <dpaa_mempool.h>
 
@@ -322,7 +325,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
 	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
-	dev_info->driver_name = "event_dpaa";
+	dev_info->driver_name = "event_dpaa1";
 	dev_info->min_dequeue_timeout_ns =
 		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
 	dev_info->max_dequeue_timeout_ns =
@@ -469,6 +472,7 @@ dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 	RTE_SET_USED(queue_id);
 
 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+	queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
 	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 }
@@ -718,6 +722,209 @@ dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
 	return 0;
 }
 
+static int
+dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
+			      const struct rte_cryptodev *cdev,
+			      uint32_t *caps)
+{
+	const char *name = cdev->data->name;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+
+	if (!strncmp(name, "dpaa_sec-", 9))
+		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
+	else
+		return -1;
+
+	return 0;
+}
+
+static int
+dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
+		const struct rte_cryptodev *cryptodev,
+		const struct rte_event *ev)
+{
+	struct dpaa_eventdev *priv = dev->data->dev_private;
+	uint8_t ev_qid = ev->queue_id;
+	u16 ch_id = priv->evq_info[ev_qid].ch_id;
+	int i, ret;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
+		ret = dpaa_sec_eventq_attach(cryptodev, i,
+				ch_id, ev);
+		if (ret) {
+			DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d\n",
+				    ret);
+			goto fail;
+		}
+	}
+	return 0;
+fail:
+	for (i = (i - 1); i >= 0 ; i--)
+		dpaa_sec_eventq_detach(cryptodev, i);
+
+	return ret;
+}
+
+static int
+dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
+		const struct rte_cryptodev *cryptodev,
+		int32_t rx_queue_id,
+		const struct rte_event *ev)
+{
+	struct dpaa_eventdev *priv = dev->data->dev_private;
+	uint8_t ev_qid = ev->queue_id;
+	u16 ch_id = priv->evq_info[ev_qid].ch_id;
+	int ret;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	if (rx_queue_id == -1)
+		return dpaa_eventdev_crypto_queue_add_all(dev,
+				cryptodev, ev);
+
+	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
+			ch_id, ev);
+	if (ret) {
+		DPAA_EVENTDEV_ERR(
+			"dpaa_sec_eventq_attach failed: ret: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int
+dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
+			     const struct rte_cryptodev *cdev)
+{
+	int i, ret;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+
+	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
+		ret = dpaa_sec_eventq_detach(cdev, i);
+		if (ret) {
+			DPAA_EVENTDEV_ERR(
+				"dpaa_sec_eventq_detach failed:ret %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
+			     const struct rte_cryptodev *cryptodev,
+			     int32_t rx_queue_id)
+{
+	int ret;
+
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	if (rx_queue_id == -1)
+		return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);
+
+	ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
+	if (ret) {
+		DPAA_EVENTDEV_ERR(
+			"dpaa_sec_eventq_detach failed: ret: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
+			   const struct rte_cryptodev *cryptodev)
+{
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(cryptodev);
+
+	return 0;
+}
+
+static int
+dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
+			  const struct rte_cryptodev *cryptodev)
+{
+	EVENTDEV_INIT_FUNC_TRACE();
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(cryptodev);
+
+	return 0;
+}
+
+static int
+dpaa_eventdev_tx_adapter_create(uint8_t id,
+				 const struct rte_eventdev *dev)
+{
+	RTE_SET_USED(id);
+	RTE_SET_USED(dev);
+
+	/* Nothing to do. Simply return. */
+	return 0;
+}
+
+static int
+dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
+			       const struct rte_eth_dev *eth_dev,
+			       uint32_t *caps)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(eth_dev);
+
+	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+	return 0;
+}
+
+static uint16_t
+dpaa_eventdev_txa_enqueue_same_dest(void *port,
+				     struct rte_event ev[],
+				     uint16_t nb_events)
+{
+	struct rte_mbuf *m[DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
+	uint8_t qid, i;
+
+	RTE_SET_USED(port);
+
+	m0 = (struct rte_mbuf *)ev[0].mbuf;
+	qid = rte_event_eth_tx_adapter_txq_get(m0);
+
+	for (i = 0; i < nb_events; i++)
+		m[i] = (struct rte_mbuf *)ev[i].mbuf;
+
+	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
+}
+
+static uint16_t
+dpaa_eventdev_txa_enqueue(void *port,
+			   struct rte_event ev[],
+			   uint16_t nb_events)
+{
+	struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
+	uint8_t qid, i;
+
+	RTE_SET_USED(port);
+
+	for (i = 0; i < nb_events; i++) {
+		qid = rte_event_eth_tx_adapter_txq_get(m);
+		rte_eth_tx_burst(m->port, qid, &m, 1);
+	}
+
+	return nb_events;
+}
+
 static struct rte_eventdev_ops dpaa_eventdev_ops = {
 	.dev_infos_get = dpaa_event_dev_info_get,
 	.dev_configure = dpaa_event_dev_configure,
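--- >8 --- review note, not part of the patch --- >8 ---

The crypto hooks above are only reached through the generic event crypto
adapter API. As a sanity check of the plumbing, here is a minimal
application-side sketch: the device ids, adapter id, port config values
and the ATOMIC completion event are illustrative assumptions, and the
queue_pair_id of -1 is what exercises dpaa_eventdev_crypto_queue_add_all().
The signatures below match the adapter API of this era; later DPDK
releases replaced the rte_event argument with a queue conf struct.

#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

static int
attach_dpaa_sec(uint8_t evdev_id, uint8_t cdev_id, uint8_t adapter_id,
		uint8_t ev_queue_id)
{
	struct rte_event_port_conf port_conf = {
		.new_event_threshold = 4096,
		.dequeue_depth = 8,
		.enqueue_depth = 8,
	};
	/* Completion event attributes applied to the SEC queue pairs. */
	struct rte_event ev = {
		.queue_id = ev_queue_id,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};
	int ret;

	ret = rte_event_crypto_adapter_create(adapter_id, evdev_id,
			&port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
	if (ret)
		return ret;

	/* -1 adds every queue pair and lands in
	 * dpaa_eventdev_crypto_queue_add_all() above.
	 */
	ret = rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
			-1, &ev);
	if (ret)
		return ret;

	return rte_event_crypto_adapter_start(adapter_id);
}

--- 8< ---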
@@ -733,11 +940,18 @@ static struct rte_eventdev_ops dpaa_eventdev_ops = {
 	.port_link = dpaa_event_port_link,
 	.port_unlink = dpaa_event_port_unlink,
 	.timeout_ticks = dpaa_event_dequeue_timeout_ticks,
-	.eth_rx_adapter_caps_get	= dpaa_event_eth_rx_adapter_caps_get,
-	.eth_rx_adapter_queue_add	= dpaa_event_eth_rx_adapter_queue_add,
-	.eth_rx_adapter_queue_del	= dpaa_event_eth_rx_adapter_queue_del,
-	.eth_rx_adapter_start		= dpaa_event_eth_rx_adapter_start,
-	.eth_rx_adapter_stop		= dpaa_event_eth_rx_adapter_stop,
+	.eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
+	.eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
+	.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
+	.eth_tx_adapter_caps_get = dpaa_eventdev_tx_adapter_caps,
+	.eth_tx_adapter_create = dpaa_eventdev_tx_adapter_create,
+	.crypto_adapter_caps_get = dpaa_eventdev_crypto_caps_get,
+	.crypto_adapter_queue_pair_add = dpaa_eventdev_crypto_queue_add,
+	.crypto_adapter_queue_pair_del = dpaa_eventdev_crypto_queue_del,
+	.crypto_adapter_start = dpaa_eventdev_crypto_start,
+	.crypto_adapter_stop = dpaa_eventdev_crypto_stop,
 };
 
 static int
 flag_check_handler(__rte_unused const char *key,
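--- >8 --- review note, not part of the patch --- >8 ---

Since dpaa_eventdev_tx_adapter_caps() advertises
RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT, applications transmit through
rte_event_eth_tx_adapter_enqueue(), which dispatches straight to the
txa_enqueue burst ops wired up in the next hunk rather than via an
adapter service core. A hedged sketch, with placeholder device/port ids
and Tx queue 0 assumed; the trailing flags argument exists on releases
that also have txa_enqueue_same_dest, and passing
RTE_EVENT_ETH_TX_ADAPTER_ENQUEUE_SAME_DEST routes the burst to the
same_dest variant.

#include <rte_event_eth_tx_adapter.h>

static inline uint16_t
tx_one(uint8_t evdev_id, uint8_t port_id, struct rte_mbuf *m)
{
	struct rte_event ev = {
		.mbuf = m,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
	};

	/* Stash the Tx queue in the mbuf; the PMD reads it back with
	 * rte_event_eth_tx_adapter_txq_get() in dpaa_eventdev_txa_enqueue().
	 */
	rte_event_eth_tx_adapter_txq_set(m, 0);

	return rte_event_eth_tx_adapter_enqueue(evdev_id, port_id, &ev, 1, 0);
}

While here: dpaa_eventdev_txa_enqueue() initialises m from ev[0].mbuf and
never advances it inside the loop, so a burst of nb_events transmits the
first mbuf repeatedly; that looks worth a follow-up fix.

--- 8< ---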
@@ -805,6 +1019,10 @@ dpaa_event_dev_create(const char *name, const char *params)
 		eventdev->dequeue	= dpaa_event_dequeue_intr;
 		eventdev->dequeue_burst	= dpaa_event_dequeue_burst_intr;
 	}
+	eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
+	eventdev->txa_enqueue_same_dest = dpaa_eventdev_txa_enqueue_same_dest;
+
+	RTE_LOG(INFO, PMD, "%s eventdev added", name);
 
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
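--- >8 --- review note, not part of the patch --- >8 ---

For completeness, the rename to "event_dpaa1" is user-visible: the vdev
is created under that name and the new capability getter can be probed
before wiring any adapter. A short sketch, assuming the dpaa_sec
cryptodev has id 0:

#include <errno.h>

#include <rte_bus_vdev.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

static int
probe_event_dpaa1(void)
{
	uint32_t caps;
	int evdev_id, ret;

	ret = rte_vdev_init("event_dpaa1", NULL);
	if (ret)
		return ret;

	evdev_id = rte_event_dev_get_dev_id("event_dpaa1");
	if (evdev_id < 0)
		return evdev_id;

	/* Ends up in dpaa_eventdev_crypto_caps_get(); succeeds only for
	 * cryptodevs whose name starts with "dpaa_sec-".
	 */
	ret = rte_event_crypto_adapter_caps_get(evdev_id, 0, &caps);
	if (ret)
		return ret;

	return (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) ?
		0 : -ENOTSUP;
}

--- 8< ---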