/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 NXP
+ * Copyright 2017-2019 NXP
*/
#include <assert.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_cryptodev.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <dpaa_ethdev.h>
+#include <dpaa_sec_event.h>
#include "dpaa_eventdev.h"
#include <dpaa_mempool.h>
* Eventqueue = Channel Instance
* 1 Eventdev can have N Eventqueue
*/
+RTE_LOG_REGISTER(dpaa_logtype_eventdev, pmd.event.dpaa, NOTICE);
#define DISABLE_INTR_MODE "disable_intr"
int ret;
u16 ch_id;
void *buffers[8];
- u32 num_frames, i, irq = 0;
+ u32 num_frames, i;
uint64_t cur_ticks = 0, wait_time_ticks = 0;
struct dpaa_port *portal = (struct dpaa_port *)port;
struct rte_mbuf *mbuf;
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
/* Affine current thread context to a qman portal */
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
do {
/* Let's dequeue the frames */
num_frames = qman_portal_dequeue(ev, nb_events, buffers);
- if (irq)
- irq = 0;
if (num_frames)
break;
cur_ticks = rte_get_timer_cycles();
struct dpaa_port *portal = (struct dpaa_port *)port;
struct rte_mbuf *mbuf;
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
/* Affine current thread context to a qman portal */
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
- dev_info->driver_name = "event_dpaa";
+ dev_info->driver_name = "event_dpaa1";
dev_info->min_dequeue_timeout_ns =
DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
dev_info->max_dequeue_timeout_ns =
RTE_SET_USED(queue_id);
memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+ queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
}
return 0;
}
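+/*
+ * The crypto adapter is only supported with dpaa_sec devices, which are
+ * identified here by their "dpaa_sec-" name prefix.
+ */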
+static int
+dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps)
+{
+ const char *name = cdev->data->name;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ if (!strncmp(name, "dpaa_sec-", 9))
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
+ else
+ return -1;
+
+ return 0;
+}
+
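+/*
+ * Attach every queue pair of the cryptodev to the channel that backs
+ * the target event queue.
+ */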
+static int
+dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ const struct rte_event *ev)
+{
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = ev->queue_id;
+ u16 ch_id = priv->evq_info[ev_qid].ch_id;
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
+ ret = dpaa_sec_eventq_attach(cryptodev, i,
+ ch_id, ev);
+ if (ret) {
+ DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d\n",
+ ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
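+ /* Roll back the queue pairs attached before the failure */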
+ for (i = (i - 1); i >= 0; i--)
+ dpaa_sec_eventq_detach(cryptodev, i);
+
+ return ret;
+}
+
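+/*
+ * Attach a single cryptodev queue pair to an event queue, or all of
+ * them when rx_queue_id is -1.
+ */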
+static int
+dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ int32_t rx_queue_id,
+ const struct rte_event *ev)
+{
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = ev->queue_id;
+ u16 ch_id = priv->evq_info[ev_qid].ch_id;
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa_eventdev_crypto_queue_add_all(dev,
+ cryptodev, ev);
+
+ ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
+ ch_id, ev);
+ if (ret) {
+ DPAA_EVENTDEV_ERR(
+ "dpaa_sec_eventq_attach failed: ret: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
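+/* Detach every queue pair of the cryptodev from the event device */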
+static int
+dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev)
+{
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
+ ret = dpaa_sec_eventq_detach(cdev, i);
+ if (ret) {
+ DPAA_EVENTDEV_ERR(
+ "dpaa_sec_eventq_detach failed:ret %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
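+/*
+ * Detach a single cryptodev queue pair, or all of them when
+ * rx_queue_id is -1.
+ */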
+static int
+dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ int32_t rx_queue_id)
+{
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);
+
+ ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
+ if (ret) {
+ DPAA_EVENTDEV_ERR(
+ "dpaa_sec_eventq_detach failed: ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
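+/*
+ * No additional hardware setup is needed to start or stop the crypto
+ * adapter; these callbacks only satisfy the eventdev ops contract.
+ */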
+static int
+dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cryptodev);
+
+ return 0;
+}
+
+static int
+dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cryptodev);
+
+ return 0;
+}
+
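+/* The Tx adapter needs no per-adapter state, so creation is a no-op */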
+static int
+dpaa_eventdev_tx_adapter_create(uint8_t id,
+ const struct rte_eventdev *dev)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+
+ /* Nothing to do. Simply return. */
+ return 0;
+}
+
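+/*
+ * DPAA transmits packets through the Ethernet device itself (internal
+ * port capability), so no service-core-based Tx queue is required.
+ */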
+static int
+dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+ return 0;
+}
+
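+/*
+ * Fast-path enqueue used when all events share one destination: the Tx
+ * queue is read from the first mbuf and the burst is sent in one call.
+ */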
+static uint16_t
+dpaa_eventdev_txa_enqueue_same_dest(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_mbuf *m[DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
+ uint8_t qid, i;
+
+ RTE_SET_USED(port);
+
+ m0 = (struct rte_mbuf *)ev[0].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m0);
+
+ for (i = 0; i < nb_events; i++)
+ m[i] = (struct rte_mbuf *)ev[i].mbuf;
+
+ return rte_eth_tx_burst(m0->port, qid, m, nb_events);
+}
+
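+/* Generic Tx enqueue: each event may target a different port and queue */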
+static uint16_t
+dpaa_eventdev_txa_enqueue(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_mbuf *m;
+ uint8_t qid, i;
+
+ RTE_SET_USED(port);
+
+ for (i = 0; i < nb_events; i++) {
+ m = (struct rte_mbuf *)ev[i].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m);
+ rte_eth_tx_burst(m->port, qid, &m, 1);
+ }
+
+ return nb_events;
+}
+
static struct rte_eventdev_ops dpaa_eventdev_ops = {
.dev_infos_get = dpaa_event_dev_info_get,
.dev_configure = dpaa_event_dev_configure,
.port_link = dpaa_event_port_link,
.port_unlink = dpaa_event_port_unlink,
.timeout_ticks = dpaa_event_dequeue_timeout_ticks,
- .eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
- .eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
- .eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
- .eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
- .eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
+ .eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
+ .eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
+ .eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
+ .eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
+ .eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
+ .eth_tx_adapter_caps_get = dpaa_eventdev_tx_adapter_caps,
+ .eth_tx_adapter_create = dpaa_eventdev_tx_adapter_create,
+ .crypto_adapter_caps_get = dpaa_eventdev_crypto_caps_get,
+ .crypto_adapter_queue_pair_add = dpaa_eventdev_crypto_queue_add,
+ .crypto_adapter_queue_pair_del = dpaa_eventdev_crypto_queue_del,
+ .crypto_adapter_start = dpaa_eventdev_crypto_start,
+ .crypto_adapter_stop = dpaa_eventdev_crypto_stop,
};
static int flag_check_handler(__rte_unused const char *key,
eventdev->dequeue = dpaa_event_dequeue_intr;
eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
}
+ eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
+ eventdev->txa_enqueue_same_dest = dpaa_eventdev_txa_enqueue_same_dest;
+
+ RTE_LOG(INFO, PMD, "%s eventdev added\n", name);
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)