/* SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright 2017 NXP
- *
+ * Copyright 2017,2019 NXP
*/
#include <assert.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
-#include <rte_ethdev_driver.h>
-#include <rte_cryptodev.h>
+#include <ethdev_driver.h>
+#include <cryptodev_pmd.h>
#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
* Soft Event Flow is DPCI Instance
*/
-/* Dynamic logging identified for mempool */
-int dpaa2_logtype_event;
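+/* Upper bound on consecutive failed enqueue attempts against a busy
+ * QBMAN portal before giving up on the remaining events.
+ */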
+#define DPAA2_EV_TX_RETRY_COUNT 10000
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
struct dpaa2_dpio_dev *dpio_dev;
uint32_t queue_id = ev[0].queue_id;
struct dpaa2_eventq *evq_info;
- uint32_t fqid;
+ uint32_t fqid, retry_count;
struct qbman_swp *swp;
struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
uint32_t loop, frames_to_send;
/* Affine current thread context to a qman portal */
ret = dpaa2_affine_qbman_swp();
if (ret < 0) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal");
+ DPAA2_EVENTDEV_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
return 0;
}
}
qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
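+ /* For atomic events still held in the DQRR, program the enqueue
+ * descriptor so QBMAN consumes the DQRR entry as part of this
+ * enqueue (DCA) instead of consuming it in software.
+ */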
if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
- && event->mbuf->seqn) {
- uint8_t dqrr_index = event->mbuf->seqn - 1;
+ && *dpaa2_seqn(event->mbuf)) {
+ uint8_t dqrr_index =
+ *dpaa2_seqn(event->mbuf) - 1;
qbman_eq_desc_set_dca(&eqdesc[loop], 1,
dqrr_index, 0);
}
send_partial:
loop = 0;
+ retry_count = 0;
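+ /* Enqueue the burst, retrying while the portal is busy; after
+ * DPAA2_EV_TX_RETRY_COUNT consecutive failures, give up and
+ * report only the events actually sent.
+ */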
while (loop < frames_to_send) {
- loop += qbman_swp_enqueue_multiple_desc(swp,
+ ret = qbman_swp_enqueue_multiple_desc(swp,
&eqdesc[loop], &fd_arr[loop],
frames_to_send - loop);
+ if (unlikely(ret < 0)) {
+ retry_count++;
+ if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
+ num_tx += loop;
+ nb_events -= loop;
+ return num_tx;
+ }
+ } else {
+ loop += ret;
+ retry_count = 0;
+ }
}
- num_tx += frames_to_send;
- nb_events -= frames_to_send;
+ num_tx += loop;
+ nb_events -= loop;
}
return num_tx;
rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
rte_free(ev_temp);
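+ /* Record the 1-based DQRR index in the mbuf so the enqueue side
+ * can release the entry via DCA; 0 means "not held in DQRR".
+ */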
- ev->mbuf->seqn = dqrr_index + 1;
+ *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
DPAA2_PER_LCORE_DQRR_SIZE++;
DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
/* Affine current thread context to a qman portal */
ret = dpaa2_affine_qbman_swp();
if (ret < 0) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal");
+ DPAA2_EVENTDEV_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
return 0;
}
}
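+ /* Consume any DQRR entries still held by this lcore and mark the
+ * associated mbufs as no longer tracked.
+ */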
if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
qbman_swp_dqrr_idx_consume(swp, i);
DPAA2_PER_LCORE_DQRR_SIZE--;
- DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
+ *dpaa2_seqn(DPAA2_PER_LCORE_DQRR_MBUF(i)) =
DPAA2_INVALID_MBUF_SEQN;
}
i++;
dev_info->max_event_priority_levels =
DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
- /* we only support dpio upto number of cores*/
+ /* we only support dpio up to number of cores */
if (dev_info->max_event_ports > rte_lcore_count())
dev_info->max_event_ports = rte_lcore_count();
dev_info->max_event_port_dequeue_depth =
RTE_EVENT_DEV_CAP_BURST_MODE|
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
- RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
+ RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
RTE_SET_USED(dev);
RTE_SET_USED(queue_id);
- RTE_SET_USED(queue_conf);
queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
- queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
- RTE_SCHED_TYPE_PARALLEL;
+ queue_conf->nb_atomic_order_sequences =
+ DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
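+ /* schedule_type holds a single scheduling type; OR-ing types
+ * together (as the old code did) yields an invalid value.
+ * Default to parallel scheduling.
+ */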
+ queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
switch (queue_conf->schedule_type) {
case RTE_SCHED_TYPE_PARALLEL:
case RTE_SCHED_TYPE_ATOMIC:
- break;
case RTE_SCHED_TYPE_ORDERED:
+ break;
+ default:
DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
return -1;
}
DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
port_conf->enqueue_depth =
DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
- port_conf->disable_implicit_release = 0;
+ port_conf->event_port_cfg = 0;
}
static int
EVENTDEV_INIT_FUNC_TRACE();
+ if (portal == NULL)
+ return;
+
/* TODO: Cleanup is required when ports are in linked state. */
if (portal->is_port_linked)
DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");
- if (portal)
- rte_free(portal);
-
- portal = NULL;
+ rte_free(portal);
}
static int
EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
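+ /* Ticks are coarser than nanoseconds: convert by dividing by the
+ * tick scale, not multiplying.
+ */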
- *timeout_ticks = ns * scale;
+ *timeout_ticks = ns / scale;
return 0;
}
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
uint8_t ev_qid = queue_conf->ev.queue_id;
- uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
int i, ret;
EVENTDEV_INIT_FUNC_TRACE();
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
ret = dpaa2_eth_eventq_attach(eth_dev, i,
- dpcon_id, queue_conf);
+ dpcon, queue_conf);
if (ret) {
DPAA2_EVENTDEV_ERR(
"Event queue attach failed: err(%d)", ret);
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
uint8_t ev_qid = queue_conf->ev.queue_id;
- uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
int ret;
EVENTDEV_INIT_FUNC_TRACE();
eth_dev, queue_conf);
ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
- dpcon_id, queue_conf);
+ dpcon, queue_conf);
if (ret) {
DPAA2_EVENTDEV_ERR(
"Event queue attach failed: err(%d)", ret);
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
uint8_t ev_qid = ev->queue_id;
- uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
int i, ret;
EVENTDEV_INIT_FUNC_TRACE();
for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
- ret = dpaa2_sec_eventq_attach(cryptodev, i,
- dpcon_id, ev);
+ ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
if (ret) {
DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
ret);
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
uint8_t ev_qid = ev->queue_id;
- uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
int ret;
EVENTDEV_INIT_FUNC_TRACE();
cryptodev, ev);
ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
- dpcon_id, ev);
+ dpcon, ev);
if (ret) {
DPAA2_EVENTDEV_ERR(
"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
return 0;
}
-static struct rte_eventdev_ops dpaa2_eventdev_ops = {
+static int
+dpaa2_eventdev_tx_adapter_create(uint8_t id,
+ const struct rte_eventdev *dev)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+
+ /* Nothing to do. Simply return. */
+ return 0;
+}
+
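+/* DPAA2 transmits events to ethdev queues directly from the enqueue
+ * path, so advertise the internal-port capability; the Tx adapter
+ * then needs no service core.
+ */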
+static int
+dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+ return 0;
+}
+
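+/* Fast path for bursts in which every event targets the same port and
+ * Tx queue: look the queue up once from the first mbuf and transmit
+ * the whole batch with a single rte_eth_tx_burst() call.
+ */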
+static uint16_t
+dpaa2_eventdev_txa_enqueue_same_dest(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
+ uint8_t qid, i;
+
+ RTE_SET_USED(port);
+
+ m0 = (struct rte_mbuf *)ev[0].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m0);
+
+ for (i = 0; i < nb_events; i++)
+ m[i] = (struct rte_mbuf *)ev[i].mbuf;
+
+ return rte_eth_tx_burst(m0->port, qid, m, nb_events);
+}
+
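+/* Generic Tx adapter enqueue: events may target different ports and
+ * queues, so each mbuf is looked up and transmitted individually.
+ */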
+static uint16_t
+dpaa2_eventdev_txa_enqueue(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_mbuf *m;
+ uint8_t qid, i;
+
+ RTE_SET_USED(port);
+
+ for (i = 0; i < nb_events; i++) {
+ m = (struct rte_mbuf *)ev[i].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m);
+ rte_eth_tx_burst(m->port, qid, &m, 1);
+ }
+
+ return nb_events;
+}
+
+static struct eventdev_ops dpaa2_eventdev_ops = {
.dev_infos_get = dpaa2_eventdev_info_get,
.dev_configure = dpaa2_eventdev_configure,
.dev_start = dpaa2_eventdev_start,
.port_unlink = dpaa2_eventdev_port_unlink,
.timeout_ticks = dpaa2_eventdev_timeout_ticks,
.dump = dpaa2_eventdev_dump,
- .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
- .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
- .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
- .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
- .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
+ .dev_selftest = test_eventdev_dpaa2,
+ .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
+ .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
+ .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
+ .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
+ .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
+ .eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps,
+ .eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create,
.crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
.crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
.crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
eventdev->dequeue = dpaa2_eventdev_dequeue;
eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
+ eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue;
+ eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
+ goto done;
priv = eventdev->data->dev_private;
priv->max_event_queues = 0;
RTE_LOG(INFO, PMD, "%s eventdev created\n", name);
+done:
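+ /* Mark probing complete so the device's fast-path ops become
+ * usable; needed in secondary processes as well as the primary.
+ */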
+ event_dev_probing_finish(eventdev);
return 0;
fail:
return -EFAULT;
}
+static int
+dpaa2_eventdev_destroy(const char *name)
+{
+ struct rte_eventdev *eventdev;
+ struct dpaa2_eventdev *priv;
+ int i;
+
+ eventdev = rte_event_pmd_get_named_dev(name);
+ if (eventdev == NULL) {
+ RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
+ return -1;
+ }
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ priv = eventdev->data->dev_private;
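+ /* Release the DPCON and DPCI devices acquired during queue setup. */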
+ for (i = 0; i < priv->max_event_queues; i++) {
+ if (priv->evq_info[i].dpcon)
+ rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);
+
+ if (priv->evq_info[i].dpci)
+ rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
+ }
+ priv->max_event_queues = 0;
+
+ RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
+ return 0;
+}
+
static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
name = rte_vdev_device_name(vdev);
DPAA2_EVENTDEV_INFO("Closing %s", name);
+ dpaa2_eventdev_destroy(name);
+
return rte_event_pmd_vdev_uninit(name);
}
};
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
-
-RTE_INIT(dpaa2_eventdev_init_log)
-{
- dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
- if (dpaa2_logtype_event >= 0)
- rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
-}
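+/* Register the driver log type with a default level of NOTICE,
+ * replacing the manual RTE_INIT constructor above.
+ */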
+RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_event, NOTICE);