#include <rte_ethdev_driver.h>
#include <rte_cryptodev.h>
#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
* Soft Event Flow is DPCI Instance
*/
-/* Dynamic logging identified for mempool */
-int dpaa2_logtype_event;
#define DPAA2_EV_TX_RETRY_COUNT 10000
static uint16_t
/* Affine current thread context to a qman portal */
ret = dpaa2_affine_qbman_swp();
if (ret < 0) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal");
+ DPAA2_EVENTDEV_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
return 0;
}
}
/* Affine current thread context to a qman portal */
ret = dpaa2_affine_qbman_swp();
if (ret < 0) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal");
+ DPAA2_EVENTDEV_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
return 0;
}
}
dev_info->max_event_priority_levels =
DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
- /* we only support dpio upto number of cores*/
+ /* we only support dpio up to number of cores */
if (dev_info->max_event_ports > rte_lcore_count())
dev_info->max_event_ports = rte_lcore_count();
dev_info->max_event_port_dequeue_depth =
RTE_EVENT_DEV_CAP_BURST_MODE|
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
- RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
}
RTE_SET_USED(dev);
RTE_SET_USED(queue_id);
- RTE_SET_USED(queue_conf);
queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
+ queue_conf->nb_atomic_order_sequences =
+ DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
switch (queue_conf->schedule_type) {
case RTE_SCHED_TYPE_PARALLEL:
case RTE_SCHED_TYPE_ATOMIC:
- break;
case RTE_SCHED_TYPE_ORDERED:
+ break;
+ default:
DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
return -1;
}
return 0;
}
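+/* Tx adapter create is a no-op: DPAA2 transmits through a
+ * device-internal port, so there is no adapter state to set up.
+ */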
+static int
+dpaa2_eventdev_tx_adapter_create(uint8_t id,
+ const struct rte_eventdev *dev)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+
+ /* Nothing to do. Simply return. */
+ return 0;
+}
+
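+/* Advertise the internal-port capability: the common adapter layer
+ * then invokes the txa_enqueue ops directly instead of bridging
+ * packets through an event port.
+ */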
+static int
+dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+ return 0;
+}
+
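+/* Fast path for bursts whose events all target one Tx port/queue
+ * (selected by RTE_EVENT_ETH_TX_ADAPTER_ENQUEUE_SAME_DEST): a single
+ * rte_eth_tx_burst() covers the whole array.
+ */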
+static uint16_t
+dpaa2_eventdev_txa_enqueue_same_dest(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
+	uint16_t qid;
+	uint8_t i;
+
+ RTE_SET_USED(port);
+
+ m0 = (struct rte_mbuf *)ev[0].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m0);
+
+ for (i = 0; i < nb_events; i++)
+ m[i] = (struct rte_mbuf *)ev[i].mbuf;
+
+ return rte_eth_tx_burst(m0->port, qid, m, nb_events);
+}
+
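+/* Generic enqueue: each event may target its own port/queue (taken
+ * from the mbuf via rte_event_eth_tx_adapter_txq_get()), so packets
+ * go out one at a time with a bounded busy-retry.
+ */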
+static uint16_t
+dpaa2_eventdev_txa_enqueue(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+	struct rte_mbuf *m;
+	uint16_t qid, ret;
+	uint32_t retry_count;
+	uint8_t i;
+
+	RTE_SET_USED(port);
+
+	for (i = 0; i < nb_events; i++) {
+		m = (struct rte_mbuf *)ev[i].mbuf;
+		qid = rte_event_eth_tx_adapter_txq_get(m);
+
+		/* Retry for a bounded time if the Tx ring is full */
+		retry_count = 0;
+		do {
+			ret = rte_eth_tx_burst(m->port, qid, &m, 1);
+			retry_count++;
+		} while (ret != 1 && retry_count < DPAA2_EV_TX_RETRY_COUNT);
+		if (ret != 1)
+			return i;
+	}
+
+	return nb_events;
+}
+
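For reference, a minimal application-side sketch of exercising this path. All names here (txa_setup, txa_send, adapter_id, evdev_id, eth_port) are illustrative and not part of this patch, the port_conf numbers are placeholders, and the five-argument enqueue assumes the API generation that introduced txa_enqueue_same_dest:

#include <rte_event_eth_tx_adapter.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>

static int
txa_setup(uint8_t adapter_id, uint8_t evdev_id, uint16_t eth_port)
{
	struct rte_event_port_conf pconf = {
		.new_event_threshold = 4096,	/* placeholder values */
		.dequeue_depth = 8,
		.enqueue_depth = 8,
	};
	int ret;

	ret = rte_event_eth_tx_adapter_create(adapter_id, evdev_id, &pconf);
	if (ret)
		return ret;

	/* queue id -1 adds every Tx queue of the port */
	ret = rte_event_eth_tx_adapter_queue_add(adapter_id, eth_port, -1);
	if (ret)
		return ret;

	return rte_event_eth_tx_adapter_start(adapter_id);
}

static inline uint16_t
txa_send(uint8_t evdev_id, uint8_t ev_port, struct rte_mbuf *m, uint16_t txq)
{
	struct rte_event ev = { .mbuf = m };

	/* Stash the Tx queue in the mbuf; the PMD's txa_enqueue reads
	 * it back with rte_event_eth_tx_adapter_txq_get().
	 */
	rte_event_eth_tx_adapter_txq_set(m, txq);
	return rte_event_eth_tx_adapter_enqueue(evdev_id, ev_port, &ev, 1, 0);
}

Passing RTE_EVENT_ETH_TX_ADAPTER_ENQUEUE_SAME_DEST as the final flags argument instead of 0 is what routes a burst to dpaa2_eventdev_txa_enqueue_same_dest; it is only valid when every event in the burst resolves to the same port and Tx queue.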
static struct rte_eventdev_ops dpaa2_eventdev_ops = {
.dev_infos_get = dpaa2_eventdev_info_get,
.dev_configure = dpaa2_eventdev_configure,
.timeout_ticks = dpaa2_eventdev_timeout_ticks,
.dump = dpaa2_eventdev_dump,
.dev_selftest = test_eventdev_dpaa2,
- .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
- .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
- .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
- .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
- .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
+ .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
+ .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
+ .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
+ .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
+ .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
+ .eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps,
+ .eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create,
.crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
.crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
.crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
eventdev->dequeue = dpaa2_eventdev_dequeue;
eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
+ eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue;
+ eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
};
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
-
-RTE_INIT(dpaa2_eventdev_init_log)
-{
- dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
- if (dpaa2_logtype_event >= 0)
- rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(dpaa2_logtype_event, pmd.event.dpaa2, NOTICE);