#include "test_perf_common.h"
-/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
static inline int
atq_nb_event_queues(struct evt_options *opt)
{
/* nb_queues = number of producers */
return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
- rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores);
+ rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
atq_mark_fwd_latency(struct rte_event *const ev)
{
if (unlikely(ev->sub_event_type == 0)) {
}
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
const uint8_t nb_stages)
{
static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
+ uint16_t enq = 0, deq = 0;
struct rte_event ev;
+ PERF_WORKER_INIT;
while (t->done == false) {
- uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+ deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (enable_fwd_latency)
- rte_prefetch0(ev.event_ptr);
-
- if (!event) {
+ if (!deq) {
rte_pause();
continue;
}
- if (enable_fwd_latency)
+ if (prod_crypto_type &&
+ (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
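+ /* unwrap the completed crypto op: forward the processed mbuf and free the op; drop the event on error */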
+ struct rte_crypto_op *op = ev.event_ptr;
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ if (op->sym->m_dst == NULL)
+ ev.event_ptr = op->sym->m_src;
+ else
+ ev.event_ptr = op->sym->m_dst;
+ rte_crypto_op_free(op);
+ }
+ } else {
+ rte_crypto_op_free(op);
+ continue;
+ }
+ }
+
+ if (enable_fwd_latency && !prod_timer_type)
/* first stage in pipeline, mark ts to compute fwd latency */
atq_mark_fwd_latency(&ev);
bufs, sz, cnt);
} else {
atq_fwd_event(&ev, sched_type_list, nb_stages);
- while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
- rte_pause();
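+ /* retry the enqueue until it succeeds or the test is stopped */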
+ do {
+ enq = rte_event_enqueue_burst(dev, port, &ev,
+ 1);
+ } while (!enq && !t->done);
}
}
+
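+ /* common tear-down: release any event still held by this worker and clean up its port */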
+ perf_worker_cleanup(pool, dev, port, &ev, enq, deq);
+
return 0;
}
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
- PERF_WORKER_INIT;
- uint16_t i;
/* +1 to avoid prefetch out of array check */
struct rte_event ev[BURST_SIZE + 1];
+ uint16_t enq = 0, nb_rx = 0;
+ PERF_WORKER_INIT;
+ uint16_t i;
while (t->done == false) {
- uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
- BURST_SIZE, 0);
+ nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
if (!nb_rx) {
rte_pause();
continue;
}
for (i = 0; i < nb_rx; i++) {
- if (enable_fwd_latency) {
+ if (prod_crypto_type &&
+ (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
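+ /* same crypto op unwrap as in the non-burst worker */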
+ struct rte_crypto_op *op = ev[i].event_ptr;
+
+ if (op->status ==
+ RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ if (op->sym->m_dst == NULL)
+ ev[i].event_ptr =
+ op->sym->m_src;
+ else
+ ev[i].event_ptr =
+ op->sym->m_dst;
+ rte_crypto_op_free(op);
+ } else {
+ rte_crypto_op_free(op);
+ continue;
+ }
+ }
+
+ if (enable_fwd_latency && !prod_timer_type) {
rte_prefetch0(ev[i+1].event_ptr);
/* first stage in pipeline.
* mark time stamp to compute fwd latency
}
}
- uint16_t enq;
-
enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
- while (enq < nb_rx) {
+ while ((enq < nb_rx) && !t->done) {
enq += rte_event_enqueue_burst(dev, port,
ev + enq, nb_rx - enq);
}
}
+
+ perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);
+
return 0;
}
uint8_t queue;
uint8_t nb_queues;
uint8_t nb_ports;
+ uint16_t prod;
struct rte_event_dev_info dev_info;
+ struct test_perf *t = evt_test_priv(test);
nb_ports = evt_nr_active_lcores(opt->wlcores);
- nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+ nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 :
evt_nr_active_lcores(opt->plcores);
nb_queues = atq_nb_event_queues(opt);
return ret;
}
- const struct rte_event_dev_config config = {
- .nb_event_queues = nb_queues,
- .nb_event_ports = nb_ports,
- .nb_events_limit = dev_info.max_num_events,
- .nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth =
- dev_info.max_event_port_dequeue_depth,
- .nb_event_port_enqueue_depth =
- dev_info.max_event_port_enqueue_depth,
- };
-
- ret = rte_event_dev_configure(opt->dev_id, &config);
+ ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
if (ret) {
evt_err("failed to configure eventdev %d", opt->dev_id);
return ret;
return ret;
}
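+ /* start the producer-side devices for the configured producer type */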
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_eth_dev_start(prod);
+ if (ret) {
+ evt_err("Ethernet dev [%d] failed to start. Using synthetic producer",
+ prod);
+ return ret;
+ }
+
+ ret = rte_event_eth_rx_adapter_start(prod);
+ if (ret) {
+ evt_err("Rx adapter[%d] start failed", prod);
+ return ret;
+ }
+ printf("%s: Port[%d] using Rx adapter[%d] started\n",
+ __func__, prod, prod);
+ }
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
+ ret = rte_event_timer_adapter_start(
+ t->timer_adptr[prod]);
+ if (ret) {
+ evt_err("failed to Start event timer adapter %d"
+ , prod);
+ return ret;
+ }
+ }
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
+ uint8_t cdev_id, cdev_count;
+
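+ /* start every available cryptodev before the crypto adapter producers use them */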
+ cdev_count = rte_cryptodev_count();
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ ret = rte_cryptodev_start(cdev_id);
+ if (ret) {
+ evt_err("Failed to start cryptodev %u",
+ cdev_id);
+ return ret;
+ }
+ }
+ }
+
return 0;
}
.opt_dump = perf_atq_opt_dump,
.test_setup = perf_test_setup,
.ethdev_setup = perf_ethdev_setup,
+ .cryptodev_setup = perf_cryptodev_setup,
+ .ethdev_rx_stop = perf_ethdev_rx_stop,
.mempool_setup = perf_mempool_setup,
.eventdev_setup = perf_atq_eventdev_setup,
.launch_lcores = perf_atq_launch_lcores,
.eventdev_destroy = perf_eventdev_destroy,
.mempool_destroy = perf_mempool_destroy,
.ethdev_destroy = perf_ethdev_destroy,
+ .cryptodev_destroy = perf_cryptodev_destroy,
.test_result = perf_test_result,
.test_destroy = perf_test_destroy,
};