{
int ret;
uint8_t queue;
+ uint8_t nb_queues;
+ uint8_t nb_ports;
+
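+ /* With the ethdev Rx adapter producer, packets are injected into the
+  * event device by the adapter itself, so producer lcores need no event
+  * ports and one all-types queue is created per ethernet device.
+  */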
+ nb_ports = evt_nr_active_lcores(opt->wlcores);
+ nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+ evt_nr_active_lcores(opt->plcores);
+
+ nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
+ rte_eth_dev_count() : atq_nb_event_queues(opt);
const struct rte_event_dev_config config = {
- .nb_event_queues = atq_nb_event_queues(opt),
- .nb_event_ports = perf_nb_event_ports(opt),
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
.nb_events_limit = 4096,
.nb_event_queue_flows = opt->nb_flows,
.nb_event_port_dequeue_depth = 128,
.nb_event_port_enqueue_depth = 128,
};
/* queue configurations */
- for (queue = 0; queue < atq_nb_event_queues(opt); queue++) {
+ for (queue = 0; queue < nb_queues; queue++) {
ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
if (ret) {
evt_err("failed to setup queue=%d", queue);
return ret;
}
}

ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
- atq_nb_event_queues(opt));
+ nb_queues);
if (ret)
return ret;
return 0;
}
+static int
+perf_producer_wrapper(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ /* Launch the producer function only for the synthetic producer. */
+ if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
+ return perf_producer(arg);
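+ /* With the ethdev Rx adapter producer (EVT_PROD_TYPE_ETH_RX_ADPTR),
+  * packets are enqueued to the event device by the adapter itself,
+  * so the producer lcore has nothing to do and simply returns.
+  */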
+ return 0;
+}
+
static inline uint64_t
processed_pkts(struct test_perf *t)
{
if (!(opt->plcores[lcore_id]))
continue;
- ret = rte_eal_remote_launch(perf_producer, &t->prod[port_idx],
- lcore_id);
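+ /* Launch the wrapper so that producer lcores return immediately
+  * instead of running perf_producer() when the producer is not
+  * synthetic.
+  */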
+ ret = rte_eal_remote_launch(perf_producer_wrapper,
+ &t->prod[port_idx], lcore_id);
if (ret) {
evt_err("failed to launch perf_producer %d", lcore_id);
return ret;
fflush(stdout);
if (remaining <= 0) {
- t->done = true;
t->result = EVT_TEST_SUCCESS;
- rte_smp_wmb();
- break;
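+ /* Only the synthetic producer generates a bounded number of packets,
+  * so only then mark the test as done once the expected packet count
+  * has been processed.
+  */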
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
+ t->done = true;
+ rte_smp_wmb();
+ break;
+ }
}
}
- if (new_cycles - dead_lock_cycles > dead_lock_sample) {
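+ /* The outstanding-packet based deadlock check only makes sense for the
+  * synthetic producer, where the expected packet count is known.
+  */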
+ if (new_cycles - dead_lock_cycles > dead_lock_sample &&
+ opt->prod_type == EVT_PROD_TYPE_SYNT) {
remaining = t->outstand_pkts - processed_pkts(t);
if (dead_lock_remaining == remaining) {
rte_event_dev_dump(opt->dev_id, stdout);
#include <unistd.h>
#include <rte_cycles.h>
+#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
uint8_t queue;
int nb_stages = opt->nb_stages;
int ret;
+ int nb_ports;
+ int nb_queues;
+
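+ /* With the ethdev Rx adapter producer the producer lcores need no
+  * event ports, and each ethernet device feeds its own chain of
+  * nb_stages queues.
+  */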
+ nb_ports = evt_nr_active_lcores(opt->wlcores);
+ nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+ evt_nr_active_lcores(opt->plcores);
+
+ nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
+ rte_eth_dev_count() * nb_stages :
+ perf_queue_nb_event_queues(opt);
const struct rte_event_dev_config config = {
- .nb_event_queues = perf_queue_nb_event_queues(opt),
- .nb_event_ports = perf_nb_event_ports(opt),
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
.nb_events_limit = 4096,
.nb_event_queue_flows = opt->nb_flows,
.nb_event_port_dequeue_depth = 128,
}
ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
- perf_queue_nb_event_queues(opt));
+ nb_queues);
if (ret)
return ret;