static inline int
perf_producer(void *arg)
{
+ int i;
struct prod_data *p = arg;
struct test_perf *t = p->t;
struct evt_options *opt = t->opt;
const uint32_t nb_flows = t->nb_flows;
uint32_t flow_counter = 0;
uint64_t count = 0;
- struct perf_elt *m;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
struct rte_event ev;
if (opt->verbose_level > 1)
ev.sub_event_type = 0; /* stage 0 */
while (count < nb_pkts && t->done == false) {
- if (rte_mempool_get(pool, (void **)&m) < 0)
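+ /* Grab BURST_SIZE objects from the mempool in one call; retry while the pool is temporarily empty */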
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
continue;
-
- ev.flow_id = flow_counter++ % nb_flows;
- ev.event_ptr = m;
- m->timestamp = rte_get_timer_cycles();
- while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
- if (t->done)
- break;
- rte_pause();
- m->timestamp = rte_get_timer_cycles();
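+ /* Build and enqueue one event per burst element, retrying until the eventdev accepts it */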
+ for (i = 0; i < BURST_SIZE; i++) {
+ ev.flow_id = flow_counter++ % nb_flows;
+ ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ while (rte_event_enqueue_burst(dev_id,
+ port, &ev, 1) != 1) {
+ if (t->done)
+ break;
+ rte_pause();
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
}
- count++;
+ count += BURST_SIZE;
}
return 0;
}
static inline int
perf_event_timer_producer(void *arg)
{
+ int i;
struct prod_data *p = arg;
struct test_perf *t = p->t;
struct evt_options *opt = t->opt;
const uint32_t nb_flows = t->nb_flows;
const uint64_t nb_timers = opt->nb_timers;
struct rte_mempool *pool = t->pool;
- struct perf_elt *m;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+ memset(&tim, 0, sizeof(struct rte_event_timer));
timeout_ticks = opt->optm_timer_tick_nsec ?
(timeout_ticks * opt->timer_tick_nsec)
/ opt->optm_timer_tick_nsec : timeout_ticks;
timeout_ticks += timeout_ticks ? 0 : 1;
- const struct rte_event_timer tim = {
- .ev.op = RTE_EVENT_OP_NEW,
- .ev.queue_id = p->queue_id,
- .ev.sched_type = t->opt->sched_type_list[0],
- .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .ev.event_type = RTE_EVENT_TYPE_TIMER,
- .state = RTE_EVENT_TIMER_NOT_ARMED,
- .timeout_ticks = timeout_ticks,
- };
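+ /* Fill in a template event timer; each burst element below copies it and sets its own flow id */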
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = t->opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
if (opt->verbose_level > 1)
printf("%s(): lcore %d\n", __func__, rte_lcore_id());
while (count < nb_timers && t->done == false) {
- if (rte_mempool_get(pool, (void **)&m) < 0)
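+ /* Fetch a full burst of timer elements from the mempool; retry while it is empty */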
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
continue;
-
- m->tim = tim;
- m->tim.ev.flow_id = flow_counter++ % nb_flows;
- m->tim.ev.event_ptr = m;
- m->timestamp = rte_get_timer_cycles();
- while (rte_event_timer_arm_burst(
- adptr[flow_counter % nb_timer_adptrs],
- (struct rte_event_timer **)&m, 1) != 1) {
- if (t->done)
- break;
- rte_pause();
- m->timestamp = rte_get_timer_cycles();
+ for (i = 0; i < BURST_SIZE; i++) {
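+ /* m[] has a trailing NULL entry, so prefetching m[i + 1] is safe on the last iteration */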
+ rte_prefetch0(m[i + 1]);
+ m[i]->tim = tim;
+ m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+ m[i]->tim.ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ while (rte_event_timer_arm_burst(
+ adptr[flow_counter % nb_timer_adptrs],
+ (struct rte_event_timer **)&m[i], 1) != 1) {
+ if (t->done)
+ break;
+ rte_pause();
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
+ arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
}
- arm_latency += rte_get_timer_cycles() - m->timestamp;
- count++;
+ count += BURST_SIZE;
}
fflush(stdout);
rte_delay_ms(1000);
struct rte_mempool *pool = t->pool;
struct perf_elt *m[BURST_SIZE + 1] = {NULL};
struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+ memset(&tim, 0, sizeof(struct rte_event_timer));
timeout_ticks = opt->optm_timer_tick_nsec ?
(timeout_ticks * opt->timer_tick_nsec)
/ opt->optm_timer_tick_nsec : timeout_ticks;
timeout_ticks += timeout_ticks ? 0 : 1;
- const struct rte_event_timer tim = {
- .ev.op = RTE_EVENT_OP_NEW,
- .ev.queue_id = p->queue_id,
- .ev.sched_type = t->opt->sched_type_list[0],
- .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .ev.event_type = RTE_EVENT_TYPE_TIMER,
- .state = RTE_EVENT_TIMER_NOT_ARMED,
- .timeout_ticks = timeout_ticks,
- };
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = t->opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
if (opt->verbose_level > 1)
printf("%s(): lcore %d\n", __func__, rte_lcore_id());
}
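+ /* Timer adapter producers also track outstanding packets, so include them in the deadlock check */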
if (new_cycles - dead_lock_cycles > dead_lock_sample &&
- opt->prod_type == EVT_PROD_TYPE_SYNT) {
+ (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
remaining = t->outstand_pkts - processed_pkts(t);
if (dead_lock_remaining == remaining) {
rte_event_dev_dump(opt->dev_id, stdout);
memset(&queue_conf, 0,
sizeof(struct rte_event_eth_rx_adapter_queue_conf));
queue_conf.ev.sched_type = opt->sched_type_list[0];
- for (prod = 0; prod < rte_eth_dev_count(); prod++) {
+ RTE_ETH_FOREACH_DEV(prod) {
uint32_t cap;
ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
return ret;
}
}
-
- ret = rte_eth_dev_start(prod);
- if (ret) {
- evt_err("Ethernet dev [%d] failed to start."
- " Using synthetic producer", prod);
- return ret;
- }
-
- ret = rte_event_eth_rx_adapter_start(prod);
- if (ret) {
- evt_err("Rx adapter[%d] start failed", prod);
- return ret;
- }
- printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__,
- prod, prod);
}
return ret;
.timer_adapter_id = i,
.timer_tick_ns = t->opt->timer_tick_nsec,
.max_tmo_ns = t->opt->max_tmo_nsec,
- .nb_timers = 2 * 1024 * 1024,
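+ /* Size the adapter for as many concurrent timers as the mempool can hold */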
+ .nb_timers = t->opt->pool_sz,
.flags = flags,
};
}
rte_service_runstate_set(service_id, 1);
}
-
- ret = rte_event_timer_adapter_start(wl);
- if (ret) {
- evt_err("failed to Start event timer adapter %d", i);
- return ret;
- }
t->timer_adptr[i] = wl;
}
return 0;
return -1;
}
- if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
/* Validate producer lcores */
if (evt_lcores_has_overlap(opt->plcores,
rte_get_master_lcore())) {
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
- int i;
+ uint16_t i;
+ int ret;
struct test_perf *t = evt_test_priv(test);
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0,
- .hw_ip_checksum = 0,
- .hw_vlan_filter = 0,
- .hw_vlan_strip = 0,
- .hw_vlan_extend = 0,
- .jumbo_frame = 0,
- .hw_strip_crc = 1,
},
.rx_adv_conf = {
.rss_conf = {
opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
return 0;
- if (!rte_eth_dev_count()) {
+ if (!rte_eth_dev_count_avail()) {
evt_err("No ethernet ports found.");
return -ENODEV;
}
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_conf local_port_conf = port_conf;
+
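+ /* Query the port capabilities so the requested RSS hash functions can be trimmed to what the hardware supports */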
+ ret = rte_eth_dev_info_get(i, &dev_info);
+ if (ret != 0) {
+ evt_err("Error during getting device (port %u) info: %s\n",
+ i, strerror(-ret));
+ return ret;
+ }
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ evt_info("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ i,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
- if (rte_eth_dev_configure(i, 1, 1,
- &port_conf)
- < 0) {
+ if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
evt_err("Failed to configure eth port [%d]", i);
return -EINVAL;
}
return -EINVAL;
}
- rte_eth_promiscuous_enable(i);
+ ret = rte_eth_promiscuous_enable(i);
+ if (ret != 0) {
+ evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
+ i, rte_strerror(-ret));
+ return ret;
+ }
}
return 0;
}
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
- int i;
+ uint16_t i;
RTE_SET_USED(test);
if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
rte_event_eth_rx_adapter_stop(i);
rte_eth_dev_stop(i);
- rte_eth_dev_close(i);
}
}
}