/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <math.h>

#include "test_perf_common.h"
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_perf *t = evt_test_priv(test);

	printf("Packet distribution across worker cores :\n");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
				CLGRN" %3.2f"CLNRM"\n", i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);

	return t->result;
}
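/*
 * Worked example (hypothetical numbers): with two workers that processed
 * 0x600000 and 0x200000 events, the loop above reports shares of 75.00
 * and 25.00. Note the per-worker count itself is printed in hexadecimal
 * (PRIx64) while the share is a decimal percentage of the total.
 */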
static inline int
perf_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			ev.flow_id = flow_counter++ % nb_flows;
			ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_enqueue_burst(dev_id,
						port, &ev, 1) != 1) {
				if (t->done)
					break;
				rte_pause();
				m[i]->timestamp = rte_get_timer_cycles();
			}
		}
		count += BURST_SIZE;
	}

	return 0;
}
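/*
 * perf_producer() pushes events one at a time: flow_counter wraps modulo
 * nb_flows, so with nb_flows == 16 the enqueued flow_ids cycle
 * 0, 1, ..., 15, 0, ... across iterations. The inner while loop retries a
 * rejected enqueue (back-pressure) and refreshes the element timestamp so
 * a stalled enqueue does not inflate the measured forwarding latency.
 */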
static inline int
perf_producer_burst(void *arg)
{
	uint32_t i;
	uint64_t timestamp;
	struct rte_event_dev_info dev_info;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint16_t enq = 0;
	uint64_t count = 0;
	struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
	uint32_t burst_size = opt->prod_enq_burst_sz;

	memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
	rte_event_dev_info_get(dev_id, &dev_info);
	if (dev_info.max_event_port_enqueue_depth < burst_size)
		burst_size = dev_info.max_event_port_enqueue_depth;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	for (i = 0; i < burst_size; i++) {
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].queue_id = p->queue_id;
		ev[i].sched_type = t->opt->sched_type_list[0];
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
		ev[i].sub_event_type = 0; /* stage 0 */
	}

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
			continue;
		timestamp = rte_get_timer_cycles();
		for (i = 0; i < burst_size; i++) {
			ev[i].flow_id = flow_counter++ % nb_flows;
			ev[i].event_ptr = m[i];
			m[i]->timestamp = timestamp;
		}
		enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
		while (enq < burst_size) {
			enq += rte_event_enqueue_burst(dev_id, port,
					ev + enq, burst_size - enq);
			if (t->done)
				break;
			rte_pause();
			timestamp = rte_get_timer_cycles();
			for (i = enq; i < burst_size; i++)
				m[i]->timestamp = timestamp;
		}
		count += burst_size;
	}

	return 0;
}
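/*
 * Unlike perf_producer(), the burst variant timestamps the whole burst
 * once, then handles partial enqueues: rte_event_enqueue_burst() may
 * accept fewer than burst_size events, so the retry loop re-submits
 * ev + enq and re-stamps the still-pending elements. burst_size is
 * clamped above to the device's max_event_port_enqueue_depth.
 */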
static inline int
perf_event_timer_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_timer_arm_burst(
					adptr[flow_counter % nb_timer_adptrs],
					(struct rte_event_timer **)&m[i], 1) != 1) {
				if (t->done)
					break;
				m[i]->timestamp = rte_get_timer_cycles();
			}
			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
		}
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}
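/*
 * Worked example for the timeout_ticks conversion above (hypothetical
 * values): with expiry_nsec = 1000000 and timer_tick_nsec = 100000 the
 * requested timeout is 10 ticks. If the adapter reported an optimal
 * resolution of optm_timer_tick_nsec = 300000, the timeout is rescaled
 * to ceil(10 * 100000 / 300000) = 4 ticks; a result of 0 is bumped to 1.
 */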
static inline int
perf_event_timer_producer_burst(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
		}
		rte_event_timer_arm_tmo_tick_burst(
				adptr[flow_counter % nb_timer_adptrs],
				(struct rte_event_timer **)m,
				tim.timeout_ticks,
				BURST_SIZE);
		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}
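/*
 * rte_event_timer_arm_tmo_tick_burst() arms all BURST_SIZE timers with
 * the same timeout_ticks in one call, so only one arm-latency sample is
 * taken per burst, measured from the timestamp of the last element
 * (m[i - 1], since i == BURST_SIZE after the fill loop).
 */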
static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	bool burst = evt_has_burst_mode(p->dev_id);

	/* In case of synthetic producer, launch perf_producer or
	 * perf_producer_burst depending on producer enqueue burst size.
	 */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz == 1)
		return perf_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz > 1) {
		if (!burst)
			evt_err("This event device does not support burst mode");
		else
			return perf_producer_burst(arg);
	}
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			!t->opt->timdev_use_burst)
		return perf_event_timer_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			t->opt->timdev_use_burst)
		return perf_event_timer_producer_burst(arg);

	return 0;
}
static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}
static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}
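/*
 * worker[i].latency accumulates raw timer-cycle deltas; the reporting
 * loop in perf_launch_lcores() converts the per-packet average to
 * microseconds by dividing by freq_mhz (timer Hz / 1e6).
 */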
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = t->outstand_pkts;

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
				    opt->prod_type ==
				    EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
					t->done = true;
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for seconds, deadlock");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}

	return 0;
}
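/*
 * Throughput math used above: every perf_sample (~1 s worth of timer
 * cycles) the reporter computes mpps = packets completed in the window
 * divided by 1e6. With hypothetical numbers, 12.4M completions in one
 * window print as "12.400 mpps" next to the running average
 * total_mpps / samples. The dead-lock branch fires only when no progress
 * at all is observed for dead_lock_sample (5 s of timer cycles).
 */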
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities", prod);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter\n");
				return ret;
			}
		}
	}

	return ret;
}
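/*
 * When the adapter lacks RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT, the
 * Rx adapter is driven by a software service; evt_service_setup() maps
 * that service id onto an available service lcore so the polling work
 * stays off the producer and worker cores.
 */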
static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return rte_errno;
		}

		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for timer adapter\n");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}
	return 0;
}
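/*
 * RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES lets the adapter round the
 * requested tick to a resolution it can honor; the achieved resolution
 * comes back as min_resolution_ns and is consumed by the producers as
 * optm_timer_tick_nsec. RTE_EVENT_TIMER_ADAPTER_F_SP_PUT is a
 * single-producer optimization, hence the nb_producers == 1 check.
 */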
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues,
				const struct rte_event_port_conf *port_conf)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
				port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;

		ret = rte_event_port_setup(opt->dev_id, port, &conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->t = t;
		}

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;

		ret = perf_event_rx_adapter_setup(opt, stride, conf);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->queue_id = prod * stride;
			p->t = t;
			prod++;
		}

		ret = perf_event_timer_adapter_setup(t);
		if (ret)
			return ret;
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->t = t;

			struct rte_event_port_conf conf = *port_conf;
			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
				RTE_EVENT_PORT_CFG_HINT_CONSUMER;

			ret = rte_event_port_setup(opt->dev_id, port, &conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}
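/*
 * Resulting port layout: ports [0, nb_workers) are worker ports linked
 * to every queue; the remaining ports up to perf_nb_event_ports(opt) are
 * unlinked producer ports, owned by the Rx adapter, the timer adapter,
 * or the synthetic producers depending on prod_type.
 */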
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + main when producer cores are used
	 * Else N worker + main when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test need minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlaps with main lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlaps producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more workers lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_main_lcore())) {
			evt_err("producer lcores overlaps with main lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("minimum one producer is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if ((opt->nb_stages == 1 &&
			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
			opt->fwd_latency) {
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}

	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}
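/*
 * Example of a configuration that satisfies these checks (illustrative
 * command line; adjust cores and device to the target system):
 *
 *   dpdk-test-eventdev -l 0-3 -- --test=perf_queue \
 *       --plcores=1 --wlcores=2,3 --nb_flows=16 --nb_pkts=0
 *
 * Main lcore 0 runs the reporter, lcore 1 produces, lcores 2-3 work;
 * nb_pkts=0 selects the INT64_MAX-based default applied above.
 */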
void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
	evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}
void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (i = 0; i < opt->nb_timer_adptrs; i++)
			rte_event_timer_adapter_stop(t->timer_adptr[i]);
	}
	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}
static void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
		void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}

#define NB_RX_DESC			128
#define NB_TX_DESC			512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
		return 0;

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
				dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support, "
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
				rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
					i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}
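/*
 * The RSS mask trim above follows common DPDK ethdev practice: the
 * requested rss_hf is intersected with dev_info.flow_type_rss_offloads
 * and a note is printed when the hardware cannot honor the full request,
 * so flow distribution may be coarser on such NICs.
 */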
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_eth_dev_stop(i);
		}
	}
}
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements*/
				sizeof(struct perf_elt), /* element size*/
				512, /* cache size*/
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements*/
				512, /* cache size*/
				0,
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */
	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}
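/*
 * Two pool flavors: synthetic and timer producers allocate plain
 * perf_elt records (zeroed by the perf_elt_init constructor), while the
 * ethdev Rx path needs real mbufs, hence rte_pktmbuf_pool_create() with
 * RTE_MBUF_DEFAULT_BUF_SIZE data rooms.
 */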
void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}
int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->outstand_pkts = opt->nb_timers *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_timers;
	} else {
		t->outstand_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_pkts;
	}

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}
void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}