/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"
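
/* Print the per-worker packet distribution and return the overall test
 * verdict recorded during the run.
 */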
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	int i;
	uint64_t total = 0;
	struct test_perf *t = evt_test_priv(test);

	RTE_SET_USED(opt);

	printf("Packet distribution across worker cores :\n");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
				CLGRN" %3.2f\n"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);

	return t->result;
}
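
/* Synthetic producer: enqueue RTE_EVENT_TYPE_CPU events into stage 0,
 * cycling flow ids over nb_flows and timestamping each element for the
 * latency measurement.
 */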
static inline int
perf_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */
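
	/* Inject events in bursts of BURST_SIZE, retrying each enqueue until
	 * the event device accepts it or the test is torn down.
	 */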
	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			ev.flow_id = flow_counter++ % nb_flows;
			ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_enqueue_burst(dev_id,
						port, &ev, 1) != 1) {
				if (t->done)
					break;
				rte_pause();
				m[i]->timestamp = rte_get_timer_cycles();
			}
		}
		count += BURST_SIZE;
	}

	return 0;
}
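
/* Producer for the event timer adapter mode: instead of enqueuing events
 * directly, arm one event timer per element and let the adapter inject the
 * expiry events into the event device.
 */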
static inline int
perf_event_timer_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
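	/* Rescale the requested expiry into the tick resolution the adapter
	 * actually achieved: e.g. a 1000 ns expiry at a requested 100 ns tick
	 * is 10 ticks, but if the adapter only achieves a 250 ns resolution
	 * it becomes ceil(10 * 100 / 250) = 4 ticks (minimum one tick).
	 */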
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
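
	/* Arm timers one at a time, spreading them round-robin across the
	 * configured timer adapters, and accumulate the per-timer arm latency.
	 */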
	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_timer_arm_burst(
					adptr[flow_counter % nb_timer_adptrs],
					(struct rte_event_timer **)&m[i],
					1) != 1) {
				if (t->done)
					break;
				m[i]->timestamp = rte_get_timer_cycles();
			}
			arm_latency += rte_get_timer_cycles() -
					m[i]->timestamp;
		}
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}
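
/* Burst variant of the timer producer: all timers in a burst share the same
 * timeout tick, so they can be armed with a single
 * rte_event_timer_arm_tmo_tick_burst() call.
 */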
static inline int
perf_event_timer_producer_burst(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
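	/* Same expiry-to-tick rescaling as the single-arm producer above. */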
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
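
	/* Stage a full burst of timers, arm it in one shot, and sample the
	 * arm latency from the last staged timestamp.
	 */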
	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
		}
		rte_event_timer_arm_tmo_tick_burst(
				adptr[flow_counter % nb_timer_adptrs],
				(struct rte_event_timer **)m,
				tim.timeout_ticks,
				BURST_SIZE);
		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}

static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;

	/* Dispatch to the producer variant selected by the producer type. */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
		return perf_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			!t->opt->timdev_use_burst)
		return perf_event_timer_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			t->opt->timdev_use_burst)
		return perf_event_timer_producer_burst(arg);

	return 0;
}

static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}
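
/* Launch workers first, then producers, then monitor progress from the main
 * lcore: a throughput sample is printed every second, and a deadlock is
 * declared if no progress is seen for five consecutive seconds.
 */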
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = t->outstand_pkts;

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
						opt->prod_type ==
						EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
					t->done = true;
					break;
				}
			}
		}
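
		/* Deadlock watchdog: if the completion count has not moved for
		 * an entire five-second window, dump the event device state
		 * and abort the test.
		 */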
		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for seconds, deadlock");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}
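
/* Create one Rx adapter per ethdev and map all of its Rx queues (queue id -1)
 * to the event queue prod * stride; adapters without the INTERNAL_PORT
 * capability also need a service core to run their service function.
 */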
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities",
					opt->dev_id);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter\n");
				return ret;
			}
		}
	}

	return ret;
}
static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return rte_errno;
		}

		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for timer adapter\n");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}
	return 0;
}
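
/* Set up one linked event port per worker, then the producer-side ports:
 * Rx adapters, timer adapters, or plain producer ports depending on the
 * configured producer type.
 */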
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues,
				const struct rte_event_port_conf *port_conf)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
			port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		ret = rte_event_port_setup(opt->dev_id, port, port_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->t = t;
		}

		ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->queue_id = prod * stride;
			p->t = t;
			prod++;
		}

		ret = perf_event_timer_adapter_setup(t);
		if (ret)
			return ret;
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->t = t;

			ret = rte_event_port_setup(opt->dev_id, port,
					port_conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}
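
/* Validate the lcore layout and option combinations: worker and producer
 * lcores must not overlap each other or the main lcore, and the requested
 * queue and port counts must fit within the eventdev limits.
 */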
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + main when producer cores are used
	 * Else N worker + main when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test need minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlaps with main lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlaps producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more workers lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_main_lcore())) {
			evt_err("producer lcores overlaps with main lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("minimum one producer is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if ((opt->nb_stages == 1 &&
			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
			opt->fwd_latency) {
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}

	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}

void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
}

void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (i = 0; i < opt->nb_timer_adptrs; i++)
			rte_event_timer_adapter_stop(t->timer_adptr[i]);
	}
	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

static inline void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
		void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}

#define NB_RX_DESC			128
#define NB_TX_DESC			512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
			.split_hdr_size = 0,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
		return 0;

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}
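
	/* Configure every available ethdev with one Rx and one Tx queue,
	 * trimming the requested RSS hash functions to what each port's
	 * hardware actually supports.
	 */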
	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support,"
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
				i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_eth_dev_stop(i);
		}
	}
}
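
/* Producers that enqueue raw perf_elt objects (synthetic and timer modes)
 * use a plain mempool; the ethdev/Rx adapter path needs a real pktmbuf pool.
 */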
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt), /* element size */
				512, /* cache size */
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				512, /* cache size */
				0, /* private data size */
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */
	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}
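
/* Allocate the per-test private state and derive outstand_pkts, the total
 * event count the monitor loop waits for, scaled by the number of active
 * producer lcores.
 */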
int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->outstand_pkts = opt->nb_timers *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_timers;
	} else {
		t->outstand_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_pkts;
	}

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));

	return 0;
nomem:
	return -ENOMEM;
}

void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}