/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_perf *t = evt_test_priv(test);

	printf("Packet distribution across worker cores:\n");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		printf("Worker %d packets: "CLGRN"%"PRIu64" "CLNRM"percentage:"
				CLGRN" %3.2f\n"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);

	return t->result;
}
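
/* Synthetic producer loop: allocates perf_elt objects from the mempool in
 * bursts and injects each one into stage-0 as a new event, cycling the flow
 * id over nb_flows; it runs until nb_pkts events are produced or the test
 * is marked done.
 */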
static inline int
perf_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */
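
	/*
	 * 'ev' is used as a template below: only flow_id and event_ptr are
	 * rewritten per event; op/queue/sched_type/priority stay fixed.
	 */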
	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			ev.flow_id = flow_counter++ % nb_flows;
			ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_enqueue_burst(dev_id,
						port, &ev, 1) != 1) {
				if (t->done)
					break;
				rte_pause();
				/* refresh the timestamp on retry so enqueue
				 * stalls do not inflate the fwd latency
				 */
				m[i]->timestamp = rte_get_timer_cycles();
			}
		}
		count += BURST_SIZE;
	}

	return 0;
}
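
/* Timer-adapter producer: instead of enqueuing events directly, each element
 * is armed as an event timer, spread round-robin across the nb_timer_adptrs
 * adapters; the adapter injects the event into the device when the timer
 * expires, so event arrival is paced by the expiry time.
 */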
static inline int
perf_event_timer_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks = opt->optm_timer_tick_nsec ?
			(timeout_ticks * opt->timer_tick_nsec)
			/ opt->optm_timer_tick_nsec : timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
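
	/*
	 * The rescaling above re-expresses the expiry in the tick the adapter
	 * actually granted (optm_timer_tick_nsec). Example: with
	 * --timer_tick_nsec=100000 and --expiry_nsec=1000000 the request is
	 * 10 ticks of 100 us; if the adapter granted a 200 us resolution, the
	 * expiry becomes (10 * 100000) / 200000 = 5 ticks, and a zero result
	 * is bumped to 1 so the timer still arms.
	 */
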
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim; /* copy the template timer */
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_timer_arm_burst(
					adptr[flow_counter % nb_timer_adptrs],
					(struct rte_event_timer **)&m[i], 1) != 1) {
				if (t->done)
					break;
				m[i]->timestamp = rte_get_timer_cycles();
			}
			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
		}
		count += BURST_SIZE;
	}

	fflush(stdout);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(), (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000));
	return 0;
}
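
/* Burst variant: timers are initialized BURST_SIZE at a time and armed with
 * a single rte_event_timer_arm_tmo_tick_burst() call, which applies one
 * shared timeout to the whole burst; arm latency is approximated from the
 * last element's timestamp only.
 */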
static inline int
perf_event_timer_producer_burst(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks = opt->optm_timer_tick_nsec ?
			(timeout_ticks * opt->timer_tick_nsec)
			/ opt->optm_timer_tick_nsec : timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim; /* copy the template timer */
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
		}
		rte_event_timer_arm_tmo_tick_burst(
				adptr[flow_counter % nb_timer_adptrs],
				(struct rte_event_timer **)m,
				tim.timeout_ticks,
				BURST_SIZE);
		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
		count += BURST_SIZE;
	}

	fflush(stdout);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(), (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000));
	return 0;
}
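
/* Producer dispatcher run on each producer lcore. For the event timer
 * adapter, timdev_use_burst picks the burst arm variant; in
 * dpdk-test-eventdev this corresponds to the --prod_type_timerdev_burst
 * option (vs --prod_type_timerdev for one arm call per timer).
 */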
static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;

	/* Run the producer body that matches the configured producer type. */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
		return perf_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			!t->opt->timdev_use_burst)
		return perf_event_timer_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			t->opt->timdev_use_burst)
		return perf_event_timer_producer_burst(arg);

	return 0;
}

static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	/* snapshot the counters the workers update concurrently */
	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}
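
/* Launch all workers first, then the producers, on the lcores picked via
 * --wlcores/--plcores; port_idx follows the port layout set up in
 * perf_event_dev_port_setup() (worker ports first, producer ports after).
 */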
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}
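
	/*
	 * Progress is monitored from this (master) lcore: the rate is
	 * sampled every second (perf_sample) and a deadlock check runs
	 * every five seconds (dead_lock_sample).
	 */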
	const uint64_t total_pkts = t->outstand_pkts;

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
					opt->prod_type ==
					EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
					t->done = true;
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
				(opt->prod_type == EVT_PROD_TYPE_SYNT ||
				opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no progress for 5 seconds, suspected deadlock");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}
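
/* Create one Rx adapter per ethdev and feed its queues into the event
 * device, one event queue per port (prod * stride). Adapters without the
 * INTERNAL_PORT capability inject events in software, so a service core is
 * set up to run the adapter's service function.
 */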
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities", opt->dev_id);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("failed to setup service core"
						" for Rx adapter");
				return ret;
			}
		}
	}

	return ret;
}
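
/* Create the configured number of event timer adapters. ADJUST_RES allows
 * the driver to round timer_tick_ns to a resolution it can honour; the
 * granted resolution is read back via min_resolution_ns and stored as
 * optm_timer_tick_nsec for the producers' tick rescaling. With a single
 * producer lcore, SP_PUT enables the cheaper single-producer arm path.
 */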
static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return rte_errno;
		}

		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("failed to setup service core"
						" for timer adapter");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}

	return 0;
}
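
/* Event port layout: ports [0, nb_workers) belong to the workers and are
 * linked to all queues; the remaining ports up to perf_nb_event_ports() are
 * producer ports, with producer p feeding queue p * stride. For the Rx and
 * timer adapter producer types, those ports are driven by the adapters
 * rather than set up here.
 */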
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues,
				const struct rte_event_port_conf *port_conf)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
			port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		ret = rte_event_port_setup(opt->dev_id, port, port_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->t = t;
		}

		ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->queue_id = prod * stride;
			p->t = t;
			prod++;
		}

		ret = perf_event_timer_adapter_setup(t);
		if (ret)
			return ret;
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->t = t;

			ret = rte_event_port_setup(opt->dev_id, port,
					port_conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}

int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + 1 master when producer cores are used,
	 * else N worker + 1 master when the Rx adapter is used.
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test needs a minimum of %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlap with the master lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlap with producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_master_lcore())) {
			evt_err("producer lcores overlap with the master lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("at least one producer lcore is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if ((opt->nb_stages == 1 &&
			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
			opt->fwd_latency) {
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}

	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX / evt_nr_active_lcores(opt->plcores);

	return 0;
}

void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
}

void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (i = 0; i < opt->nb_timer_adptrs; i++)
			rte_event_timer_adapter_stop(t->timer_adptr[i]);
	}
	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

static void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
		void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}
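
/* perf_elt_init() is the mempool object constructor passed to
 * rte_mempool_create() in perf_mempool_setup(), so every element starts
 * zeroed when the pool is populated.
 */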

#define NB_RX_DESC			128
#define NB_TX_DESC			512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
			.split_hdr_size = 0,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
		return 0;

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		rte_eth_dev_info_get(i, &dev_info);

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
				dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support,"
				" requested:%#"PRIx64" configured:%#"PRIx64"\n",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
				rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		rte_eth_promiscuous_enable(i);
	}

	return 0;
}

void
perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_eth_dev_stop(i);
		}
	}
}
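
/* The producer type decides the pool type: synthetic and timer producers
 * move plain perf_elt objects, while the ethdev Rx path needs a pktmbuf
 * pool to back the NIC rx queues.
 */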
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt), /* element size */
				512, /* cache size */
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				512, /* cache size */
				0, /* priv size */
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */
	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->outstand_pkts = opt->nb_timers *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_timers;
	} else {
		t->outstand_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_pkts;
	}

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));

	return 0;
nomem:
	return -ENOMEM;
}

void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}