/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	return t->result;
}
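
/* Synthetic producer: pull a perf_elt from the mempool, timestamp it and
 * enqueue it to stage 0 as a NEW event, cycling the flow id over nb_flows.
 * Runs on each producer lcore until nb_pkts events are injected or the
 * test is marked done.
 */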
static inline int
perf_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get(pool, (void **)&m) < 0)
			continue;

		ev.flow_id = flow_counter++ % nb_flows;
		ev.event_ptr = m;
		m->timestamp = rte_get_timer_cycles();
		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->done)
				break;
			rte_pause();
			m->timestamp = rte_get_timer_cycles();
		}
		count++;
	}

	return 0;
}

static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	/* Launch the producer function only in case of synthetic producer. */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
		return perf_producer(arg);
	return 0;
}

static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}
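
/* Launch workers and producers on their assigned lcores, then monitor from
 * the master lcore: once per second print the current and average mpps
 * (plus the average forward latency when --fwd_latency is set), and declare
 * a deadlock if the processed packet count stalls for 5 seconds.
 */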
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
					t->done = true;
					rte_smp_wmb();
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
				opt->prod_type == EVT_PROD_TYPE_SYNT) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no schedules for 5 seconds, deadlock");
				t->done = true;
				rte_smp_wmb();
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}
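
/* Create and start one Rx adapter per ethdev port: all Rx queues of port
 * 'prod' feed event queue 'prod * stride', using the first scheduling type
 * from the sched_type_list.
 */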
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	for (prod = 0; prod < rte_eth_dev_count(); prod++) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities", prod);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		ret = rte_eth_dev_start(prod);
		if (ret) {
			evt_err("Ethernet dev [%d] failed to start", prod);
			return ret;
		}

		ret = rte_event_eth_rx_adapter_start(prod);
		if (ret) {
			evt_err("Rx adapter[%d] start failed", prod);
			return ret;
		}
		printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__,
				prod, prod);
	}

	return ret;
}
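
/* Event port setup: one port per worker linked to all queues, followed by
 * the producer ports, which stay unlinked (synthetic producers only
 * enqueue; Rx adapter ports are driven by the adapter itself).
 */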
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;
	struct rte_event_port_conf port_conf;

	memset(&port_conf, 0, sizeof(struct rte_event_port_conf));
	rte_event_port_default_conf_get(opt->dev_id, 0, &port_conf);

	/* port configuration */
	const struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = port_conf.enqueue_depth,
			.new_event_threshold = port_conf.new_event_threshold,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
			port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	struct rte_event_port_conf prod_conf = {
			.dequeue_depth = port_conf.dequeue_depth,
			.enqueue_depth = port_conf.enqueue_depth,
			.new_event_threshold = port_conf.new_event_threshold,
	};
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->t = t;
		}

		ret = perf_event_rx_adapter_setup(opt, stride, prod_conf);
		if (ret)
			return ret;
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->t = t;

			ret = rte_event_port_setup(opt->dev_id, port,
					&prod_conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}
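
/* Sanity-check the command line options before device setup and apply the
 * fixups that depend on other options.
 */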
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + 1 master when synthetic producers are used
	 * Else N worker + 1 master when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test needs minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlap with master lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlap with producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_master_lcore())) {
			evt_err("producer lcores overlap with master lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("minimum one producer is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if (opt->nb_stages == 1 && opt->fwd_latency) {
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}
	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}

void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
}

void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

static void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
		void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}
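
/* Ethdev setup for the Rx adapter producer: each port gets a single
 * Rx/Tx queue pair in RSS mode with the ring sizes below. No-op for the
 * synthetic producer, which does not touch the ethdevs.
 */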
#define NB_RX_DESC			128
#define NB_TX_DESC			512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = ETHER_MAX_LEN,
			.hw_strip_crc = 1,
			/* remaining rxmode fields default to 0 */
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type == EVT_PROD_TYPE_SYNT)
		return 0;

	if (!rte_eth_dev_count()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	for (i = 0; i < rte_eth_dev_count(); i++) {
		if (rte_eth_dev_configure(i, 1, 1, &port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
				rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		rte_eth_promiscuous_enable(i);
	}

	return 0;
}

void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for (i = 0; i < rte_eth_dev_count(); i++) {
			rte_event_eth_rx_adapter_stop(i);
			rte_eth_dev_stop(i);
			rte_eth_dev_close(i);
		}
	}
}
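
/* The synthetic producer enqueues bare perf_elt objects from a plain
 * mempool; the Rx adapter path receives packets, so it needs a pktmbuf
 * pool instead.
 */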
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements*/
				sizeof(struct perf_elt), /* element size*/
				512, /* cache size*/
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements*/
				512, /* cache size*/
				0, /* private data size */
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */
	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}
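
/* Allocate the per-test private area and seed it from the options. */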
int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}