From 3617aae53f92e3fe49d0c730699aa6c5143ae721 Mon Sep 17 00:00:00 2001
From: Pavan Nikhilesh
Date: Mon, 11 Dec 2017 20:43:44 +0530
Subject: [PATCH] app/eventdev: add event Rx adapter setup

Add functions to setup and configure Rx adapter based on the number of
ethdev ports setup.

Signed-off-by: Pavan Nikhilesh
Acked-by: Jerin Jacob
---
 app/test-eventdev/test_perf_atq.c    |  19 +++--
 app/test-eventdev/test_perf_common.c | 110 ++++++++++++++++++++++-----
 app/test-eventdev/test_perf_common.h |   1 +
 app/test-eventdev/test_perf_queue.c  |  16 +++-
 4 files changed, 119 insertions(+), 27 deletions(-)

diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index 4beb8b31b6..4b9b439d7e 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -159,6 +159,7 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 	uint8_t queue;
 	uint8_t nb_queues;
 	uint8_t nb_ports;
+	struct rte_event_dev_info dev_info;

 	nb_ports = evt_nr_active_lcores(opt->wlcores);
 	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
@@ -167,13 +168,22 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 	nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
 		rte_eth_dev_count() : atq_nb_event_queues(opt);

+	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
+	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
+	if (ret) {
+		evt_err("failed to get eventdev info %d", opt->dev_id);
+		return ret;
+	}
+
 	const struct rte_event_dev_config config = {
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
-			.nb_events_limit = 4096,
+			.nb_events_limit = dev_info.max_num_events,
 			.nb_event_queue_flows = opt->nb_flows,
-			.nb_event_port_dequeue_depth = 128,
-			.nb_event_port_enqueue_depth = 128,
+			.nb_event_port_dequeue_depth =
+				dev_info.max_event_port_dequeue_depth,
+			.nb_event_port_enqueue_depth =
+				dev_info.max_event_port_enqueue_depth,
 	};

 	ret = rte_event_dev_configure(opt->dev_id, &config);
@@ -197,8 +207,7 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 	}

-	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
-			nb_queues);
+	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues);
 	if (ret)
 		return ret;

diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 7379539a45..26122a5e8a 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -203,19 +203,78 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
 	return 0;
 }

+static int
+perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
+		struct rte_event_port_conf prod_conf)
+{
+	int ret = 0;
+	uint16_t prod;
+	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
+
+	memset(&queue_conf, 0,
+			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
+	queue_conf.ev.sched_type = opt->sched_type_list[0];
+	for (prod = 0; prod < rte_eth_dev_count(); prod++) {
+		uint32_t cap;
+
+		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
+				prod, &cap);
+		if (ret) {
+			evt_err("failed to get event rx adapter[%d]"
+					" capabilities",
+					opt->dev_id);
+			return ret;
+		}
+		queue_conf.ev.queue_id = prod * stride;
+		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
+				&prod_conf);
+		if (ret) {
+			evt_err("failed to create rx adapter[%d]", prod);
+			return ret;
+		}
+		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
+				&queue_conf);
+		if (ret) {
+			evt_err("failed to add rx queues to adapter[%d]", prod);
+			return ret;
+		}
+
+		ret = rte_eth_dev_start(prod);
+		if (ret) {
+			evt_err("Ethernet dev [%d] failed to start."
+					" Using synthetic producer", prod);
+			return ret;
+		}
+
+		ret = rte_event_eth_rx_adapter_start(prod);
+		if (ret) {
+			evt_err("Rx adapter[%d] start failed", prod);
+			return ret;
+		}
+		printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__,
+				prod, prod);
+	}
+
+	return ret;
+}
+
 int
 perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
 				uint8_t stride, uint8_t nb_queues)
 {
 	struct test_perf *t = evt_test_priv(test);
-	uint8_t port, prod;
+	uint16_t port, prod;
 	int ret = -1;
+	struct rte_event_port_conf port_conf;
+
+	memset(&port_conf, 0, sizeof(struct rte_event_port_conf));
+	rte_event_port_default_conf_get(opt->dev_id, 0, &port_conf);

 	/* port configuration */
 	const struct rte_event_port_conf wkr_p_conf = {
 			.dequeue_depth = opt->wkr_deq_dep,
-			.enqueue_depth = 64,
-			.new_event_threshold = 4096,
+			.enqueue_depth = port_conf.enqueue_depth,
+			.new_event_threshold = port_conf.new_event_threshold,
 	};

 	/* setup one port per worker, linking to all queues */
@@ -243,26 +302,38 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
 	}

 	/* port for producers, no links */
-	const struct rte_event_port_conf prod_conf = {
-			.dequeue_depth = 8,
-			.enqueue_depth = 32,
-			.new_event_threshold = 1200,
+	struct rte_event_port_conf prod_conf = {
+			.dequeue_depth = port_conf.dequeue_depth,
+			.enqueue_depth = port_conf.enqueue_depth,
+			.new_event_threshold = port_conf.new_event_threshold,
 	};

-	prod = 0;
-	for ( ; port < perf_nb_event_ports(opt); port++) {
-		struct prod_data *p = &t->prod[port];
-
-		p->dev_id = opt->dev_id;
-		p->port_id = port;
-		p->queue_id = prod * stride;
-		p->t = t;
+	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+		for ( ; port < perf_nb_event_ports(opt); port++) {
+			struct prod_data *p = &t->prod[port];
+			p->t = t;
+		}
-		ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
-		if (ret) {
-			evt_err("failed to setup port %d", port);
+		ret = perf_event_rx_adapter_setup(opt, stride, prod_conf);
+		if (ret)
 			return ret;
+	} else {
+		prod = 0;
+		for ( ; port < perf_nb_event_ports(opt); port++) {
+			struct prod_data *p = &t->prod[port];
+
+			p->dev_id = opt->dev_id;
+			p->port_id = port;
+			p->queue_id = prod * stride;
+			p->t = t;
+
+			ret = rte_event_port_setup(opt->dev_id, port,
+					&prod_conf);
+			if (ret) {
+				evt_err("failed to setup port %d", port);
+				return ret;
+			}
+			prod++;
 		}
-		prod++;
 	}

 	return ret;
@@ -451,6 +522,7 @@ void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)

 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
 		for (i = 0; i < rte_eth_dev_count(); i++) {
+			rte_event_eth_rx_adapter_stop(i);
 			rte_eth_dev_stop(i);
 			rte_eth_dev_close(i);
 		}
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index d81ea9a53d..f8d516ce42 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -12,6 +12,7 @@
 #include <rte_cycles.h>
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
 #include <rte_lcore.h>
 #include <rte_malloc.h>
 #include <rte_mempool.h>
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index 7aa0aea5a1..f8a5cd0be3 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -156,6 +156,7 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 	int ret;
 	int nb_ports;
 	int nb_queues;
+	struct rte_event_dev_info dev_info;

 	nb_ports = evt_nr_active_lcores(opt->wlcores);
 	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
@@ -165,13 +166,22 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 		rte_eth_dev_count() * nb_stages :
 		perf_queue_nb_event_queues(opt);

+	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
+	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
+	if (ret) {
+		evt_err("failed to get eventdev info %d", opt->dev_id);
+		return ret;
+	}
+
 	const struct rte_event_dev_config config = {
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
-			.nb_events_limit = 4096,
+			.nb_events_limit = dev_info.max_num_events,
 			.nb_event_queue_flows = opt->nb_flows,
-			.nb_event_port_dequeue_depth = 128,
-			.nb_event_port_enqueue_depth = 128,
+			.nb_event_port_dequeue_depth =
+				dev_info.max_event_port_dequeue_depth,
+			.nb_event_port_enqueue_depth =
+				dev_info.max_event_port_enqueue_depth,
 	};

 	ret = rte_event_dev_configure(opt->dev_id, &config);
-- 
2.20.1
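
Note on the rte_event_dev_config changes above: rather than hard-coding nb_events_limit and the port dequeue/enqueue depths, the patch sizes them from the limits reported by rte_event_dev_info_get(). Below is a minimal sketch of that pattern in isolation; it is illustrative only, and EV_DEV_ID, NB_QUEUES, NB_PORTS and NB_FLOWS are made-up placeholder values, not identifiers from the patch.

#include <string.h>
#include <rte_eventdev.h>

/* Placeholder values for illustration only. */
#define EV_DEV_ID 0
#define NB_QUEUES 2
#define NB_PORTS  4
#define NB_FLOWS  1024

static int
configure_eventdev_from_caps(void)
{
	struct rte_event_dev_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	ret = rte_event_dev_info_get(EV_DEV_ID, &info);
	if (ret)
		return ret;

	/* Size the device config from the advertised limits instead of
	 * fixed constants, as the patch does for the perf tests.
	 */
	struct rte_event_dev_config config = {
		.nb_event_queues = NB_QUEUES,
		.nb_event_ports = NB_PORTS,
		.nb_events_limit = info.max_num_events,
		.nb_event_queue_flows = NB_FLOWS,
		.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth,
		.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth,
	};

	return rte_event_dev_configure(EV_DEV_ID, &config);
}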
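
Note on the Rx adapter bring-up added in perf_event_rx_adapter_setup(): for every ethdev port the patch queries the adapter capabilities, creates one adapter per port, maps all of that port's Rx queues (rx_queue_id of -1) onto an event queue, starts the ethdev and finally starts the adapter. Below is a minimal, self-contained sketch of the same sequence for a single port; EV_DEV_ID, ETH_PORT_ID, ADAPTER_ID and EV_QUEUE_ID are hypothetical identifiers, and both the ethdev and the event device are assumed to be configured already.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Hypothetical identifiers for illustration only. */
#define EV_DEV_ID   0	/* event device, assumed already configured */
#define ETH_PORT_ID 0	/* ethdev port, assumed already configured */
#define ADAPTER_ID  0	/* one adapter instance per ethdev port */
#define EV_QUEUE_ID 0	/* event queue fed by the adapter */

static int
rx_adapter_bringup(void)
{
	struct rte_event_port_conf port_conf;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
	uint32_t caps;
	int ret;

	/* Reuse the event device's default port config for the adapter,
	 * as the patch does via rte_event_port_default_conf_get().
	 */
	ret = rte_event_port_default_conf_get(EV_DEV_ID, 0, &port_conf);
	if (ret)
		return ret;

	/* Check what the eventdev/ethdev pair supports before creating
	 * the adapter.
	 */
	ret = rte_event_eth_rx_adapter_caps_get(EV_DEV_ID, ETH_PORT_ID, &caps);
	if (ret)
		return ret;

	ret = rte_event_eth_rx_adapter_create(ADAPTER_ID, EV_DEV_ID, &port_conf);
	if (ret)
		return ret;

	/* Packets received on any Rx queue of the port (-1) are injected
	 * as events on EV_QUEUE_ID with atomic scheduling.
	 */
	memset(&queue_conf, 0, sizeof(queue_conf));
	queue_conf.ev.queue_id = EV_QUEUE_ID;
	queue_conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	queue_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ret = rte_event_eth_rx_adapter_queue_add(ADAPTER_ID, ETH_PORT_ID, -1,
			&queue_conf);
	if (ret)
		return ret;

	ret = rte_eth_dev_start(ETH_PORT_ID);
	if (ret)
		return ret;

	return rte_event_eth_rx_adapter_start(ADAPTER_ID);
}

Teardown mirrors this order, which is why perf_ethdev_destroy() now calls rte_event_eth_rx_adapter_stop() before rte_eth_dev_stop() and rte_eth_dev_close().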