X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-eventdev%2Ftest_perf_queue.c;h=db8f2f3e58ce5a1897e21b708235cec4e8119ac1;hb=f05e26051c15dd208277c7e27c77f67720d5d4b1;hp=7aa0aea5a103607681dea91c3f68793d18cf493c;hpb=7f3daf34269e42ec31bf330fa6f869b05a9fec2f;p=dpdk.git

diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index 7aa0aea5a1..db8f2f3e58 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -10,7 +10,9 @@ static inline int
 perf_queue_nb_event_queues(struct evt_options *opt)
 {
 	/* nb_queues = number of producers * number of stages */
-	return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
+	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
+		rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores);
+	return nb_prod * opt->nb_stages;
 }
 
 static inline __attribute__((always_inline)) void
@@ -156,22 +158,30 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 	int ret;
 	int nb_ports;
 	int nb_queues;
+	struct rte_event_dev_info dev_info;
 
 	nb_ports = evt_nr_active_lcores(opt->wlcores);
 	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
 		evt_nr_active_lcores(opt->plcores);
 
-	nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
-		rte_eth_dev_count() * nb_stages :
-		perf_queue_nb_event_queues(opt);
+	nb_queues = perf_queue_nb_event_queues(opt);
+
+	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
+	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
+	if (ret) {
+		evt_err("failed to get eventdev info %d", opt->dev_id);
+		return ret;
+	}
 
 	const struct rte_event_dev_config config = {
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
-			.nb_events_limit = 4096,
+			.nb_events_limit = dev_info.max_num_events,
 			.nb_event_queue_flows = opt->nb_flows,
-			.nb_event_port_dequeue_depth = 128,
-			.nb_event_port_enqueue_depth = 128,
+			.nb_event_port_dequeue_depth =
+				dev_info.max_event_port_dequeue_depth,
+			.nb_event_port_enqueue_depth =
+				dev_info.max_event_port_enqueue_depth,
 	};
 
 	ret = rte_event_dev_configure(opt->dev_id, &config);
@@ -186,7 +196,7 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 			.nb_atomic_order_sequences = opt->nb_flows,
 	};
 	/* queue configurations */
-	for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
+	for (queue = 0; queue < nb_queues; queue++) {
 		q_conf.schedule_type =
 			(opt->sched_type_list[queue % nb_stages]);
 
@@ -209,15 +219,29 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 	}
 
+	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+	/* port configuration */
+	const struct rte_event_port_conf p_conf = {
+			.dequeue_depth = opt->wkr_deq_dep,
+			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
+			.new_event_threshold = dev_info.max_num_events,
+	};
+
 	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
-			nb_queues);
+			nb_queues, &p_conf);
 	if (ret)
 		return ret;
 
-	ret = evt_service_setup(opt->dev_id);
-	if (ret) {
-		evt_err("No service lcore found to run event dev.");
-		return ret;
+	if (!evt_has_distributed_sched(opt->dev_id)) {
+		uint32_t service_id;
+		rte_event_dev_service_id_get(opt->dev_id, &service_id);
+		ret = evt_service_setup(service_id);
+		if (ret) {
+			evt_err("No service lcore found to run event dev.");
+			return ret;
+		}
 	}
 
 	ret = rte_event_dev_start(opt->dev_id);
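
Note on the pattern in this patch: the hard-coded eventdev limits (4096 in-flight events, dequeue/enqueue depth of 128) are replaced by values queried from rte_event_dev_info_get(), and the requested worker dequeue depth is clamped to the device maximum before the ports are configured. The standalone C sketch below illustrates that capability-driven setup using only the public eventdev API; it is not part of the patch. configure_evdev_from_caps() and its parameters are hypothetical names, and the flow count of 1024 is a placeholder for what test-eventdev takes from opt->nb_flows.

#include <string.h>
#include <stdio.h>

#include <rte_eventdev.h>

/*
 * Hypothetical helper (illustrative only): configure an event device and its
 * ports from the limits reported by rte_event_dev_info_get(), mirroring the
 * capability-driven setup used in the patch above.
 */
static int
configure_evdev_from_caps(uint8_t dev_id, uint8_t nb_queues, uint8_t nb_ports,
			  uint16_t req_deq_depth)
{
	struct rte_event_dev_info dev_info;
	uint8_t port;
	int ret;

	memset(&dev_info, 0, sizeof(dev_info));
	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret) {
		printf("failed to get eventdev info %d\n", dev_id);
		return ret;
	}

	/* Clamp the caller's dequeue depth to what the device supports. */
	if (req_deq_depth > dev_info.max_event_port_dequeue_depth)
		req_deq_depth = dev_info.max_event_port_dequeue_depth;

	/* Device-level limits come from the capabilities, not constants. */
	const struct rte_event_dev_config config = {
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_events_limit = dev_info.max_num_events,
		.nb_event_queue_flows = 1024, /* placeholder for opt->nb_flows */
		.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth,
		.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth,
	};

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret)
		return ret;

	/* Per-port limits derived from the same device capabilities. */
	const struct rte_event_port_conf p_conf = {
		.dequeue_depth = req_deq_depth,
		.enqueue_depth = dev_info.max_event_port_dequeue_depth,
		.new_event_threshold = dev_info.max_num_events,
	};

	for (port = 0; port < nb_ports; port++) {
		ret = rte_event_port_setup(dev_id, port, &p_conf);
		if (ret)
			return ret;
	}

	/* Queues would still be set up with rte_event_queue_setup() and the
	 * ports linked before rte_event_dev_start(), as in the test itself.
	 */
	return 0;
}

In the patch itself the dequeue depth request comes from opt->wkr_deq_dep and the port configuration is passed through perf_event_dev_port_setup(); the sketch only shows the direct rte_event_port_setup() path for clarity.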