app/testeventdev: add perf opt dump and check functions
author Jerin Jacob <jerin.jacob@caviumnetworks.com>
Tue, 4 Jul 2017 04:53:16 +0000 (10:23 +0530)
committer Jerin Jacob <jerin.jacob@caviumnetworks.com>
Fri, 7 Jul 2017 07:34:28 +0000 (09:34 +0200)
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Acked-by: Harry van Haaren <harry.van.haaren@intel.com>
app/test-eventdev/test_perf_common.c
app/test-eventdev/test_perf_common.h

index a44f2df..f889b1a 100644
@@ -41,6 +41,115 @@ perf_test_result(struct evt_test *test, struct evt_options *opt)
        return t->result;
 }
 
+int
+perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
+{
+       unsigned int lcores;
+       bool need_slcore = !evt_has_distributed_sched(opt->dev_id);
+
+       /* N producers + N workers + 1 scheduler (based on device capability) + 1 master */
+       lcores = need_slcore ? 4 : 3;
+
+       if (rte_lcore_count() < lcores) {
+               evt_err("test needs at least %d lcores", lcores);
+               return -1;
+       }
+
+       /* Validate worker lcores */
+       if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
+               evt_err("worker lcores overlap with master lcore");
+               return -1;
+       }
+       if (need_slcore && evt_lcores_has_overlap(opt->wlcores, opt->slcore)) {
+               evt_err("worker lcores overlap with scheduler lcore");
+               return -1;
+       }
+       if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
+               evt_err("worker lcores overlap with producer lcores");
+               return -1;
+       }
+       if (evt_has_disabled_lcore(opt->wlcores)) {
+               evt_err("one or more worker lcores are not enabled");
+               return -1;
+       }
+       if (!evt_has_active_lcore(opt->wlcores)) {
+               evt_err("at least one worker lcore is required");
+               return -1;
+       }
+
+       /* Validate producer lcores */
+       if (evt_lcores_has_overlap(opt->plcores, rte_get_master_lcore())) {
+               evt_err("producer lcores overlap with master lcore");
+               return -1;
+       }
+       if (need_slcore && evt_lcores_has_overlap(opt->plcores, opt->slcore)) {
+               evt_err("producer lcores overlap with scheduler lcore");
+               return -1;
+       }
+       if (evt_has_disabled_lcore(opt->plcores)) {
+               evt_err("one or more producer lcores are not enabled");
+               return -1;
+       }
+       if (!evt_has_active_lcore(opt->plcores)) {
+               evt_err("at least one producer lcore is required");
+               return -1;
+       }
+
+       /* Validate scheduler lcore */
+       if (need_slcore &&
+                       opt->slcore == (int)rte_get_master_lcore()) {
+               evt_err("scheduler lcore and master lcore should be different");
+               return -1;
+       }
+       if (need_slcore && !rte_lcore_is_enabled(opt->slcore)) {
+               evt_err("scheduler lcore is not enabled");
+               return -1;
+       }
+
+       if (evt_has_invalid_stage(opt))
+               return -1;
+
+       if (evt_has_invalid_sched_type(opt))
+               return -1;
+
+       if (nb_queues > EVT_MAX_QUEUES) {
+               evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
+               return -1;
+       }
+       if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
+               evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
+               return -1;
+       }
+
+       /* Fixups */
+       if (opt->nb_stages == 1 && opt->fwd_latency) {
+               evt_info("fwd_latency is valid only when nb_stages > 1, disabling");
+               opt->fwd_latency = 0;
+       }
+       if (opt->fwd_latency && !opt->q_priority) {
+               evt_info("enabled queue priority for latency measurement");
+               opt->q_priority = 1;
+       }
+
+       return 0;
+}
+
+void
+perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
+{
+       evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
+       evt_dump_producer_lcores(opt);
+       evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+       evt_dump_worker_lcores(opt);
+       if (!evt_has_distributed_sched(opt->dev_id))
+               evt_dump_scheduler_lcore(opt);
+       evt_dump_nb_stages(opt);
+       evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
+       evt_dump("nb_evdev_queues", "%d", nb_queues);
+       evt_dump_queue_priority(opt);
+       evt_dump_sched_type_list(opt);
+}
+
 void
 perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
 {
index 442ec99..5c56766 100644
@@ -86,9 +86,18 @@ struct perf_elt {
        uint64_t timestamp;
 } __rte_cache_aligned;
 
+static inline int
+perf_nb_event_ports(struct evt_options *opt)
+{
+       return evt_nr_active_lcores(opt->wlcores) +
+                       evt_nr_active_lcores(opt->plcores);
+}
+
 int perf_test_result(struct evt_test *test, struct evt_options *opt);
+int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
 int perf_test_setup(struct evt_test *test, struct evt_options *opt);
 int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
+void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
 void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
 void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
 void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
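
For reference, a minimal caller sketch (not part of this patch) of how a perf test variant such as the queue-based test could wrap the new helpers. The wrapper names perf_queue_nb_event_queues(), perf_queue_opt_check() and perf_queue_opt_dump(), and the one-event-queue-per-producer-per-stage formula, are illustrative assumptions only:

#include "test_perf_common.h"

static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
        /* illustrative assumption: one event queue per producer per stage */
        return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
        /* common perf validation, with this variant's queue count */
        return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
        /* print the effective configuration before launching the run */
        perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

The split keeps the lcore, stage, scheduling-type and queue/port-limit validation common to all perf variants inside perf_opt_check(), while each variant supplies only its own event-queue count; perf_nb_event_ports() reflects the one-port-per-worker plus one-port-per-producer layout used for both the limit check and the dump.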