app/testeventdev: add perf port setup
author	Jerin Jacob <jerin.jacob@caviumnetworks.com>
Tue, 4 Jul 2017 04:53:17 +0000 (10:23 +0530)
committer	Jerin Jacob <jerin.jacob@caviumnetworks.com>
Fri, 7 Jul 2017 07:34:39 +0000 (09:34 +0200)
Set up one port per worker and link it to all queues, and set up
N producer ports to inject events.
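
The stride argument controls which queue each producer injects into:
producer i is assigned queue i * stride. As a minimal sketch (not part
of this patch; perf_queue_eventdev_setup, perf_queue_nb_event_queues
and opt->nb_stages are assumed caller-side names), a queue-based perf
test would be expected to call the helper from its eventdev setup
roughly like this:

    static int
    perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
    {
            int ret;
            /* one chain of nb_stages queues per producer (assumed helper) */
            const uint8_t nb_queues = perf_queue_nb_event_queues(opt);

            /* ... rte_event_dev_configure() and per-queue setup ... */

            /* worker ports linked to all queues, plus producer ports;
             * stride = nb_stages so producer i injects into the first
             * queue of its own chain, i.e. queue i * nb_stages
             */
            ret = perf_event_dev_port_setup(test, opt,
                            opt->nb_stages /* stride */, nb_queues);
            if (ret)
                    return ret;

            return rte_event_dev_start(opt->dev_id);
    }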

Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Acked-by: Harry van Haaren <harry.van.haaren@intel.com>
app/test-eventdev/test_perf_common.c
app/test-eventdev/test_perf_common.h

index f889b1a..46dd057 100644
@@ -41,6 +41,71 @@ perf_test_result(struct evt_test *test, struct evt_options *opt)
        return t->result;
 }
 
+int
+perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+                               uint8_t stride, uint8_t nb_queues)
+{
+       struct test_perf *t = evt_test_priv(test);
+       uint8_t port, prod;
+       int ret = -1;
+
+       /* port configuration */
+       const struct rte_event_port_conf wkr_p_conf = {
+                       .dequeue_depth = opt->wkr_deq_dep,
+                       .enqueue_depth = 64,
+                       .new_event_threshold = 4096,
+       };
+
+       /* setup one port per worker, linking to all queues */
+       for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
+                               port++) {
+               struct worker_data *w = &t->worker[port];
+
+               w->dev_id = opt->dev_id;
+               w->port_id = port;
+               w->t = t;
+               w->processed_pkts = 0;
+               w->latency = 0;
+
+               ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
+               if (ret) {
+                       evt_err("failed to setup port %d", port);
+                       return ret;
+               }
+
+               ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
+               if (ret != nb_queues) {
+                       evt_err("failed to link all queues to port %d", port);
+                       return -EINVAL;
+               }
+       }
+
+       /* port for producers, no links */
+       const struct rte_event_port_conf prod_conf = {
+                       .dequeue_depth = 8,
+                       .enqueue_depth = 32,
+                       .new_event_threshold = 1200,
+       };
+       prod = 0;
+       for ( ; port < perf_nb_event_ports(opt); port++) {
+               struct prod_data *p = &t->prod[port];
+
+               p->dev_id = opt->dev_id;
+               p->port_id = port;
+               p->queue_id = prod * stride;
+               p->t = t;
+
+               ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
+               if (ret) {
+                       evt_err("failed to setup port %d", port);
+                       return ret;
+               }
+               prod++;
+       }
+
+       return ret;
+}
+
 int
 perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
 {
index 5c56766..06e887b 100644
@@ -97,6 +97,8 @@ int perf_test_result(struct evt_test *test, struct evt_options *opt);
 int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
 int perf_test_setup(struct evt_test *test, struct evt_options *opt);
 int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
+int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+                               uint8_t stride, uint8_t nb_queues);
 void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
 void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
 void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);