app/eventdev: modify setup to support ethdev
author: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Mon, 11 Dec 2017 15:13:40 +0000 (20:43 +0530)
committer: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Fri, 19 Jan 2018 15:09:56 +0000 (16:09 +0100)
Modify app setup to accommodate event port and queue setup based on the
number of ethernet ports.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
app/test-eventdev/test_perf_atq.c
app/test-eventdev/test_perf_common.c
app/test-eventdev/test_perf_common.h
app/test-eventdev/test_perf_queue.c

index f58eab6..08bf5a9 100644 (file)
@@ -157,10 +157,19 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 {
        int ret;
        uint8_t queue;
+       uint8_t nb_queues;
+       uint8_t nb_ports;
+
+       nb_ports = evt_nr_active_lcores(opt->wlcores);
+       nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+               evt_nr_active_lcores(opt->plcores);
+
+       nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
+               rte_eth_dev_count() : atq_nb_event_queues(opt);
 
        const struct rte_event_dev_config config = {
-                       .nb_event_queues = atq_nb_event_queues(opt),
-                       .nb_event_ports = perf_nb_event_ports(opt),
+                       .nb_event_queues = nb_queues,
+                       .nb_event_ports = nb_ports,
                        .nb_events_limit  = 4096,
                        .nb_event_queue_flows = opt->nb_flows,
                        .nb_event_port_dequeue_depth = 128,
@@ -180,7 +189,7 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
                        .nb_atomic_order_sequences = opt->nb_flows,
        };
        /* queue configurations */
-       for (queue = 0; queue < atq_nb_event_queues(opt); queue++) {
+       for (queue = 0; queue < nb_queues; queue++) {
                ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
                if (ret) {
                        evt_err("failed to setup queue=%d", queue);
@@ -189,7 +198,7 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
        }
 
        ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
-                                       atq_nb_event_queues(opt));
+                                       nb_queues);
        if (ret)
                return ret;
 
index 373c9ce..a9710b2 100644 (file)
@@ -60,6 +60,17 @@ perf_producer(void *arg)
        return 0;
 }
 
+static int
+perf_producer_wrapper(void *arg)
+{
+       struct prod_data *p  = arg;
+       struct test_perf *t = p->t;
+       /* Only the synthetic producer needs a software loop on this lcore; other producer types (e.g. ethdev Rx adaptor) feed events without one, so return immediately. */
+       if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
+               return perf_producer(arg);
+       return 0;
+}
+
 static inline uint64_t
 processed_pkts(struct test_perf *t)
 {
@@ -114,8 +125,8 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
                if (!(opt->plcores[lcore_id]))
                        continue;
 
-               ret = rte_eal_remote_launch(perf_producer, &t->prod[port_idx],
-                                        lcore_id);
+               ret = rte_eal_remote_launch(perf_producer_wrapper,
+                               &t->prod[port_idx], lcore_id);
                if (ret) {
                        evt_err("failed to launch perf_producer %d", lcore_id);
                        return ret;
@@ -165,14 +176,17 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
                        fflush(stdout);
 
                        if (remaining <= 0) {
-                               t->done = true;
                                t->result = EVT_TEST_SUCCESS;
-                               rte_smp_wmb();
-                               break;
+                               if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
+                                       t->done = true;
+                                       rte_smp_wmb();
+                                       break;
+                               }
                        }
                }
 
-               if (new_cycles - dead_lock_cycles > dead_lock_sample) {
+               if (new_cycles - dead_lock_cycles > dead_lock_sample &&
+                               opt->prod_type == EVT_PROD_TYPE_SYNT) {
                        remaining = t->outstand_pkts - processed_pkts(t);
                        if (dead_lock_remaining == remaining) {
                                rte_event_dev_dump(opt->dev_id, stdout);
index 1103dd1..0877b9b 100644 (file)
@@ -10,6 +10,7 @@
 #include <unistd.h>
 
 #include <rte_cycles.h>
+#include <rte_ethdev.h>
 #include <rte_eventdev.h>
 #include <rte_lcore.h>
 #include <rte_malloc.h>
index bfe01a7..e3bd061 100644 (file)
@@ -154,10 +154,20 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
        uint8_t queue;
        int nb_stages = opt->nb_stages;
        int ret;
+       int nb_ports;
+       int nb_queues;
+
+       nb_ports = evt_nr_active_lcores(opt->wlcores);
+       nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+               evt_nr_active_lcores(opt->plcores);
+
+       nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
+               rte_eth_dev_count() * nb_stages :
+               perf_queue_nb_event_queues(opt);
 
        const struct rte_event_dev_config config = {
-                       .nb_event_queues = perf_queue_nb_event_queues(opt),
-                       .nb_event_ports = perf_nb_event_ports(opt),
+                       .nb_event_queues = nb_queues,
+                       .nb_event_ports = nb_ports,
                        .nb_events_limit  = 4096,
                        .nb_event_queue_flows = opt->nb_flows,
                        .nb_event_port_dequeue_depth = 128,
@@ -200,7 +210,7 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
        }
 
        ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
-                                       perf_queue_nb_event_queues(opt));
+                                       nb_queues);
        if (ret)
                return ret;