git.droids-corp.org - dpdk.git/commitdiff
app/eventdev: fix port dequeue depth configuration
author    Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
          Wed, 24 Jan 2018 09:30:33 +0000 (15:00 +0530)
committer Jerin Jacob <jerin.jacob@caviumnetworks.com>
          Wed, 31 Jan 2018 06:18:57 +0000 (07:18 +0100)
The port dequeue depth value has to be compared against the maximum
dequeue depth reported by the event device driver, and capped to that
limit when it exceeds it.

Fixes: 3617aae53f92 ("app/eventdev: add event Rx adapter setup")
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
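
The pattern this commit applies in each test setup, condensed into a
standalone sketch: query the driver's limits, clamp the requested worker
dequeue depth, and build the port configuration from the result. The
helper name setup_worker_port and its parameters are hypothetical; the
DPDK calls (rte_event_dev_info_get, rte_event_port_setup) and the struct
fields are the ones the diff itself uses.

    #include <rte_eventdev.h>

    /* Hypothetical helper: cap the requested dequeue depth at the
     * driver-reported maximum before configuring the port.
     */
    static int
    setup_worker_port(uint8_t dev_id, uint8_t port, uint16_t wkr_deq_dep)
    {
            struct rte_event_dev_info dev_info;

            rte_event_dev_info_get(dev_id, &dev_info);
            if (wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
                    wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

            const struct rte_event_port_conf p_conf = {
                    .dequeue_depth = wkr_deq_dep,
                    .enqueue_depth = dev_info.max_event_port_dequeue_depth,
                    .new_event_threshold = dev_info.max_num_events,
            };

            return rte_event_port_setup(dev_id, port, &p_conf);
    }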
app/test-eventdev/test_perf_atq.c
app/test-eventdev/test_perf_common.c
app/test-eventdev/test_perf_common.h
app/test-eventdev/test_perf_queue.c
app/test-eventdev/test_pipeline_atq.c
app/test-eventdev/test_pipeline_queue.c

index d07a05425475ac3d5cbb271131f0dd71e64af30c..b36b22a77515ba46000b168924ca021d19f3d5a2 100644 (file)
@@ -207,7 +207,18 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
                }
        }
 
-       ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues);
+       if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+               opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+       /* port configuration */
+       const struct rte_event_port_conf p_conf = {
+                       .dequeue_depth = opt->wkr_deq_dep,
+                       .enqueue_depth = dev_info.max_event_port_dequeue_depth,
+                       .new_event_threshold = dev_info.max_num_events,
+       };
+
+       ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
+                       &p_conf);
        if (ret)
                return ret;
 
index e279d81a5f5bbd87e33646c07dabe18858387feb..59fa0a49ee7600180be5c184e4d87d195dc64bd8 100644 (file)
@@ -285,22 +285,12 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 
 int
 perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
-                               uint8_t stride, uint8_t nb_queues)
+                               uint8_t stride, uint8_t nb_queues,
+                               const struct rte_event_port_conf *port_conf)
 {
        struct test_perf *t = evt_test_priv(test);
        uint16_t port, prod;
        int ret = -1;
-       struct rte_event_port_conf port_conf;
-
-       memset(&port_conf, 0, sizeof(struct rte_event_port_conf));
-       rte_event_port_default_conf_get(opt->dev_id, 0, &port_conf);
-
-       /* port configuration */
-       const struct rte_event_port_conf wkr_p_conf = {
-                       .dequeue_depth = opt->wkr_deq_dep,
-                       .enqueue_depth = port_conf.enqueue_depth,
-                       .new_event_threshold = port_conf.new_event_threshold,
-       };
 
        /* setup one port per worker, linking to all queues */
        for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
@@ -313,7 +303,7 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
                w->processed_pkts = 0;
                w->latency = 0;
 
-               ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
+               ret = rte_event_port_setup(opt->dev_id, port, port_conf);
                if (ret) {
                        evt_err("failed to setup port %d", port);
                        return ret;
@@ -327,18 +317,13 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
        }
 
        /* port for producers, no links */
-       struct rte_event_port_conf prod_conf = {
-                       .dequeue_depth = port_conf.dequeue_depth,
-                       .enqueue_depth = port_conf.enqueue_depth,
-                       .new_event_threshold = port_conf.new_event_threshold,
-       };
        if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
                for ( ; port < perf_nb_event_ports(opt); port++) {
                        struct prod_data *p = &t->prod[port];
                        p->t = t;
                }
 
-               ret = perf_event_rx_adapter_setup(opt, stride, prod_conf);
+               ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
                if (ret)
                        return ret;
        } else {
@@ -352,7 +337,7 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
                        p->t = t;
 
                        ret = rte_event_port_setup(opt->dev_id, port,
-                                       &prod_conf);
+                                       port_conf);
                        if (ret) {
                                evt_err("failed to setup port %d", port);
                                return ret;
index f8d516ce420fac05f3f667c00b267d5dea9b59f0..9ad99733bf85e043e46d8f1404faf9d4fb2c79e5 100644 (file)
@@ -133,7 +133,8 @@ int perf_test_setup(struct evt_test *test, struct evt_options *opt);
 int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
 int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
 int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
-                               uint8_t stride, uint8_t nb_queues);
+                               uint8_t stride, uint8_t nb_queues,
+                               const struct rte_event_port_conf *port_conf);
 int perf_event_dev_service_setup(uint8_t dev_id);
 int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
                int (*worker)(void *));
index d5b8908761d00e8f8087653161c69bf316cd42c3..db8f2f3e58ce5a1897e21b708235cec4e8119ac1 100644 (file)
@@ -219,8 +219,18 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
                }
        }
 
+       if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+               opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+       /* port configuration */
+       const struct rte_event_port_conf p_conf = {
+                       .dequeue_depth = opt->wkr_deq_dep,
+                       .enqueue_depth = dev_info.max_event_port_dequeue_depth,
+                       .new_event_threshold = dev_info.max_num_events,
+       };
+
        ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
-                                       nb_queues);
+                                       nb_queues, &p_conf);
        if (ret)
                return ret;
 
index 36abbddda3afe6bf53d887fc1f6050d8e3f45c81..dd7189776307da56449c7f9cef29b4fd1ccf340c 100644 (file)
@@ -378,6 +378,9 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
                }
        }
 
+       if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
+               opt->wkr_deq_dep = info.max_event_port_dequeue_depth;
+
        /* port configuration */
        const struct rte_event_port_conf p_conf = {
                        .dequeue_depth = opt->wkr_deq_dep,
index 835fe0782f8ba08dd6c10a409f133d3d2981d097..02fc27cf87e51bc7b521f3df507a4f0589b91776 100644 (file)
@@ -397,6 +397,9 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
                }
        }
 
+       if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
+               opt->wkr_deq_dep = info.max_event_port_dequeue_depth;
+
        /* port configuration */
        const struct rte_event_port_conf p_conf = {
                        .dequeue_depth = opt->wkr_deq_dep,