[dpdk.git] / app/test-eventdev/test_perf_atq.c
index 5a48ffd..d0241ec 100644
@@ -4,13 +4,14 @@
 
 #include "test_perf_common.h"
 
-/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
 
 static inline int
 atq_nb_event_queues(struct evt_options *opt)
 {
        /* nb_queues = number of producers */
-       return evt_nr_active_lcores(opt->plcores);
+       return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
+               rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
 }
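
With this change the queue count follows the producer type: one queue per available ethernet device when the Rx adapter feeds the test, otherwise one per producer lcore. rte_eth_dev_count_avail() is the replacement for the deprecated rte_eth_dev_count(); a minimal sketch of the equivalence, assuming only <rte_ethdev.h>:

#include <rte_ethdev.h>

/* rte_eth_dev_count_avail() reports the number of usable ports; it is
 * equivalent to counting the ports visited by RTE_ETH_FOREACH_DEV.
 */
static uint16_t
count_avail_ports(void)
{
	uint16_t port_id, count = 0;

	RTE_ETH_FOREACH_DEV(port_id)
		count++;
	return count;
}
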
 
 static inline __attribute__((always_inline)) void
@@ -42,15 +43,12 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)
        while (t->done == false) {
                uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
 
-               if (enable_fwd_latency)
-                       rte_prefetch0(ev.event_ptr);
-
                if (!event) {
                        rte_pause();
                        continue;
                }
 
-               if (enable_fwd_latency)
+               if (enable_fwd_latency && !prod_timer_type)
                /* first stage in pipeline, mark ts to compute fwd latency */
                        atq_mark_fwd_latency(&ev);
 
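
The new !prod_timer_type guard matters because the event timer adapter producer stamps each event's perf_elt when the timer is armed; restamping on the first pipeline stage would erase the arm-to-dequeue latency. For reference, a sketch of atq_mark_fwd_latency() along the lines of its definition elsewhere in this file, assuming the perf_elt layout from test_perf_common.h:

/* sketch: stamp the event's private data on the first pipeline stage so
 * the last stage can compute forwarding latency.
 */
static inline __attribute__((always_inline)) void
atq_mark_fwd_latency(struct rte_event *const ev)
{
	if (unlikely(ev->sub_event_type == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}
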
@@ -89,7 +87,7 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
                }
 
                for (i = 0; i < nb_rx; i++) {
-                       if (enable_fwd_latency) {
+                       if (enable_fwd_latency && !prod_timer_type) {
                                rte_prefetch0(ev[i+1].event_ptr);
                                /* first stage in pipeline.
                                 * mark time stamp to compute fwd latency
@@ -159,14 +157,16 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
        uint8_t queue;
        uint8_t nb_queues;
        uint8_t nb_ports;
+       uint16_t prod;
        struct rte_event_dev_info dev_info;
+       struct test_perf *t = evt_test_priv(test);
 
        nb_ports = evt_nr_active_lcores(opt->wlcores);
-       nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+       nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
+                       opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 :
                evt_nr_active_lcores(opt->plcores);
 
-       nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
-               rte_eth_dev_count() : atq_nb_event_queues(opt);
+       nb_queues = atq_nb_event_queues(opt);
 
        memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
        ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
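
Producer ports are only reserved for software producers; both the Rx adapter and the timer adapter enqueue events into the device on their own. The port accounting could be factored as below; perf_nb_prod_ports() is a hypothetical name used purely for illustration:

/* sketch: adapter producer types need no dedicated eventdev ports. */
static uint8_t
perf_nb_prod_ports(const struct evt_options *opt)
{
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
		return 0;
	return evt_nr_active_lcores(opt->plcores);
}
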
@@ -175,18 +175,7 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
                return ret;
        }
 
-       const struct rte_event_dev_config config = {
-                       .nb_event_queues = nb_queues,
-                       .nb_event_ports = nb_ports,
-                       .nb_events_limit  = dev_info.max_num_events,
-                       .nb_event_queue_flows = opt->nb_flows,
-                       .nb_event_port_dequeue_depth =
-                               dev_info.max_event_port_dequeue_depth,
-                       .nb_event_port_enqueue_depth =
-                               dev_info.max_event_port_enqueue_depth,
-       };
-
-       ret = rte_event_dev_configure(opt->dev_id, &config);
+       ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
        if (ret) {
                evt_err("failed to configure eventdev %d", opt->dev_id);
                return ret;
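
The device configuration removed above moves into the common helper evt_configure_eventdev(). A plausible shape for it, reconstructed from the removed block (the real helper lives in evt_common.h and may differ in details):

static inline int
evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
		uint8_t nb_ports)
{
	struct rte_event_dev_info info;
	int ret;

	ret = rte_event_dev_info_get(opt->dev_id, &info);
	if (ret)
		return ret;

	const struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = info.max_num_events,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth =
				info.max_event_port_dequeue_depth,
			.nb_event_port_enqueue_depth =
				info.max_event_port_enqueue_depth,
	};

	return rte_event_dev_configure(opt->dev_id, &config);
}
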
@@ -207,7 +196,18 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
                }
        }
 
-       ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues);
+       if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+               opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+       /* port configuration */
+       const struct rte_event_port_conf p_conf = {
+                       .dequeue_depth = opt->wkr_deq_dep,
+                       .enqueue_depth = dev_info.max_event_port_enqueue_depth,
+                       .new_event_threshold = dev_info.max_num_events,
+       };
+
+       ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
+                       &p_conf);
        if (ret)
                return ret;
 
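
perf_event_dev_port_setup() now receives the port configuration instead of building one internally. Broadly, it applies p_conf to every worker port and links the queues, along these lines (perf_link_worker_ports() is a hypothetical helper, error handling trimmed for brevity):

/* sketch: apply the caller-supplied port config to each worker port and
 * link it to all event queues.
 */
static int
perf_link_worker_ports(uint8_t dev_id, uint8_t nb_workers,
		uint8_t nb_queues, const struct rte_event_port_conf *p_conf)
{
	uint8_t port, queue;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];

	for (queue = 0; queue < nb_queues; queue++)
		queues[queue] = queue;

	for (port = 0; port < nb_workers; port++) {
		if (rte_event_port_setup(dev_id, port, p_conf) < 0)
			return -EINVAL;
		if (rte_event_port_link(dev_id, port, queues, NULL,
				nb_queues) != nb_queues)
			return -EINVAL;
	}
	return 0;
}
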
@@ -227,6 +227,35 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
                return ret;
        }
 
+       if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+               RTE_ETH_FOREACH_DEV(prod) {
+                       ret = rte_eth_dev_start(prod);
+                       if (ret) {
+                               evt_err("Ethernet dev [%d] failed to start",
+                                               prod);
+                               return ret;
+                       }
+
+                       ret = rte_event_eth_rx_adapter_start(prod);
+                       if (ret) {
+                               evt_err("Rx adapter[%d] start failed", prod);
+                               return ret;
+                       }
+                       printf("%s: Port[%d] using Rx adapter[%d] started\n",
+                                       __func__, prod, prod);
+               }
+       } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+               for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
+                       ret = rte_event_timer_adapter_start(
+                                       t->timer_adptr[prod]);
+                       if (ret) {
+                               evt_err("failed to start event timer adapter %d",
+                                               prod);
+                               return ret;
+                       }
+               }
+       }
+
        return 0;
 }
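
Each adapter started here needs a matching stop on the teardown path. A hedged sketch of the counterpart, mirroring the start-up order above (perf_stop_producers() is a hypothetical name; the real cleanup sits in test_perf_common.c):

/* sketch: stop adapters before the event device itself is stopped. */
static void
perf_stop_producers(struct evt_options *opt, struct test_perf *t)
{
	uint16_t prod;

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			rte_event_eth_rx_adapter_stop(prod);
			rte_eth_dev_stop(prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++)
			rte_event_timer_adapter_stop(t->timer_adptr[prod]);
	}
}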