net: add rte prefix to ether defines
[dpdk.git] / app / test-eventdev / test_perf_common.c
index b6cc00e..01f7828 100644 (file)
@@ -87,21 +87,21 @@ perf_event_timer_producer(void *arg)
        struct rte_mempool *pool = t->pool;
        struct perf_elt *m;
        struct rte_event_timer_adapter **adptr = t->timer_adptr;
+       struct rte_event_timer tim;
        uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
 
+       memset(&tim, 0, sizeof(struct rte_event_timer));
        timeout_ticks = opt->optm_timer_tick_nsec ?
                        (timeout_ticks * opt->timer_tick_nsec)
                        / opt->optm_timer_tick_nsec : timeout_ticks;
        timeout_ticks += timeout_ticks ? 0 : 1;
-       const struct rte_event_timer tim = {
-               .ev.op = RTE_EVENT_OP_NEW,
-               .ev.queue_id = p->queue_id,
-               .ev.sched_type = t->opt->sched_type_list[0],
-               .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
-               .ev.event_type =  RTE_EVENT_TYPE_TIMER,
-               .state = RTE_EVENT_TIMER_NOT_ARMED,
-               .timeout_ticks = timeout_ticks,
-       };
+       tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+       tim.ev.op = RTE_EVENT_OP_NEW;
+       tim.ev.sched_type = t->opt->sched_type_list[0];
+       tim.ev.queue_id = p->queue_id;
+       tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+       tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+       tim.timeout_ticks = timeout_ticks;
 
        if (opt->verbose_level > 1)
                printf("%s(): lcore %d\n", __func__, rte_lcore_id());
@@ -133,6 +133,67 @@ perf_event_timer_producer(void *arg)
        return 0;
 }
 
+static inline int
+perf_event_timer_producer_burst(void *arg)
+{
+       int i;
+       struct prod_data *p  = arg;
+       struct test_perf *t = p->t;
+       struct evt_options *opt = t->opt;
+       uint32_t flow_counter = 0;
+       uint64_t count = 0;
+       uint64_t arm_latency = 0;
+       const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+       const uint32_t nb_flows = t->nb_flows;
+       const uint64_t nb_timers = opt->nb_timers;
+       struct rte_mempool *pool = t->pool;
+       struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+       struct rte_event_timer_adapter **adptr = t->timer_adptr;
+       struct rte_event_timer tim;
+       uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+       memset(&tim, 0, sizeof(struct rte_event_timer));
+       timeout_ticks = opt->optm_timer_tick_nsec ?
+                       (timeout_ticks * opt->timer_tick_nsec)
+                       / opt->optm_timer_tick_nsec : timeout_ticks;
+       timeout_ticks += timeout_ticks ? 0 : 1;
+       tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+       tim.ev.op = RTE_EVENT_OP_NEW;
+       tim.ev.sched_type = t->opt->sched_type_list[0];
+       tim.ev.queue_id = p->queue_id;
+       tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+       tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+       tim.timeout_ticks = timeout_ticks;
+
+       if (opt->verbose_level > 1)
+               printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+       while (count < nb_timers && t->done == false) {
+               if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+                       continue;
+               for (i = 0; i < BURST_SIZE; i++) {
+                       rte_prefetch0(m[i + 1]);
+                       m[i]->tim = tim;
+                       m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+                       m[i]->tim.ev.event_ptr = m[i];
+                       m[i]->timestamp = rte_get_timer_cycles();
+               }
+               rte_event_timer_arm_tmo_tick_burst(
+                               adptr[flow_counter % nb_timer_adptrs],
+                               (struct rte_event_timer **)m,
+                               tim.timeout_ticks,
+                               BURST_SIZE);
+               arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
+               count += BURST_SIZE;
+       }
+       fflush(stdout);
+       rte_delay_ms(1000);
+       printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+                       __func__, rte_lcore_id(), (float)(arm_latency / count) /
+                       (rte_get_timer_hz() / 1000000));
+       return 0;
+}
+
 static int
 perf_producer_wrapper(void *arg)
 {
@@ -141,8 +202,12 @@ perf_producer_wrapper(void *arg)
        /* Launch the producer function only in case of synthetic producer. */
        if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
                return perf_producer(arg);
-       else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
+       else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+                       !t->opt->timdev_use_burst)
                return perf_event_timer_producer(arg);
+       else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+                       t->opt->timdev_use_burst)
+               return perf_event_timer_producer_burst(arg);
        return 0;
 }
 
@@ -262,7 +327,8 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
                }
 
                if (new_cycles - dead_lock_cycles > dead_lock_sample &&
-                               opt->prod_type == EVT_PROD_TYPE_SYNT) {
+                   (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+                    opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
                        remaining = t->outstand_pkts - processed_pkts(t);
                        if (dead_lock_remaining == remaining) {
                                rte_event_dev_dump(opt->dev_id, stdout);
@@ -290,7 +356,7 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
        memset(&queue_conf, 0,
                        sizeof(struct rte_event_eth_rx_adapter_queue_conf));
        queue_conf.ev.sched_type = opt->sched_type_list[0];
-       for (prod = 0; prod < rte_eth_dev_count(); prod++) {
+       RTE_ETH_FOREACH_DEV(prod) {
                uint32_t cap;
 
                ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
@@ -327,21 +393,6 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
                                return ret;
                        }
                }
-
-               ret = rte_eth_dev_start(prod);
-               if (ret) {
-                       evt_err("Ethernet dev [%d] failed to start."
-                                       " Using synthetic producer", prod);
-                       return ret;
-               }
-
-               ret = rte_event_eth_rx_adapter_start(prod);
-               if (ret) {
-                       evt_err("Rx adapter[%d] start failed", prod);
-                       return ret;
-               }
-               printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__,
-                               prod, prod);
        }
 
        return ret;
@@ -366,7 +417,7 @@ perf_event_timer_adapter_setup(struct test_perf *t)
                        .timer_adapter_id = i,
                        .timer_tick_ns = t->opt->timer_tick_nsec,
                        .max_tmo_ns = t->opt->max_tmo_nsec,
-                       .nb_timers = 2 * 1024 * 1024,
+                       .nb_timers = t->opt->pool_sz,
                        .flags = flags,
                };
 
@@ -395,12 +446,6 @@ perf_event_timer_adapter_setup(struct test_perf *t)
                        }
                        rte_service_runstate_set(service_id, 1);
                }
-
-               ret = rte_event_timer_adapter_start(wl);
-               if (ret) {
-                       evt_err("failed to Start event timer adapter %d", i);
-                       return ret;
-               }
                t->timer_adptr[i] = wl;
        }
        return 0;
@@ -608,20 +653,13 @@ perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
 int
 perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 {
-       int i;
+       uint16_t i;
        struct test_perf *t = evt_test_priv(test);
        struct rte_eth_conf port_conf = {
                .rxmode = {
                        .mq_mode = ETH_MQ_RX_RSS,
-                       .max_rx_pkt_len = ETHER_MAX_LEN,
+                       .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                        .split_hdr_size = 0,
-                       .header_split   = 0,
-                       .hw_ip_checksum = 0,
-                       .hw_vlan_filter = 0,
-                       .hw_vlan_strip  = 0,
-                       .hw_vlan_extend = 0,
-                       .jumbo_frame    = 0,
-                       .hw_strip_crc   = 1,
                },
                .rx_adv_conf = {
                        .rss_conf = {
@@ -635,16 +673,29 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
                        opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
                return 0;
 
-       if (!rte_eth_dev_count()) {
+       if (!rte_eth_dev_count_avail()) {
                evt_err("No ethernet ports found.");
                return -ENODEV;
        }
 
-       for (i = 0; i < rte_eth_dev_count(); i++) {
+       RTE_ETH_FOREACH_DEV(i) {
+               struct rte_eth_dev_info dev_info;
+               struct rte_eth_conf local_port_conf = port_conf;
+
+               rte_eth_dev_info_get(i, &dev_info);
+
+               local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+                       dev_info.flow_type_rss_offloads;
+               if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+                               port_conf.rx_adv_conf.rss_conf.rss_hf) {
+                       evt_info("Port %u modified RSS hash function based on hardware support, "
+                               "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+                               i,
+                               port_conf.rx_adv_conf.rss_conf.rss_hf,
+                               local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+               }
 
-               if (rte_eth_dev_configure(i, 1, 1,
-                                       &port_conf)
-                               < 0) {
+               if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
                        evt_err("Failed to configure eth port [%d]", i);
                        return -EINVAL;
                }
@@ -671,14 +722,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 
 void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
 {
-       int i;
+       uint16_t i;
        RTE_SET_USED(test);
 
        if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
-               for (i = 0; i < rte_eth_dev_count(); i++) {
+               RTE_ETH_FOREACH_DEV(i) {
                        rte_event_eth_rx_adapter_stop(i);
                        rte_eth_dev_stop(i);
-                       rte_eth_dev_close(i);
                }
        }
 }