ethdev: add namespace
[dpdk.git] / app / test-eventdev / test_perf_common.c
index 7b09299..9b73874 100644
@@ -1,49 +1,36 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium, Inc 2017.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
+#include <math.h>
+
 #include "test_perf_common.h"
 
 int
 perf_test_result(struct evt_test *test, struct evt_options *opt)
 {
        RTE_SET_USED(opt);
+       int i;
+       uint64_t total = 0;
        struct test_perf *t = evt_test_priv(test);
 
+       printf("Packet distribution across worker cores :\n");
+       for (i = 0; i < t->nb_workers; i++)
+               total += t->worker[i].processed_pkts;
+       for (i = 0; i < t->nb_workers; i++)
+               printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
+                               CLGRN" %3.2f"CLNRM"\n", i,
+                               t->worker[i].processed_pkts,
+                               (((double)t->worker[i].processed_pkts)/total)
+                               * 100);
+
        return t->result;
 }
 
 static inline int
 perf_producer(void *arg)
 {
+       int i;
        struct prod_data *p  = arg;
        struct test_perf *t = p->t;
        struct evt_options *opt = t->opt;
@@ -54,7 +41,7 @@ perf_producer(void *arg)
        const uint32_t nb_flows = t->nb_flows;
        uint32_t flow_counter = 0;
        uint64_t count = 0;
-       struct perf_elt *m;
+       struct perf_elt *m[BURST_SIZE + 1] = {NULL};
        struct rte_event ev;
 
        if (opt->verbose_level > 1)
@@ -70,33 +57,247 @@ perf_producer(void *arg)
        ev.sub_event_type = 0; /* stage 0 */
 
        while (count < nb_pkts && t->done == false) {
-               if (rte_mempool_get(pool, (void **)&m) < 0)
+               if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
                        continue;
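+               /* Burst-allocate events, then enqueue them one at a time;
+                * the timestamp is refreshed on every enqueue retry so the
+                * measured latency excludes enqueue back-pressure.
+                */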
+               for (i = 0; i < BURST_SIZE; i++) {
+                       ev.flow_id = flow_counter++ % nb_flows;
+                       ev.event_ptr = m[i];
+                       m[i]->timestamp = rte_get_timer_cycles();
+                       while (rte_event_enqueue_burst(dev_id,
+                                                      port, &ev, 1) != 1) {
+                               if (t->done)
+                                       break;
+                               rte_pause();
+                               m[i]->timestamp = rte_get_timer_cycles();
+                       }
+               }
+               count += BURST_SIZE;
+       }
+
+       return 0;
+}
 
-               ev.flow_id = flow_counter++ % nb_flows;
-               ev.event_ptr = m;
-               m->timestamp = rte_get_timer_cycles();
-               while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+static inline int
+perf_producer_burst(void *arg)
+{
+       uint32_t i;
+       uint64_t timestamp;
+       struct rte_event_dev_info dev_info;
+       struct prod_data *p  = arg;
+       struct test_perf *t = p->t;
+       struct evt_options *opt = t->opt;
+       const uint8_t dev_id = p->dev_id;
+       const uint8_t port = p->port_id;
+       struct rte_mempool *pool = t->pool;
+       const uint64_t nb_pkts = t->nb_pkts;
+       const uint32_t nb_flows = t->nb_flows;
+       uint32_t flow_counter = 0;
+       uint16_t enq = 0;
+       uint64_t count = 0;
+       struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
+       struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
+       uint32_t burst_size = opt->prod_enq_burst_sz;
+
+       memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
+       rte_event_dev_info_get(dev_id, &dev_info);
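+       /* Clamp the requested burst to the port's maximum enqueue depth. */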
+       if (dev_info.max_event_port_enqueue_depth < burst_size)
+               burst_size = dev_info.max_event_port_enqueue_depth;
+
+       if (opt->verbose_level > 1)
+               printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
+                               rte_lcore_id(), dev_id, port, p->queue_id);
+
+       for (i = 0; i < burst_size; i++) {
+               ev[i].op = RTE_EVENT_OP_NEW;
+               ev[i].queue_id = p->queue_id;
+               ev[i].sched_type = t->opt->sched_type_list[0];
+               ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+               ev[i].event_type =  RTE_EVENT_TYPE_CPU;
+               ev[i].sub_event_type = 0; /* stage 0 */
+       }
+
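+       /* The constant event fields are set above; only flow_id and
+        * event_ptr change per event in the loop below.
+        */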
+       while (count < nb_pkts && t->done == false) {
+               if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
+                       continue;
+               timestamp = rte_get_timer_cycles();
+               for (i = 0; i < burst_size; i++) {
+                       ev[i].flow_id = flow_counter++ % nb_flows;
+                       ev[i].event_ptr = m[i];
+                       m[i]->timestamp = timestamp;
+               }
+               enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
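+               /* Retry the tail that did not fit, refreshing the
+                * timestamps of the still-pending events.
+                */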
+               while (enq < burst_size) {
+                       enq += rte_event_enqueue_burst(dev_id, port,
+                                                       ev + enq,
+                                                       burst_size - enq);
                        if (t->done)
                                break;
                        rte_pause();
-                       m->timestamp = rte_get_timer_cycles();
+                       timestamp = rte_get_timer_cycles();
+                       for (i = enq; i < burst_size; i++)
+                               m[i]->timestamp = timestamp;
                }
-               count++;
+               count += burst_size;
        }
+       return 0;
+}
+
+static inline int
+perf_event_timer_producer(void *arg)
+{
+       int i;
+       struct prod_data *p  = arg;
+       struct test_perf *t = p->t;
+       struct evt_options *opt = t->opt;
+       uint32_t flow_counter = 0;
+       uint64_t count = 0;
+       uint64_t arm_latency = 0;
+       const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+       const uint32_t nb_flows = t->nb_flows;
+       const uint64_t nb_timers = opt->nb_timers;
+       struct rte_mempool *pool = t->pool;
+       struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+       struct rte_event_timer_adapter **adptr = t->timer_adptr;
+       struct rte_event_timer tim;
+       uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+       memset(&tim, 0, sizeof(struct rte_event_timer));
+       timeout_ticks =
+               opt->optm_timer_tick_nsec
+                       ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
+                              opt->optm_timer_tick_nsec)
+                       : timeout_ticks;
+       timeout_ticks += timeout_ticks ? 0 : 1;
+       tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+       tim.ev.op = RTE_EVENT_OP_NEW;
+       tim.ev.sched_type = t->opt->sched_type_list[0];
+       tim.ev.queue_id = p->queue_id;
+       tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+       tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+       tim.timeout_ticks = timeout_ticks;
 
+       if (opt->verbose_level > 1)
+               printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+       while (count < nb_timers && t->done == false) {
+               if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+                       continue;
+               for (i = 0; i < BURST_SIZE; i++) {
+                       rte_prefetch0(m[i + 1]);
+                       m[i]->tim = tim;
+                       m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+                       m[i]->tim.ev.event_ptr = m[i];
+                       m[i]->timestamp = rte_get_timer_cycles();
+                       while (rte_event_timer_arm_burst(
+                              adptr[flow_counter % nb_timer_adptrs],
+                              (struct rte_event_timer **)&m[i], 1) != 1) {
+                               if (t->done)
+                                       break;
+                               m[i]->timestamp = rte_get_timer_cycles();
+                       }
+                       arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
+               }
+               count += BURST_SIZE;
+       }
+       fflush(stdout);
+       rte_delay_ms(1000);
+       printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+                       __func__, rte_lcore_id(),
+                       count ? (float)(arm_latency / count) /
+                       (rte_get_timer_hz() / 1000000) : 0);
        return 0;
 }
 
 static inline int
-scheduler(void *arg)
+perf_event_timer_producer_burst(void *arg)
 {
-       struct test_perf *t = arg;
-       const uint8_t dev_id = t->opt->dev_id;
+       int i;
+       struct prod_data *p  = arg;
+       struct test_perf *t = p->t;
+       struct evt_options *opt = t->opt;
+       uint32_t flow_counter = 0;
+       uint64_t count = 0;
+       uint64_t arm_latency = 0;
+       const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+       const uint32_t nb_flows = t->nb_flows;
+       const uint64_t nb_timers = opt->nb_timers;
+       struct rte_mempool *pool = t->pool;
+       struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+       struct rte_event_timer_adapter **adptr = t->timer_adptr;
+       struct rte_event_timer tim;
+       uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+       memset(&tim, 0, sizeof(struct rte_event_timer));
+       timeout_ticks =
+               opt->optm_timer_tick_nsec
+                       ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
+                              opt->optm_timer_tick_nsec)
+                       : timeout_ticks;
+       timeout_ticks += timeout_ticks ? 0 : 1;
+       tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+       tim.ev.op = RTE_EVENT_OP_NEW;
+       tim.ev.sched_type = t->opt->sched_type_list[0];
+       tim.ev.queue_id = p->queue_id;
+       tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+       tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+       tim.timeout_ticks = timeout_ticks;
 
-       while (t->done == false)
-               rte_event_schedule(dev_id);
+       if (opt->verbose_level > 1)
+               printf("%s(): lcore %d\n", __func__, rte_lcore_id());
 
+       while (count < nb_timers && t->done == false) {
+               if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+                       continue;
+               for (i = 0; i < BURST_SIZE; i++) {
+                       rte_prefetch0(m[i + 1]);
+                       m[i]->tim = tim;
+                       m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+                       m[i]->tim.ev.event_ptr = m[i];
+                       m[i]->timestamp = rte_get_timer_cycles();
+               }
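+               /* All timers in the burst share one timeout, so they can
+                * be armed with a single tmo_tick burst call.
+                */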
+               rte_event_timer_arm_tmo_tick_burst(
+                               adptr[flow_counter % nb_timer_adptrs],
+                               (struct rte_event_timer **)m,
+                               tim.timeout_ticks,
+                               BURST_SIZE);
+               arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
+               count += BURST_SIZE;
+       }
+       fflush(stdout);
+       rte_delay_ms(1000);
+       printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+                       __func__, rte_lcore_id(),
+                       count ? (float)(arm_latency / count) /
+                       (rte_get_timer_hz() / 1000000) : 0);
+       return 0;
+}
+
+static int
+perf_producer_wrapper(void *arg)
+{
+       struct prod_data *p  = arg;
+       struct test_perf *t = p->t;
+       bool burst = evt_has_burst_mode(p->dev_id);
+
+       /* In case of synthetic producer, launch perf_producer or
+        * perf_producer_burst depending on producer enqueue burst size
+        */
+       if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
+                       t->opt->prod_enq_burst_sz == 1)
+               return perf_producer(arg);
+       else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
+                       t->opt->prod_enq_burst_sz > 1) {
+               if (!burst)
+                       evt_err("This event device does not support burst mode");
+               else
+                       return perf_producer_burst(arg);
+       } else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+                       !t->opt->timdev_use_burst)
+               return perf_event_timer_producer(arg);
+       else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+                       t->opt->timdev_use_burst)
+               return perf_event_timer_producer_burst(arg);
        return 0;
 }
 
@@ -106,7 +307,6 @@ processed_pkts(struct test_perf *t)
        uint8_t i;
        uint64_t total = 0;
 
-       rte_smp_rmb();
        for (i = 0; i < t->nb_workers; i++)
                total += t->worker[i].processed_pkts;
 
@@ -119,7 +319,6 @@ total_latency(struct test_perf *t)
        uint8_t i;
        uint64_t total = 0;
 
-       rte_smp_rmb();
        for (i = 0; i < t->nb_workers; i++)
                total += t->worker[i].latency;
 
@@ -136,7 +335,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
 
        int port_idx = 0;
        /* launch workers */
-       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+       RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (!(opt->wlcores[lcore_id]))
                        continue;
 
@@ -150,12 +349,12 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
        }
 
        /* launch producers */
-       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+       RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (!(opt->plcores[lcore_id]))
                        continue;
 
-               ret = rte_eal_remote_launch(perf_producer, &t->prod[port_idx],
-                                        lcore_id);
+               ret = rte_eal_remote_launch(perf_producer_wrapper,
+                               &t->prod[port_idx], lcore_id);
                if (ret) {
                        evt_err("failed to launch perf_producer %d", lcore_id);
                        return ret;
@@ -163,17 +362,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
                port_idx++;
        }
 
-       /* launch scheduler */
-       if (!evt_has_distributed_sched(opt->dev_id)) {
-               ret = rte_eal_remote_launch(scheduler, t, opt->slcore);
-               if (ret) {
-                       evt_err("failed to launch sched %d", opt->slcore);
-                       return ret;
-               }
-       }
-
-       const uint64_t total_pkts = opt->nb_pkts *
-                       evt_nr_active_lcores(opt->plcores);
+       const uint64_t total_pkts = t->outstand_pkts;
 
        uint64_t dead_lock_cycles = rte_get_timer_cycles();
        int64_t dead_lock_remaining  =  total_pkts;
@@ -214,20 +403,24 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
                        fflush(stdout);
 
                        if (remaining <= 0) {
-                               t->done = true;
                                t->result = EVT_TEST_SUCCESS;
-                               rte_smp_wmb();
-                               break;
+                               if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+                                       opt->prod_type ==
+                                       EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+                                       t->done = true;
+                                       break;
+                               }
                        }
                }
 
-               if (new_cycles - dead_lock_cycles > dead_lock_sample) {
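+               /* Deadlock detection needs a known total packet count, so
+                * it only runs for synthetic and timer-adapter producers.
+                */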
+               if (new_cycles - dead_lock_cycles > dead_lock_sample &&
+                   (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+                    opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
                        remaining = t->outstand_pkts - processed_pkts(t);
                        if (dead_lock_remaining == remaining) {
                                rte_event_dev_dump(opt->dev_id, stdout);
                                evt_err("No schedules for seconds, deadlock");
                                t->done = true;
-                               rte_smp_wmb();
                                break;
                        }
                        dead_lock_remaining = remaining;
@@ -238,21 +431,121 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
        return 0;
 }
 
+static int
+perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
+               struct rte_event_port_conf prod_conf)
+{
+       int ret = 0;
+       uint16_t prod;
+       struct rte_event_eth_rx_adapter_queue_conf queue_conf;
+
+       memset(&queue_conf, 0,
+                       sizeof(struct rte_event_eth_rx_adapter_queue_conf));
+       queue_conf.ev.sched_type = opt->sched_type_list[0];
+       RTE_ETH_FOREACH_DEV(prod) {
+               uint32_t cap;
+
+               ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
+                               prod, &cap);
+               if (ret) {
+                       evt_err("failed to get event rx adapter[%d]"
+                                       " capabilities",
+                                       opt->dev_id);
+                       return ret;
+               }
+               queue_conf.ev.queue_id = prod * stride;
+               ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
+                               &prod_conf);
+               if (ret) {
+                       evt_err("failed to create rx adapter[%d]", prod);
+                       return ret;
+               }
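+               /* Rx queue id -1 adds all Rx queues of the port. */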
+               ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
+                               &queue_conf);
+               if (ret) {
+                       evt_err("failed to add rx queues to adapter[%d]", prod);
+                       return ret;
+               }
+
+               if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+                       uint32_t service_id;
+
+                       rte_event_eth_rx_adapter_service_id_get(prod,
+                                       &service_id);
+                       ret = evt_service_setup(service_id);
+                       if (ret) {
+                               evt_err("Failed to setup service core"
+                                               " for Rx adapter\n");
+                               return ret;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+static int
+perf_event_timer_adapter_setup(struct test_perf *t)
+{
+       int i;
+       int ret;
+       struct rte_event_timer_adapter_info adapter_info;
+       struct rte_event_timer_adapter *wl;
+       uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
+       uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
+
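+       /* A single producer lcore can use the single-producer put flag. */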
+       if (nb_producers == 1)
+               flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;
+
+       for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
+               struct rte_event_timer_adapter_conf config = {
+                       .event_dev_id = t->opt->dev_id,
+                       .timer_adapter_id = i,
+                       .timer_tick_ns = t->opt->timer_tick_nsec,
+                       .max_tmo_ns = t->opt->max_tmo_nsec,
+                       .nb_timers = t->opt->pool_sz,
+                       .flags = flags,
+               };
+
+               wl = rte_event_timer_adapter_create(&config);
+               if (wl == NULL) {
+                       evt_err("failed to create event timer ring %d", i);
+                       return rte_errno;
+               }
+
+               memset(&adapter_info, 0,
+                               sizeof(struct rte_event_timer_adapter_info));
+               rte_event_timer_adapter_get_info(wl, &adapter_info);
+               t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;
+
+               if (!(adapter_info.caps &
+                               RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+                       uint32_t service_id = -1U;
+
+                       rte_event_timer_adapter_service_id_get(wl,
+                                       &service_id);
+                       ret = evt_service_setup(service_id);
+                       if (ret) {
+                               evt_err("Failed to setup service core"
+                                               " for timer adapter\n");
+                               return ret;
+                       }
+                       rte_service_runstate_set(service_id, 1);
+               }
+               t->timer_adptr[i] = wl;
+       }
+       return 0;
+}
+
 int
 perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
-                               uint8_t stride, uint8_t nb_queues)
+                               uint8_t stride, uint8_t nb_queues,
+                               const struct rte_event_port_conf *port_conf)
 {
        struct test_perf *t = evt_test_priv(test);
-       uint8_t port, prod;
+       uint16_t port, prod;
        int ret = -1;
 
-       /* port configuration */
-       const struct rte_event_port_conf wkr_p_conf = {
-                       .dequeue_depth = opt->wkr_deq_dep,
-                       .enqueue_depth = 64,
-                       .new_event_threshold = 4096,
-       };
-
        /* setup one port per worker, linking to all queues */
        for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
                                port++) {
@@ -264,7 +557,10 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
                w->processed_pkts = 0;
                w->latency = 0;
 
-               ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
+               struct rte_event_port_conf conf = *port_conf;
+               conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;
+
+               ret = rte_event_port_setup(opt->dev_id, port, &conf);
                if (ret) {
                        evt_err("failed to setup port %d", port);
                        return ret;
@@ -278,26 +574,52 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
        }
 
        /* port for producers, no links */
-       const struct rte_event_port_conf prod_conf = {
-                       .dequeue_depth = 8,
-                       .enqueue_depth = 32,
-                       .new_event_threshold = 1200,
-       };
-       prod = 0;
-       for ( ; port < perf_nb_event_ports(opt); port++) {
-               struct prod_data *p = &t->prod[port];
+       if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+               for ( ; port < perf_nb_event_ports(opt); port++) {
+                       struct prod_data *p = &t->prod[port];
+                       p->t = t;
+               }
 
-               p->dev_id = opt->dev_id;
-               p->port_id = port;
-               p->queue_id = prod * stride;
-               p->t = t;
+               struct rte_event_port_conf conf = *port_conf;
+               conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;
 
-               ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
-               if (ret) {
-                       evt_err("failed to setup port %d", port);
+               ret = perf_event_rx_adapter_setup(opt, stride, conf);
+               if (ret)
+                       return ret;
+       } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+               prod = 0;
+               for ( ; port < perf_nb_event_ports(opt); port++) {
+                       struct prod_data *p = &t->prod[port];
+                       p->queue_id = prod * stride;
+                       p->t = t;
+                       prod++;
+               }
+
+               ret = perf_event_timer_adapter_setup(t);
+               if (ret)
                        return ret;
+       } else {
+               prod = 0;
+               for ( ; port < perf_nb_event_ports(opt); port++) {
+                       struct prod_data *p = &t->prod[port];
+
+                       p->dev_id = opt->dev_id;
+                       p->port_id = port;
+                       p->queue_id = prod * stride;
+                       p->t = t;
+
+                       struct rte_event_port_conf conf = *port_conf;
+                       conf.event_port_cfg |=
+                               RTE_EVENT_PORT_CFG_HINT_PRODUCER |
+                               RTE_EVENT_PORT_CFG_HINT_CONSUMER;
+
+                       ret = rte_event_port_setup(opt->dev_id, port, &conf);
+                       if (ret) {
+                               evt_err("failed to setup port %d", port);
+                               return ret;
+                       }
+                       prod++;
                }
-               prod++;
        }
 
        return ret;
@@ -307,10 +629,11 @@ int
 perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
 {
        unsigned int lcores;
-       bool need_slcore = !evt_has_distributed_sched(opt->dev_id);
 
-       /* N producer + N worker + 1 scheduler(based on dev capa) + 1 master */
-       lcores = need_slcore ? 4 : 3;
+       /* N producers + N workers + main lcore when producer lcores are used,
+        * else N workers + main lcore when the Rx adapter is the producer.
+        */
+       lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;
 
        if (rte_lcore_count() < lcores) {
                evt_err("test need minimum %d lcores", lcores);
@@ -318,12 +641,8 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
        }
 
        /* Validate worker lcores */
-       if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
-               evt_err("worker lcores overlaps with master lcore");
-               return -1;
-       }
-       if (need_slcore && evt_lcores_has_overlap(opt->wlcores, opt->slcore)) {
-               evt_err("worker lcores overlaps with scheduler lcore");
+       if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+               evt_err("worker lcores overlaps with main lcore");
                return -1;
        }
        if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
@@ -339,33 +658,22 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
                return -1;
        }
 
-       /* Validate producer lcores */
-       if (evt_lcores_has_overlap(opt->plcores, rte_get_master_lcore())) {
-               evt_err("producer lcores overlaps with master lcore");
-               return -1;
-       }
-       if (need_slcore && evt_lcores_has_overlap(opt->plcores, opt->slcore)) {
-               evt_err("producer lcores overlaps with scheduler lcore");
-               return -1;
-       }
-       if (evt_has_disabled_lcore(opt->plcores)) {
-               evt_err("one or more producer lcores are not enabled");
-               return -1;
-       }
-       if (!evt_has_active_lcore(opt->plcores)) {
-               evt_err("minimum one producer is required");
-               return -1;
-       }
-
-       /* Validate scheduler lcore */
-       if (!evt_has_distributed_sched(opt->dev_id) &&
-                       opt->slcore == (int)rte_get_master_lcore()) {
-               evt_err("scheduler lcore and master lcore should be different");
-               return -1;
-       }
-       if (need_slcore && !rte_lcore_is_enabled(opt->slcore)) {
-               evt_err("scheduler lcore is not enabled");
-               return -1;
+       if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+                       opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+               /* Validate producer lcores */
+               if (evt_lcores_has_overlap(opt->plcores,
+                                       rte_get_main_lcore())) {
+                       evt_err("producer lcores overlaps with main lcore");
+                       return -1;
+               }
+               if (evt_has_disabled_lcore(opt->plcores)) {
+                       evt_err("one or more producer lcores are not enabled");
+                       return -1;
+               }
+               if (!evt_has_active_lcore(opt->plcores)) {
+                       evt_err("minimum one producer is required");
+                       return -1;
+               }
        }
 
        if (evt_has_invalid_stage(opt))
@@ -384,10 +692,13 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
        }
 
        /* Fixups */
-       if (opt->nb_stages == 1 && opt->fwd_latency) {
+       if ((opt->nb_stages == 1 &&
+                       opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
+                       opt->fwd_latency) {
                evt_info("fwd_latency is valid when nb_stages > 1, disabling");
                opt->fwd_latency = 0;
        }
+
        if (opt->fwd_latency && !opt->q_priority) {
                evt_info("enabled queue priority for latency measurement");
                opt->q_priority = 1;
@@ -405,20 +716,25 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
        evt_dump_producer_lcores(opt);
        evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
        evt_dump_worker_lcores(opt);
-       if (!evt_has_distributed_sched(opt->dev_id))
-               evt_dump_scheduler_lcore(opt);
        evt_dump_nb_stages(opt);
        evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
        evt_dump("nb_evdev_queues", "%d", nb_queues);
        evt_dump_queue_priority(opt);
        evt_dump_sched_type_list(opt);
+       evt_dump_producer_type(opt);
+       evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
 }
 
 void
 perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
 {
-       RTE_SET_USED(test);
+       int i;
+       struct test_perf *t = evt_test_priv(test);
 
+       if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+               for (i = 0; i < opt->nb_timer_adptrs; i++)
+                       rte_event_timer_adapter_stop(t->timer_adptr[i]);
+       }
        rte_event_dev_stop(opt->dev_id);
        rte_event_dev_close(opt->dev_id);
 }
@@ -430,18 +746,125 @@ perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
        memset(obj, 0, mp->elt_size);
 }
 
+#define NB_RX_DESC                     128
+#define NB_TX_DESC                     512
+int
+perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+       uint16_t i;
+       int ret;
+       struct test_perf *t = evt_test_priv(test);
+       struct rte_eth_conf port_conf = {
+               .rxmode = {
+                       .mq_mode = RTE_ETH_MQ_RX_RSS,
+                       .split_hdr_size = 0,
+               },
+               .rx_adv_conf = {
+                       .rss_conf = {
+                               .rss_key = NULL,
+                               .rss_hf = RTE_ETH_RSS_IP,
+                       },
+               },
+       };
+
+       if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+                       opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
+               return 0;
+
+       if (!rte_eth_dev_count_avail()) {
+               evt_err("No ethernet ports found.");
+               return -ENODEV;
+       }
+
+       RTE_ETH_FOREACH_DEV(i) {
+               struct rte_eth_dev_info dev_info;
+               struct rte_eth_conf local_port_conf = port_conf;
+
+               ret = rte_eth_dev_info_get(i, &dev_info);
+               if (ret != 0) {
+                       evt_err("Error during getting device (port %u) info: %s\n",
+                                       i, strerror(-ret));
+                       return ret;
+               }
+
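+               /* Mask the requested RSS hash types down to what the NIC
+                * actually supports.
+                */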
+               local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+                       dev_info.flow_type_rss_offloads;
+               if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+                               port_conf.rx_adv_conf.rss_conf.rss_hf) {
+                       evt_info("Port %u modified RSS hash function based on hardware support,"
+                               "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+                               i,
+                               port_conf.rx_adv_conf.rss_conf.rss_hf,
+                               local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+               }
+
+               if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
+                       evt_err("Failed to configure eth port [%d]", i);
+                       return -EINVAL;
+               }
+
+               if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
+                               rte_socket_id(), NULL, t->pool) < 0) {
+                       evt_err("Failed to setup eth port [%d] rx_queue: %d.",
+                                       i, 0);
+                       return -EINVAL;
+               }
+
+               if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
+                                       rte_socket_id(), NULL) < 0) {
+                       evt_err("Failed to setup eth port [%d] tx_queue: %d.",
+                                       i, 0);
+                       return -EINVAL;
+               }
+
+               ret = rte_eth_promiscuous_enable(i);
+               if (ret != 0) {
+                       evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
+                               i, rte_strerror(-ret));
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+       uint16_t i;
+       RTE_SET_USED(test);
+
+       if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+               RTE_ETH_FOREACH_DEV(i) {
+                       rte_event_eth_rx_adapter_stop(i);
+                       rte_eth_dev_stop(i);
+               }
+       }
+}
+
 int
 perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
 {
        struct test_perf *t = evt_test_priv(test);
 
-       t->pool = rte_mempool_create(test->name, /* mempool name */
+       if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+                       opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+               t->pool = rte_mempool_create(test->name, /* mempool name */
                                opt->pool_sz, /* number of elements*/
                                sizeof(struct perf_elt), /* element size*/
                                512, /* cache size*/
                                0, NULL, NULL,
                                perf_elt_init, /* obj constructor */
                                NULL, opt->socket_id, 0); /* flags */
+       } else {
+               t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
+                               opt->pool_sz, /* number of elements*/
+                               512, /* cache size*/
+                               0, /* private data size */
+                               RTE_MBUF_DEFAULT_BUF_SIZE, /* data room size */
+                               opt->socket_id); /* socket id */
+       }
+
        if (t->pool == NULL) {
                evt_err("failed to create mempool");
                return -ENOMEM;
@@ -474,10 +897,18 @@ perf_test_setup(struct evt_test *test, struct evt_options *opt)
 
        struct test_perf *t = evt_test_priv(test);
 
-       t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
+       if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+               t->outstand_pkts = opt->nb_timers *
+                       evt_nr_active_lcores(opt->plcores);
+               t->nb_pkts = opt->nb_timers;
+       } else {
+               t->outstand_pkts = opt->nb_pkts *
+                       evt_nr_active_lcores(opt->plcores);
+               t->nb_pkts = opt->nb_pkts;
+       }
+
        t->nb_workers = evt_nr_active_lcores(opt->wlcores);
        t->done = false;
-       t->nb_pkts = opt->nb_pkts;
        t->nb_flows = opt->nb_flows;
        t->result = EVT_TEST_FAILED;
        t->opt = opt;