X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-eventdev%2Ftest_perf_common.h;h=14dcf8042964ea0f329741f5aa9120c630888bfb;hb=39ddd5d1895e72ba2bc974eddbc12a3135639ed1;hp=f8d516ce420fac05f3f667c00b267d5dea9b59f0;hpb=3617aae53f92e3fe49d0c730699aa6c5143ae721;p=dpdk.git

diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index f8d516ce42..14dcf80429 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -13,6 +13,7 @@
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
 #include <rte_event_eth_rx_adapter.h>
+#include <rte_event_timer_adapter.h>
 #include <rte_lcore.h>
 #include <rte_malloc.h>
 #include <rte_mempool.h>
@@ -39,6 +40,7 @@ struct prod_data {
 	struct test_perf *t;
 } __rte_cache_aligned;
 
+
 struct test_perf {
 	/* Don't change the offset of "done". Signal handler use this memory
 	 * to terminate all lcores work.
@@ -54,13 +56,22 @@ struct test_perf {
 	struct worker_data worker[EVT_MAX_PORTS];
 	struct evt_options *opt;
 	uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
+	struct rte_event_timer_adapter *timer_adptr[
+		RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
 } __rte_cache_aligned;
 
 struct perf_elt {
-	uint64_t timestamp;
+	union {
+		struct rte_event_timer tim;
+		struct {
+			char pad[offsetof(struct rte_event_timer, user_meta)];
+			uint64_t timestamp;
+		};
+	};
 } __rte_cache_aligned;
 
 #define BURST_SIZE 16
+#define MAX_PROD_ENQ_BURST_SIZE 128
 
 #define PERF_WORKER_INIT\
 	struct worker_data *w = arg;\
@@ -68,6 +79,8 @@ struct perf_elt {
 	struct evt_options *opt = t->opt;\
 	const uint8_t dev = w->dev_id;\
 	const uint8_t port = w->port_id;\
+	const uint8_t prod_timer_type = \
+		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
 	uint8_t *const sched_type_list = &t->sched_type_list[0];\
 	struct rte_mempool *const pool = t->pool;\
 	const uint8_t nb_stages = t->opt->nb_stages;\
@@ -79,14 +92,19 @@ struct perf_elt {
 	printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
 		rte_lcore_id(), dev, port)
 
-static inline __attribute__((always_inline)) int
+static __rte_always_inline int
 perf_process_last_stage(struct rte_mempool *const pool,
 		struct rte_event *const ev, struct worker_data *const w,
 		void *bufs[], int const buf_sz, uint8_t count)
 {
 	bufs[count++] = ev->event_ptr;
+
+	/* release fence here ensures event_ptr is
+	 * stored before updating the number of
+	 * processed packets for worker lcores
+	 */
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 	w->processed_pkts++;
-	rte_smp_wmb();
 
 	if (unlikely(count == buf_sz)) {
 		count = 0;
@@ -95,7 +113,7 @@ perf_process_last_stage(struct rte_mempool *const pool,
 	return count;
 }
 
-static inline __attribute__((always_inline)) uint8_t
+static __rte_always_inline uint8_t
 perf_process_last_stage_latency(struct rte_mempool *const pool,
 		struct rte_event *const ev, struct worker_data *const w,
 		void *bufs[], int const buf_sz, uint8_t count)
@@ -104,6 +122,12 @@ perf_process_last_stage_latency(struct rte_mempool *const pool,
 	struct perf_elt *const m = ev->event_ptr;
 
 	bufs[count++] = ev->event_ptr;
+
+	/* release fence here ensures event_ptr is
+	 * stored before updating the number of
+	 * processed packets for worker lcores
+	 */
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 	w->processed_pkts++;
 
 	if (unlikely(count == buf_sz)) {
@@ -115,7 +139,6 @@ perf_process_last_stage_latency(struct rte_mempool *const pool,
 	}
 
 	w->latency += latency;
-	rte_smp_wmb();
 
 	return count;
 }
@@ -133,7 +156,8 @@ int perf_test_setup(struct evt_test *test, struct evt_options *opt);
 int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
 int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
 int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
-				uint8_t stride, uint8_t nb_queues);
+				uint8_t stride, uint8_t nb_queues,
+				const struct rte_event_port_conf *port_conf);
 int perf_event_dev_service_setup(uint8_t dev_id);
 int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
 		int (*worker)(void *));
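
The most intricate change above is the new struct perf_elt: the latency timestamp is overlaid on the user_meta area at the tail of struct rte_event_timer, so one mempool element can be armed as a timer by the producer and later read back as a timestamp carrier by the worker. The following is a minimal standalone sketch of that offsetof() padding trick; struct fake_timer and struct elt are hypothetical stand-ins written for illustration, not part of the patch or of DPDK.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct rte_event_timer; only the trailing
 * user_meta member matters, modeled here as a GNU zero-length array.
 */
struct fake_timer {
	uint64_t timeout_ticks;
	uint64_t impl_opaque[2];
	uint8_t user_meta[0];
};

/* Same shape as the patched struct perf_elt: the timestamp is padded out
 * to the user_meta offset, so it aliases the timer's user-data area and
 * is not clobbered by the timer fields themselves.
 */
struct elt {
	union {
		struct fake_timer tim;
		struct {
			char pad[offsetof(struct fake_timer, user_meta)];
			uint64_t timestamp;
		};
	};
};

int main(void)
{
	struct elt e;

	e.timestamp = 42;
	/* timestamp and tim.user_meta occupy the same bytes */
	assert((void *)&e.timestamp == (void *)e.tim.user_meta);
	printf("timestamp offset = %zu, sizeof(elt) = %zu\n",
	       offsetof(struct elt, timestamp), sizeof(struct elt));
	return 0;
}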
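The patch also replaces rte_smp_wmb() with a release fence placed before the counter update, so the store of event_ptr into bufs[] becomes visible to other lcores no later than the processed-packet count. A compiler-agnostic sketch of the same writer-side ordering using plain C11 atomics is shown below; record_done and the globals are illustrative names, and the counter is made _Atomic only to keep the standalone example well-defined (the test itself uses a plain field).

#include <stdatomic.h>
#include <stdint.h>

#define BUF_SZ 16

static void *bufs[BUF_SZ];
static _Atomic uint64_t processed_pkts;

/* Writer side only: store the event pointer, then publish the counter.
 * The release fence orders the bufs[] store before the counter update,
 * mirroring rte_atomic_thread_fence(__ATOMIC_RELEASE) in the patch;
 * a reader pairs this with an acquire load of processed_pkts.
 */
static void
record_done(void *event_ptr, unsigned int idx)
{
	bufs[idx % BUF_SZ] = event_ptr;
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&processed_pkts, 1, memory_order_relaxed);
}

int main(void)
{
	int dummy_event;

	record_done(&dummy_event, 0);
	return (int)atomic_load_explicit(&processed_pkts,
					 memory_order_acquire);
}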