X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-eventdev%2Ftest_perf_common.h;h=14dcf8042964ea0f329741f5aa9120c630888bfb;hb=39ddd5d1895e72ba2bc974eddbc12a3135639ed1;hp=0877b9b44e23801d52f96731b83eec97eac331ee;hpb=59f697e3389f99b7586d91988be633b25699f398;p=dpdk.git

diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 0877b9b44e..14dcf80429 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -12,6 +12,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -38,6 +40,7 @@ struct prod_data {
 	struct test_perf *t;
 } __rte_cache_aligned;
 
+
 struct test_perf {
 	/* Don't change the offset of "done". Signal handler uses this memory
 	 * to terminate all lcores work.
@@ -53,13 +56,22 @@ struct test_perf {
 	struct worker_data worker[EVT_MAX_PORTS];
 	struct evt_options *opt;
 	uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
+	struct rte_event_timer_adapter *timer_adptr[
+		RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
 } __rte_cache_aligned;
 
 struct perf_elt {
-	uint64_t timestamp;
+	union {
+		struct rte_event_timer tim;
+		struct {
+			char pad[offsetof(struct rte_event_timer, user_meta)];
+			uint64_t timestamp;
+		};
+	};
 } __rte_cache_aligned;
 
 #define BURST_SIZE 16
+#define MAX_PROD_ENQ_BURST_SIZE 128
 
 #define PERF_WORKER_INIT\
 	struct worker_data *w = arg;\
@@ -67,6 +79,8 @@ struct perf_elt {
 	struct evt_options *opt = t->opt;\
 	const uint8_t dev = w->dev_id;\
 	const uint8_t port = w->port_id;\
+	const uint8_t prod_timer_type = \
+		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
 	uint8_t *const sched_type_list = &t->sched_type_list[0];\
 	struct rte_mempool *const pool = t->pool;\
 	const uint8_t nb_stages = t->opt->nb_stages;\
@@ -78,14 +92,19 @@ struct perf_elt {
 	printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
 		rte_lcore_id(), dev, port)
 
-static inline __attribute__((always_inline)) int
+static __rte_always_inline int
 perf_process_last_stage(struct rte_mempool *const pool,
 		struct rte_event *const ev, struct worker_data *const w,
 		void *bufs[], int const buf_sz, uint8_t count)
 {
 	bufs[count++] = ev->event_ptr;
+
+	/* release fence here ensures event_ptr is
+	 * stored before updating the number of
+	 * processed packets for worker lcores
+	 */
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 	w->processed_pkts++;
-	rte_smp_wmb();
 
 	if (unlikely(count == buf_sz)) {
 		count = 0;
@@ -94,7 +113,7 @@ perf_process_last_stage(struct rte_mempool *const pool,
 	return count;
 }
 
-static inline __attribute__((always_inline)) uint8_t
+static __rte_always_inline uint8_t
 perf_process_last_stage_latency(struct rte_mempool *const pool,
 		struct rte_event *const ev, struct worker_data *const w,
 		void *bufs[], int const buf_sz, uint8_t count)
@@ -103,6 +122,12 @@ perf_process_last_stage_latency(struct rte_mempool *const pool,
 	struct perf_elt *const m = ev->event_ptr;
 
 	bufs[count++] = ev->event_ptr;
+
+	/* release fence here ensures event_ptr is
+	 * stored before updating the number of
+	 * processed packets for worker lcores
+	 */
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 	w->processed_pkts++;
 
 	if (unlikely(count == buf_sz)) {
@@ -114,7 +139,6 @@ perf_process_last_stage_latency(struct rte_mempool *const pool,
 	}
 
 	w->latency += latency;
-	rte_smp_wmb();
 	return count;
 }
 
@@ -129,15 +153,18 @@ perf_nb_event_ports(struct evt_options *opt)
 int perf_test_result(struct evt_test *test, struct evt_options *opt);
 int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
 int perf_test_setup(struct evt_test *test, struct evt_options *opt);
+int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
 int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
 int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
-				uint8_t stride, uint8_t nb_queues);
+				uint8_t stride, uint8_t nb_queues,
+				const struct rte_event_port_conf *port_conf);
 int perf_event_dev_service_setup(uint8_t dev_id);
 int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
 		int (*worker)(void *));
 void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
 void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
 void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
 void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
 
 #endif /* _TEST_PERF_COMMON_ */
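
The reworked perf_elt overlays the latency timestamp on the trailing user_meta area of struct rte_event_timer, so a single mempool element can be armed as an event timer by the producer and still hand the enqueue timestamp to the last worker stage. Below is a minimal standalone sketch of the same overlay trick; struct toy_timer and struct toy_elt are hypothetical stand-ins, not the DPDK definitions.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct rte_event_timer: all that matters here
 * is that it ends in a trailing user_meta[] area the application may use.
 */
struct toy_timer {
	uint64_t timeout_ticks;
	uint64_t impl_opaque[2];
	uint8_t user_meta[0];	/* zero-length array, GNU/Clang extension */
};

/* Same overlay idea as perf_elt: pad[] pushes timestamp to the offset of
 * user_meta, so writing e.timestamp stores into the timer's user area.
 */
struct toy_elt {
	union {
		struct toy_timer tim;
		struct {
			char pad[offsetof(struct toy_timer, user_meta)];
			uint64_t timestamp;
		};
	};
};

int main(void)
{
	struct toy_elt e;

	e.tim.timeout_ticks = 100;	/* producer arms the "timer" */
	e.timestamp = 123456;		/* and records the enqueue timestamp */

	/* The worker side only ever looks at the timestamp. */
	printf("timestamp at offset %zu = %" PRIu64 "\n",
	       offsetof(struct toy_elt, timestamp), e.timestamp);
	return 0;
}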
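
The rte_smp_wmb() barriers give way to a release fence issued before processed_pkts is updated, so the event_ptr stored into bufs[] is ordered ahead of the counter that other lcores poll. The following rough sketch shows that pairing with the __atomic_thread_fence() builtin that the generic rte_atomic_thread_fence() implementation maps to; worker_stats, worker_complete and observe_progress are made-up names for illustration.

#include <stddef.h>
#include <stdint.h>

struct worker_stats {
	void *bufs[16];
	uint64_t processed_pkts;	/* polled by the main lcore for progress */
};

/* Worker side: publish the completed event pointer, then bump the counter.
 * The release fence keeps the bufs[] store ordered before the counter update,
 * mirroring perf_process_last_stage().
 */
static void
worker_complete(struct worker_stats *w, void *event_ptr, size_t slot)
{
	w->bufs[slot] = event_ptr;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	w->processed_pkts++;
}

/* Observer side: an acquire fence after reading the counter pairs with the
 * worker's release fence, so bufs[] entries already counted are visible.
 */
static uint64_t
observe_progress(const struct worker_stats *w)
{
	uint64_t n = __atomic_load_n(&w->processed_pkts, __ATOMIC_RELAXED);

	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	return n;
}

int main(void)
{
	struct worker_stats w = { {0}, 0 };

	worker_complete(&w, &w, 0);	/* store a dummy pointer and count it */
	return (int)observe_progress(&w);
}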