From: Jerin Jacob
Date: Thu, 18 Aug 2016 11:51:30 +0000 (+0530)
Subject: eal/armv8: use high-resolution cycle counter
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=f91bcbb2d9a6;p=dpdk.git

eal/armv8: use high-resolution cycle counter

The existing cntvct_el0 based rte_rdtsc() provides a portable means to get
a wall clock counter in user space. Typically it runs at <= 100MHz.

The alternative method to enable rte_rdtsc() for a high resolution wall
clock counter is through the ARMv8 PMU subsystem. The PMU cycle counter
runs at CPU frequency. However, access to the PMU cycle counter from user
space is not enabled by default in the arm64 Linux kernel. It is possible
to enable user space access to the cycle counter by configuring the PMU
from privileged mode (kernel space).

By default the rte_rdtsc() implementation uses the portable cntvct_el0
scheme. An application can choose the PMU based implementation with
CONFIG_RTE_ARM_EAL_RDTSC_USE_PMU.

Signed-off-by: Jerin Jacob
Acked-by: Hemant Agrawal
---

diff --git a/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h b/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
index 14f26120ac..867a9468c7 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_cycles_64.h
@@ -45,6 +45,11 @@ extern "C" {
  * @return
  *   The time base for this lcore.
  */
+#ifndef RTE_ARM_EAL_RDTSC_USE_PMU
+/**
+ * This call is portable to any ARMv8 architecture, however, typically
+ * cntvct_el0 runs at <= 100MHz and it may be imprecise for some tasks.
+ */
 static inline uint64_t
 rte_rdtsc(void)
 {
@@ -53,6 +58,34 @@ rte_rdtsc(void)
 	asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
 	return tsc;
 }
+#else
+/**
+ * This is an alternative method to enable rte_rdtsc() with a high
+ * resolution PMU cycle counter. The cycle counter runs at CPU frequency
+ * and this scheme uses the ARMv8 PMU subsystem to read the cycle counter
+ * from user space. However, access to the PMU cycle counter from user
+ * space is not enabled by default in the arm64 Linux kernel.
+ * It is possible to enable user space access to the cycle counter by
+ * configuring the PMU from privileged mode (kernel space).
+ *
+ * asm volatile("msr pmintenset_el1, %0" : : "r" ((u64)(0 << 31)));
+ * asm volatile("msr pmcntenset_el0, %0" :: "r" BIT(31));
+ * asm volatile("msr pmuserenr_el0, %0" : : "r"(BIT(0) | BIT(2)));
+ * asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+ * val |= (BIT(0) | BIT(2));
+ * isb();
+ * asm volatile("msr pmcr_el0, %0" : : "r" (val));
+ *
+ */
+static inline uint64_t
+rte_rdtsc(void)
+{
+	uint64_t tsc;
+
+	asm volatile("mrs %0, pmccntr_el0" : "=r"(tsc));
+	return tsc;
+}
+#endif
 
 static inline uint64_t
 rte_rdtsc_precise(void)