From: Honnappa Nagarahalli
Date: Fri, 26 Jun 2020 20:35:01 +0000 (-0500)
Subject: eal/armv8: fix timer frequency calibration with PMU
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;ds=sidebyside;h=97c910139baf007b729388b0c3c24b894e4b6d3e;p=dpdk.git

eal/armv8: fix timer frequency calibration with PMU

get_tsc_freq uses the 'nanosleep' system call to calculate the CPU
frequency. However, 'nanosleep' results in the process getting
unscheduled, and the kernel saves and restores the PMU state so that
PMU cycles are not counted towards a sleeping process. When
RTE_ARM_EAL_RDTSC_USE_PMU is defined, this results in an incorrect
CPU frequency calculation. Replace this logic with a loop based on
the generic counter.

Bugzilla ID: 450
Fixes: f91bcbb2d9a6 ("eal/armv8: use high-resolution cycle counter")
Cc: stable@dpdk.org

Signed-off-by: Honnappa Nagarahalli
Reviewed-by: Ruifeng Wang
Reviewed-by: Dharmik Thakkar
Reviewed-by: Phil Yang
Acked-by: Jerin Jacob
---
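The failure mode can be seen with a minimal standalone program (illustrative
sketch only, not part of the patch; it assumes user-space access to
pmccntr_el0 was already enabled from kernel space, e.g. by a small module
programming pmcr_el0 as the comment in rte_cycles_64.h describes). Across the
nanosleep() the PMU cycle delta excludes the time spent sleeping, while the
generic counter keeps advancing:

/* Illustrative sketch, not part of the patch. Assumes EL0 access to
 * pmccntr_el0 was enabled beforehand from kernel space.
 */
#include <stdio.h>
#include <inttypes.h>
#include <stdint.h>
#include <time.h>

static inline uint64_t pmu_cycles(void)
{
	uint64_t cyc;

	asm volatile("isb" : : : "memory");
	asm volatile("mrs %0, pmccntr_el0" : "=r" (cyc));
	return cyc;
}

static inline uint64_t generic_cnt(void)
{
	uint64_t cnt;

	asm volatile("isb" : : : "memory");
	asm volatile("mrs %0, cntvct_el0" : "=r" (cnt));
	return cnt;
}

int main(void)
{
	struct timespec ts = { 0, 100 * 1000 * 1000 }; /* 100 ms */
	uint64_t p0 = pmu_cycles();
	uint64_t g0 = generic_cnt();

	nanosleep(&ts, NULL);

	/* The PMU delta misses the slept interval; the generic counter does not. */
	printf("pmccntr delta: %" PRIu64 "\n", pmu_cycles() - p0);
	printf("cntvct  delta: %" PRIu64 "\n", generic_cnt() - g0);
	return 0;
}

With RTE_ARM_EAL_RDTSC_USE_PMU, rte_rdtsc() is backed by pmccntr_el0, so a
calibration that sleeps undercounts cycles; the patch below therefore
busy-waits on the generic counter (cntvct_el0) for 1/10 second instead.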
diff --git a/lib/librte_eal/arm/include/rte_cycles_64.h b/lib/librte_eal/arm/include/rte_cycles_64.h
index da557b6a10..e41f9dbd62 100644
--- a/lib/librte_eal/arm/include/rte_cycles_64.h
+++ b/lib/librte_eal/arm/include/rte_cycles_64.h
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2015 Cavium, Inc
+ * Copyright(c) 2020 Arm Limited
  */
 
 #ifndef _RTE_CYCLES_ARM64_H_
@@ -11,6 +12,33 @@ extern "C" {
 
 #include "generic/rte_cycles.h"
 
+/** Read generic counter frequency */
+static __rte_always_inline uint64_t
+__rte_arm64_cntfrq(void)
+{
+	uint64_t freq;
+
+	asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
+	return freq;
+}
+
+/** Read generic counter */
+static __rte_always_inline uint64_t
+__rte_arm64_cntvct(void)
+{
+	uint64_t tsc;
+
+	asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
+	return tsc;
+}
+
+static __rte_always_inline uint64_t
+__rte_arm64_cntvct_precise(void)
+{
+	asm volatile("isb" : : : "memory");
+	return __rte_arm64_cntvct();
+}
+
 /**
  * Read the time base register.
  *
@@ -25,10 +53,7 @@
 static inline uint64_t
 rte_rdtsc(void)
 {
-	uint64_t tsc;
-
-	asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
-	return tsc;
+	return __rte_arm64_cntvct();
 }
 #else
 /**
@@ -49,14 +74,22 @@ rte_rdtsc(void)
  * asm volatile("msr pmcr_el0, %0" : : "r" (val));
  *
  */
-static inline uint64_t
-rte_rdtsc(void)
+
+/** Read PMU cycle counter */
+static __rte_always_inline uint64_t
+__rte_arm64_pmccntr(void)
 {
 	uint64_t tsc;
 
 	asm volatile("mrs %0, pmccntr_el0" : "=r"(tsc));
 	return tsc;
 }
+
+static inline uint64_t
+rte_rdtsc(void)
+{
+	return __rte_arm64_pmccntr();
+}
 #endif
 
 static inline uint64_t
diff --git a/lib/librte_eal/arm/rte_cycles.c b/lib/librte_eal/arm/rte_cycles.c
index 3500d523ef..5bd29b24b1 100644
--- a/lib/librte_eal/arm/rte_cycles.c
+++ b/lib/librte_eal/arm/rte_cycles.c
@@ -3,14 +3,35 @@
  */
 
 #include "eal_private.h"
+#include "rte_cycles.h"
 
 uint64_t
 get_tsc_freq_arch(void)
 {
 #if defined RTE_ARCH_ARM64 && !defined RTE_ARM_EAL_RDTSC_USE_PMU
-	uint64_t freq;
-	asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
-	return freq;
+	return __rte_arm64_cntfrq();
+#elif defined RTE_ARCH_ARM64 && defined RTE_ARM_EAL_RDTSC_USE_PMU
+#define CYC_PER_1MHZ 1E6
+	/* Use the generic counter ticks to calculate the PMU
+	 * cycle frequency.
+	 */
+	uint64_t ticks;
+	uint64_t start_ticks, cur_ticks;
+	uint64_t start_pmu_cycles, end_pmu_cycles;
+
+	/* Number of ticks for 1/10 second */
+	ticks = __rte_arm64_cntfrq() / 10;
+
+	start_ticks = __rte_arm64_cntvct_precise();
+	start_pmu_cycles = rte_rdtsc_precise();
+	do {
+		cur_ticks = __rte_arm64_cntvct();
+	} while ((cur_ticks - start_ticks) < ticks);
+	end_pmu_cycles = rte_rdtsc_precise();
+
+	/* Adjust the cycles to next 1Mhz */
+	return RTE_ALIGN_MUL_CEIL(end_pmu_cycles - start_pmu_cycles,
+			CYC_PER_1MHZ) * 10;
 #else
 	return 0;
 #endif
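A rough sanity check of the new calibration (again an illustrative sketch,
not part of the patch; the monotonic_ns() helper and the 100 ms window are
arbitrary choices, while rte_eal_init(), rte_rdtsc_precise() and
rte_get_tsc_hz() are existing DPDK APIs) is to count cycles across a
busy-wait timed with CLOCK_MONOTONIC_RAW, on a pinned/isolated lcore so the
PMU counter is not paused by a context switch:

/* Illustrative sketch, not part of the patch: compare the reported TSC
 * frequency with cycles counted over a busy-wait of known duration.
 */
#include <stdio.h>
#include <time.h>
#include <inttypes.h>
#include <rte_eal.h>
#include <rte_cycles.h>

static uint64_t
monotonic_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int
main(int argc, char **argv)
{
	uint64_t start_ns, start_cyc, ns, cyc;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	start_ns = monotonic_ns();
	start_cyc = rte_rdtsc_precise();

	/* Busy-wait ~100 ms; no sleep, so the PMU keeps counting. */
	while (monotonic_ns() - start_ns < 100000000ULL)
		;

	cyc = rte_rdtsc_precise() - start_cyc;
	ns = monotonic_ns() - start_ns;

	printf("reported: %" PRIu64 " Hz, measured: ~%" PRIu64 " Hz\n",
		rte_get_tsc_hz(), cyc * 1000000000ULL / ns);
	return 0;
}

Since the patch rounds the calibrated frequency up to the next MHz
(RTE_ALIGN_MUL_CEIL with CYC_PER_1MHZ) and scales the 1/10 second
measurement by 10, the measured value should land close to the reported one
under normal conditions.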