From fa6b488998f0a57216a02c7d0ef8ce1e24cb39fe Mon Sep 17 00:00:00 2001
From: Gavin Hu
Date: Wed, 7 Jul 2021 13:48:37 +0800
Subject: [PATCH] spinlock: use WFE to reduce contention on aarch64

When acquiring a spinlock, a core repeatedly polls the lock variable.
Replace this busy polling with the rte_wait_until_equal API, which on
aarch64 can use WFE to wait until the lock is released.

Micro benchmarks and testpmd/l3fwd traffic tests were run on ThunderX2,
Ampere eMAG80 and Arm N1SDP; no notable performance gain or degradation
was measured.

Signed-off-by: Gavin Hu
Reviewed-by: Ruifeng Wang
Reviewed-by: Phil Yang
Reviewed-by: Steve Capper
Reviewed-by: Ola Liljedahl
Reviewed-by: Honnappa Nagarahalli
Tested-by: Pavan Nikhilesh
Acked-by: Konstantin Ananyev
Acked-by: Jerin Jacob
---
 lib/eal/include/generic/rte_spinlock.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/eal/include/generic/rte_spinlock.h b/lib/eal/include/generic/rte_spinlock.h
index 87ae7a4f18..40fe49d5ad 100644
--- a/lib/eal/include/generic/rte_spinlock.h
+++ b/lib/eal/include/generic/rte_spinlock.h
@@ -65,8 +65,8 @@ rte_spinlock_lock(rte_spinlock_t *sl)
 
 	while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
 				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
-		while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
-			rte_pause();
+		rte_wait_until_equal_32((volatile uint32_t *)&sl->locked,
+				0, __ATOMIC_RELAXED);
 		exp = 0;
 	}
 }
-- 
2.20.1
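
For context, below is a minimal, self-contained sketch of the before/after
slow path. It is illustrative only: the helper names busy_poll_lock() and
wfe_style_lock() are hypothetical, and the authoritative change is the
rte_spinlock_lock() hunk above. On aarch64 builds with the WFE option
enabled, rte_wait_until_equal_32() is expected to wait in a low-power WFE
state until the lock word changes; the generic implementation spins with
rte_pause(), so other targets keep the old behaviour, consistent with the
"no notable performance gain or degradation" observation above.

/*
 * Illustrative sketch, not DPDK code.  Helper names are hypothetical.
 */
#include <rte_pause.h>		/* rte_pause(), rte_wait_until_equal_32() */
#include <rte_spinlock.h>	/* rte_spinlock_t */

/* Old slow path: busy-poll the lock word, yielding with rte_pause(). */
static inline void
busy_poll_lock(rte_spinlock_t *sl)
{
	int exp = 0;

	while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
			rte_pause();
		exp = 0;
	}
}

/* New slow path: delegate the wait to the EAL wait primitive.  On aarch64
 * with WFE enabled this can sleep until the lock word is written; the
 * generic implementation falls back to an rte_pause() polling loop.
 */
static inline void
wfe_style_lock(rte_spinlock_t *sl)
{
	int exp = 0;

	while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		rte_wait_until_equal_32((volatile uint32_t *)&sl->locked,
				0, __ATOMIC_RELAXED);
		exp = 0;
	}
}

In both variants the lock is taken with an acquire CAS; waiting on a relaxed
read of the lock word avoids hammering the cache line with failing CAS
attempts, and WFE additionally lets the waiting core idle until that line
is written.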