Support waiting in smp_cond_load_relaxed_timeout() via __cmpwait_relaxed().
Limit this to when the event-stream is enabled, to ensure that we wake from
WFE periodically and don't block forever if there are no stores to the
cacheline. In the unlikely event that the event-stream is unavailable, fall
back to spin-waiting.

Also set SMP_TIMEOUT_POLL_COUNT to 1 so we do the time-check for each
iteration in smp_cond_load_relaxed_timeout().

Cc: linux-arm-kernel@lists.infradead.org
Cc: Catalin Marinas <catalin.marinas@arm.com>
Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 arch/arm64/include/asm/barrier.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index f5801b0ba9e9..92c16dfb8ca6 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -219,6 +219,19 @@ do {									\
 	(typeof(*ptr))VAL;						\
 })
 
+#define SMP_TIMEOUT_POLL_COUNT	1
+
+/* Re-declared here to avoid include dependency. */
+extern bool arch_timer_evtstrm_available(void);
+
+#define cpu_poll_relax(ptr, val)					\
+do {									\
+	if (arch_timer_evtstrm_available())				\
+		__cmpwait_relaxed(ptr, val);				\
+	else								\
+		cpu_relax();						\
+} while (0)
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASSEMBLY__ */
-- 
2.43.5