Add smp_cond_load_relaxed_timewait(), a timed variant of
smp_cond_load_relaxed().

This uses __cmpwait_relaxed() to do the actual waiting, with the
event-stream guaranteeing that we wake up from WFE periodically and
don't block forever in case there are no stores to the cacheline.

When the event-stream is unavailable, fall back to spin-waiting.

Cc: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Suggested-by: Catalin Marinas
Signed-off-by: Ankur Arora
---
 arch/arm64/include/asm/barrier.h | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index f5801b0ba9e9..9b29abc212db 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -219,6 +219,28 @@ do {									\
 	(typeof(*ptr))VAL;						\
 })
 
+extern bool arch_timer_evtstrm_available(void);
+
+#define smp_cond_load_relaxed_timewait(ptr, cond_expr, time_check_expr) \
+({									\
+	typeof(ptr) __PTR = (ptr);					\
+	__unqual_scalar_typeof(*ptr) VAL;				\
+	bool __wfe = arch_timer_evtstrm_available();			\
+									\
+	for (;;) {							\
+		VAL = READ_ONCE(*__PTR);				\
+		if (cond_expr)						\
+			break;						\
+		if (time_check_expr)					\
+			break;						\
+		if (likely(__wfe))					\
+			__cmpwait_relaxed(__PTR, VAL);			\
+		else							\
+			cpu_relax();					\
+	}								\
+	(typeof(*ptr))VAL;						\
+})
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASSEMBLY__ */
-- 
2.31.1
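
P.S. As an illustration only (not part of the patch), a caller polling a
flag against a deadline might look like the sketch below. The function
name wait_for_flag() and the parameter deadline_ns are hypothetical, and
local_clock() (from <linux/sched/clock.h>) is used purely as an example
time source for time_check_expr:

	/*
	 * Hypothetical usage sketch: WFE/spin until *flag becomes
	 * non-zero or local_clock() passes @deadline_ns. As with the
	 * other smp_cond_load*() macros, VAL names the value most
	 * recently loaded from the pointer, for use in cond_expr.
	 */
	static unsigned long wait_for_flag(unsigned long *flag, u64 deadline_ns)
	{
		return smp_cond_load_relaxed_timewait(flag, VAL != 0,
						      local_clock() >= deadline_ns);
	}

Because time_check_expr is only evaluated between loads, the deadline is
checked at event-stream granularity when WFE is used, so wakeups may
overshoot it by up to one event-stream period.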