Support waiting in smp_cond_load_relaxed_timeout() via __cmpwait_relaxed(). To ensure that we wake from waiting in WFE periodically and don't block forever if there are no stores to ptr, this path is only used when the event-stream is enabled. Note that when using __cmpwait_relaxed() we ignore the timeout value, allowing an overshoot by up to the event-stream period. And, in the unlikely event that the event-stream is unavailable, fall back to spin-waiting. Also set SMP_TIMEOUT_POLL_COUNT to 1 so we do the time-check in each iteration of smp_cond_load_relaxed_timeout(). And finally define ARCH_HAS_CPU_RELAX to indicate that we have an optimized implementation of cpu_poll_relax(). Cc: Arnd Bergmann Cc: Will Deacon Cc: Catalin Marinas Cc: linux-arm-kernel@lists.infradead.org Suggested-by: Will Deacon Acked-by: Will Deacon Signed-off-by: Ankur Arora Note: This commit additionally defines ARCH_HAS_CPU_RELAX. Will: I've retained your acked-by. Please let me know if you don't agree with this change. --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/barrier.h | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 93173f0a09c7..239fdca8e2cf 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -22,6 +22,7 @@ config ARM64 select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_CC_PLATFORM select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION + select ARCH_HAS_CPU_RELAX select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 9495c4441a46..6190e178db51 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -12,6 +12,7 @@ #include #include +#include #define __nops(n) ".rept " #n "\nnop\n.endr\n" #define nops(n) asm volatile(__nops(n)) @@ -219,6 +220,26 @@ do { \ (typeof(*ptr))VAL; \ }) +/* Re-declared here to avoid include dependency. 
*/ +extern bool arch_timer_evtstrm_available(void); + +/* + * In the common case, cpu_poll_relax() sits waiting in __cmpwait_relaxed() + * for the ptr value to change. + * + * Since this period is reasonably long, choose SMP_TIMEOUT_POLL_COUNT + * to be 1, so smp_cond_load_{relaxed,acquire}_timeout() does a + * time-check in each iteration. + */ +#define SMP_TIMEOUT_POLL_COUNT 1 + +#define cpu_poll_relax(ptr, val, timeout_ns) do { \ + if (arch_timer_evtstrm_available()) \ + __cmpwait_relaxed(ptr, val); \ + else \ + cpu_relax(); \ +} while (0) + #include #endif /* __ASSEMBLER__ */ -- 2.31.1