Add smp_cond_load_relaxed_timewait(), which extends
smp_cond_load_relaxed() to allow waiting for a finite duration.

The additional parameter, time_check_expr, expresses the timeout
check.

The waiting is done via the usual cpu_relax() spin-wait around the
condition variable, with periodic evaluation of the time-check.

The number of times we spin is defined by SMP_TIMEWAIT_SPIN_COUNT
(chosen to be 200 by default) which, assuming each cpu_relax()
iteration takes around 20-30 cycles (measured on a variety of x86
platforms), amounts to around 4000-6000 cycles between time checks.

Cc: Arnd Bergmann
Cc: Will Deacon
Cc: Catalin Marinas
Cc: Peter Zijlstra
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ankur Arora
---
 include/asm-generic/barrier.h | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index d4f581c1e21d..c87d6fd8746f 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -273,6 +273,41 @@ do { \
 })
 #endif
 
+#ifndef SMP_TIMEWAIT_SPIN_COUNT
+#define SMP_TIMEWAIT_SPIN_COUNT	200
+#endif
+
+/**
+ * smp_cond_load_relaxed_timewait() - (Spin) wait for cond with no ordering
+ * guarantees until a timeout expires.
+ * @ptr: pointer to the variable to wait on
+ * @cond_expr: boolean expression to wait for
+ * @time_check_expr: expression to decide when to bail out
+ *
+ * Equivalent to using READ_ONCE() on the condition variable.
+ */
+#ifndef smp_cond_load_relaxed_timewait
+#define smp_cond_load_relaxed_timewait(ptr, cond_expr, time_check_expr) \
+({									\
+	typeof(ptr) __PTR = (ptr);					\
+	__unqual_scalar_typeof(*ptr) VAL;				\
+	u32 __n = 0, __spin = SMP_TIMEWAIT_SPIN_COUNT;			\
+									\
+	for (;;) {							\
+		VAL = READ_ONCE(*__PTR);				\
+		if (cond_expr)						\
+			break;						\
+		cpu_relax();						\
+		if (++__n < __spin)					\
+			continue;					\
+		if (time_check_expr)					\
+			break;						\
+		__n = 0;						\
+	}								\
+	(typeof(*ptr))VAL;						\
+})
+#endif
+
 /*
  * pmem_wmb() ensures that all stores for which the modification
  * are written to persistent storage by preceding instructions have
-- 
2.31.1
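
Note: for illustration only (not part of the patch), a minimal usage
sketch of the new primitive. The helper wait_for_flag() and the 1ms
budget are hypothetical; local_clock() and NSEC_PER_MSEC are existing
kernel interfaces, and VAL is the name conventionally bound by the
smp_cond_load*() family of macros:

	/*
	 * Hypothetical caller: return the flag value, waiting at most
	 * ~1ms for it to become non-zero. The deadline is evaluated
	 * only once every SMP_TIMEWAIT_SPIN_COUNT spins, so the wait
	 * can overshoot by a few thousand cycles.
	 */
	static inline u8 wait_for_flag(u8 *flag)
	{
		u64 deadline = local_clock() + NSEC_PER_MSEC;

		return smp_cond_load_relaxed_timewait(flag, VAL != 0,
						      local_clock() >= deadline);
	}

Any coarse time source works for time_check_expr, since its cost is
amortized over SMP_TIMEWAIT_SPIN_COUNT cpu_relax() iterations.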