Add smp_cond_load_relaxed_timeout(), which extends smp_cond_load_relaxed() to allow waiting for a duration. We loop around waiting for the condition variable to change while periodically doing a time-check. The loop uses cpu_poll_relax() to slow down the busy-wait, which, unless overridden by the architecture code, amounts to a cpu_relax(). Note that there are two ways for the time-check to fail: the timeout case, or @time_expr_ns returning an invalid value (negative or zero). The second failure mode allows for clocks attached to the clock-domain of @cond_expr -- which might cease to operate meaningfully once some state internal to @cond_expr has changed -- to fail. Evaluation of @time_expr_ns: in the fastpath we want to keep the performance close to smp_cond_load_relaxed(). So defer evaluation of the potentially costly @time_expr_ns to the slowpath. This also means that there will always be some hardware-dependent duration that has passed in cpu_poll_relax() iterations at the time of first evaluation. Additionally cpu_poll_relax() is not guaranteed to return at the timeout boundary. In sum, expect timeout overshoot when we exit due to expiration of the timeout. The number of spin iterations before the time-check, SMP_TIMEOUT_POLL_COUNT, is chosen to be 200 by default. With a cpu_poll_relax() iteration taking ~20-30 cycles (measured on a variety of x86 platforms), we expect a time-check every ~4000-6000 cycles. The outer limit of the overshoot is double that when working with the parameters above. This might be higher or lower depending on the implementation of cpu_poll_relax() across architectures. Lastly, config option ARCH_HAS_CPU_RELAX indicates availability of a cpu_poll_relax() that is cheaper than polling. This might be relevant for cases with a long timeout. 
Cc: Arnd Bergmann Cc: Will Deacon Cc: Catalin Marinas Cc: Peter Zijlstra Cc: linux-arch@vger.kernel.org Reviewed-by: Catalin Marinas Signed-off-by: Ankur Arora --- Notes: - add a comment mentioning that smp_cond_load_relaxed_timeout() might be using architectural primitives that don't support MMIO. (David Laight, Catalin Marinas) include/asm-generic/barrier.h | 69 +++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index d4f581c1e21d..e5a6a1c04649 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -273,6 +273,75 @@ do { \ }) #endif +/* + * Number of times we iterate in the loop before doing the time check. + * Note that the iteration count assumes that the loop condition is + * relatively cheap. + */ +#ifndef SMP_TIMEOUT_POLL_COUNT +#define SMP_TIMEOUT_POLL_COUNT 200 +#endif + +/* + * Platforms with ARCH_HAS_CPU_RELAX have a cpu_poll_relax() implementation + * that is expected to be cheaper (lower power) than pure polling. + */ +#ifndef cpu_poll_relax +#define cpu_poll_relax(ptr, val, timeout_ns) cpu_relax() +#endif + +/** + * smp_cond_load_relaxed_timeout() - (Spin) wait for cond with no ordering + * guarantees until a timeout expires. + * @ptr: pointer to the variable to wait on. + * @cond_expr: boolean expression to wait for. + * @time_expr_ns: expression that evaluates to monotonic time (in ns) or, + * on failure, returns a negative value. + * @timeout_ns: timeout value in ns + * Both of the above are assumed to be compatible with s64; the signed + * value is used to handle the failure case in @time_expr_ns. + * + * Equivalent to using READ_ONCE() on the condition variable. + * + * Callers that expect to wait for prolonged durations might want + * to take into account the availability of ARCH_HAS_CPU_RELAX. + * + * Note that @ptr is expected to point to a memory address. 
Using this + * interface with MMIO will be slower (since SMP_TIMEOUT_POLL_COUNT is + * tuned for memory) and might also break in interesting architecture + * dependent ways. + */ +#ifndef smp_cond_load_relaxed_timeout +#define smp_cond_load_relaxed_timeout(ptr, cond_expr, \ + time_expr_ns, timeout_ns) \ +({ \ + typeof(ptr) __PTR = (ptr); \ + __unqual_scalar_typeof(*ptr) VAL; \ + u32 __n = 0, __spin = SMP_TIMEOUT_POLL_COUNT; \ + s64 __timeout = (s64)timeout_ns; \ + s64 __time_now, __time_end = 0; \ + \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + cpu_poll_relax(__PTR, VAL, (u64)__timeout); \ + if (++__n < __spin) \ + continue; \ + __time_now = (s64)(time_expr_ns); \ + if (unlikely(__time_end == 0)) \ + __time_end = __time_now + __timeout; \ + __timeout = __time_end - __time_now; \ + if (__time_now <= 0 || __timeout <= 0) { \ + VAL = READ_ONCE(*__PTR); \ + break; \ + } \ + __n = 0; \ + } \ + (typeof(*ptr))VAL; \ +}) +#endif + /* * pmem_wmb() ensures that all stores for which the modification * are written to persistent storage by preceding instructions have -- 2.31.1 Support waiting in smp_cond_load_relaxed_timeout() via __cmpwait_relaxed(). To ensure that we wake from waiting in WFE periodically and don't block forever if there are no stores to ptr, this path is only used when the event-stream is enabled. Note that when using __cmpwait_relaxed() we ignore the timeout value, allowing an overshoot by up to the event-stream period. And, in the unlikely event that the event-stream is unavailable, fallback to spin-waiting. Also set SMP_TIMEOUT_POLL_COUNT to 1 so we do the time-check in each iteration of smp_cond_load_relaxed_timeout(). And finally define ARCH_HAS_CPU_RELAX to indicate that we have an optimized implementation of cpu_poll_relax(). 
Cc: Arnd Bergmann Cc: Will Deacon Cc: Catalin Marinas Cc: linux-arm-kernel@lists.infradead.org Suggested-by: Will Deacon Acked-by: Will Deacon Reviewed-by: Catalin Marinas Signed-off-by: Ankur Arora --- arch/arm64/Kconfig | 3 +++ arch/arm64/include/asm/barrier.h | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9ea19b74b6c3..e3ce08276e9b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1628,6 +1628,9 @@ config ARCH_SUPPORTS_CRASH_DUMP config ARCH_DEFAULT_CRASH_DUMP def_bool y +config ARCH_HAS_CPU_RELAX + def_bool y + config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION def_bool CRASH_RESERVE diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 9495c4441a46..6190e178db51 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -12,6 +12,7 @@ #include #include +#include #define __nops(n) ".rept " #n "\nnop\n.endr\n" #define nops(n) asm volatile(__nops(n)) @@ -219,6 +220,26 @@ do { \ (typeof(*ptr))VAL; \ }) +/* Re-declared here to avoid include dependency. */ +extern bool arch_timer_evtstrm_available(void); + +/* + * In the common case, cpu_poll_relax() sits waiting in __cmpwait_relaxed() + * for the ptr value to change. + * + * Since this period is reasonably long, choose SMP_TIMEOUT_POLL_COUNT + * to be 1, so smp_cond_load_{relaxed,acquire}_timeout() does a + * time-check in each iteration. + */ +#define SMP_TIMEOUT_POLL_COUNT 1 + +#define cpu_poll_relax(ptr, val, timeout_ns) do { \ + if (arch_timer_evtstrm_available()) \ + __cmpwait_relaxed(ptr, val); \ + else \ + cpu_relax(); \ +} while (0) + #include #endif /* __ASSEMBLER__ */ -- 2.31.1 Moves some constants and functions related to xloops, cycles computation out to a new header. Also make __delay_cycles() available outside of arch/arm64/lib/delay.c. Rename some macros in qcom/rpmh-rsc.c which were occupying the same namespace. No functional change. 
Cc: Catalin Marinas Cc: Will Deacon Cc: Bjorn Andersson Cc: Konrad Dybcio Cc: linux-arm-kernel@lists.infradead.org Reviewed-by: Christoph Lameter Acked-by: Catalin Marinas Signed-off-by: Ankur Arora --- arch/arm64/include/asm/delay-const.h | 27 +++++++++++++++++++++++++++ arch/arm64/lib/delay.c | 15 ++++----------- drivers/soc/qcom/rpmh-rsc.c | 8 ++++---- 3 files changed, 35 insertions(+), 15 deletions(-) create mode 100644 arch/arm64/include/asm/delay-const.h diff --git a/arch/arm64/include/asm/delay-const.h b/arch/arm64/include/asm/delay-const.h new file mode 100644 index 000000000000..cb3988ff4e41 --- /dev/null +++ b/arch/arm64/include/asm/delay-const.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_DELAY_CONST_H +#define _ASM_DELAY_CONST_H + +#include /* For HZ */ + +/* 2**32 / 1000000 (rounded up) */ +#define __usecs_to_xloops_mult 0x10C7UL + +/* 2**32 / 1000000000 (rounded up) */ +#define __nsecs_to_xloops_mult 0x5UL + +extern unsigned long loops_per_jiffy; +static inline unsigned long xloops_to_cycles(unsigned long xloops) +{ + return (xloops * loops_per_jiffy * HZ) >> 32; +} + +#define USECS_TO_CYCLES(time_usecs) \ + xloops_to_cycles((time_usecs) * __usecs_to_xloops_mult) + +#define NSECS_TO_CYCLES(time_nsecs) \ + xloops_to_cycles((time_nsecs) * __nsecs_to_xloops_mult) + +u64 notrace __delay_cycles(void); + +#endif /* _ASM_DELAY_CONST_H */ diff --git a/arch/arm64/lib/delay.c b/arch/arm64/lib/delay.c index e278e060e78a..c660a7ea26dd 100644 --- a/arch/arm64/lib/delay.c +++ b/arch/arm64/lib/delay.c @@ -12,17 +12,10 @@ #include #include #include +#include #include -#define USECS_TO_CYCLES(time_usecs) \ - xloops_to_cycles((time_usecs) * 0x10C7UL) - -static inline unsigned long xloops_to_cycles(unsigned long xloops) -{ - return (xloops * loops_per_jiffy * HZ) >> 32; -} - /* * Force the use of CNTVCT_EL0 in order to have the same base as WFxT. 
* This avoids some annoying issues when CNTVOFF_EL2 is not reset 0 on a @@ -32,7 +25,7 @@ static inline unsigned long xloops_to_cycles(unsigned long xloops) * Note that userspace cannot change the offset behind our back either, * as the vcpu mutex is held as long as KVM_RUN is in progress. */ -static cycles_t notrace __delay_cycles(void) +u64 notrace __delay_cycles(void) { guard(preempt_notrace)(); return __arch_counter_get_cntvct_stable(); @@ -73,12 +66,12 @@ EXPORT_SYMBOL(__const_udelay); void __udelay(unsigned long usecs) { - __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */ + __const_udelay(usecs * __usecs_to_xloops_mult); } EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long nsecs) { - __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */ + __const_udelay(nsecs * __nsecs_to_xloops_mult); } EXPORT_SYMBOL(__ndelay); diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index c6f7d5c9c493..ad5ec5c0de0a 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -146,10 +146,10 @@ enum { * +---------------------------------------------------+ */ -#define USECS_TO_CYCLES(time_usecs) \ - xloops_to_cycles((time_usecs) * 0x10C7UL) +#define RPMH_USECS_TO_CYCLES(time_usecs) \ + rpmh_xloops_to_cycles((time_usecs) * 0x10C7UL) -static inline unsigned long xloops_to_cycles(u64 xloops) +static inline unsigned long rpmh_xloops_to_cycles(u64 xloops) { return (xloops * loops_per_jiffy * HZ) >> 32; } @@ -819,7 +819,7 @@ void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv) wakeup_us = ktime_to_us(wakeup); /* Convert the wakeup to arch timer scale */ - wakeup_cycles = USECS_TO_CYCLES(wakeup_us); + wakeup_cycles = RPMH_USECS_TO_CYCLES(wakeup_us); wakeup_cycles += arch_timer_read_counter(); exit: -- 2.31.1 To handle WFET use __cmpwait_timeout() similarly to __cmpwait(). These call out to the respective __cmpwait_case_timeout_##sz(), __cmpwait_case_##sz() functions. 
Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Reviewed-by: Catalin Marinas Signed-off-by: Ankur Arora --- arch/arm64/include/asm/barrier.h | 8 +++-- arch/arm64/include/asm/cmpxchg.h | 62 +++++++++++++++++++++++++------- 2 files changed, 55 insertions(+), 15 deletions(-) diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 6190e178db51..fbd71cd4ef4e 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -224,8 +224,8 @@ do { \ extern bool arch_timer_evtstrm_available(void); /* - * In the common case, cpu_poll_relax() sits waiting in __cmpwait_relaxed() - * for the ptr value to change. + * In the common case, cpu_poll_relax() sits waiting in __cmpwait_relaxed()/ + * __cmpwait_relaxed_timeout() for the ptr value to change. * * Since this period is reasonably long, choose SMP_TIMEOUT_POLL_COUNT * to be 1, so smp_cond_load_{relaxed,acquire}_timeout() does a @@ -234,7 +234,9 @@ extern bool arch_timer_evtstrm_available(void); #define SMP_TIMEOUT_POLL_COUNT 1 #define cpu_poll_relax(ptr, val, timeout_ns) do { \ - if (arch_timer_evtstrm_available()) \ + if (alternative_has_cap_unlikely(ARM64_HAS_WFXT)) \ + __cmpwait_relaxed_timeout(ptr, val, timeout_ns); \ + else if (arch_timer_evtstrm_available()) \ __cmpwait_relaxed(ptr, val); \ else \ cpu_relax(); \ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 6cf3cd6873f5..9e4cdc9e41d1 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -12,6 +12,7 @@ #include #include +#include /* * We need separate acquire parameters for ll/sc and lse, since the full @@ -212,7 +213,8 @@ __CMPXCHG_GEN(_mb) #define __CMPWAIT_CASE(w, sfx, sz) \ static inline void __cmpwait_case_##sz(volatile void *ptr, \ - unsigned long val) \ + unsigned long val, \ + u64 __maybe_unused timeout_ns) \ { \ unsigned long tmp; \ \ @@ -235,20 +237,52 @@ __CMPWAIT_CASE( , , 64); #undef 
__CMPWAIT_CASE -#define __CMPWAIT_GEN(sfx) \ -static __always_inline void __cmpwait##sfx(volatile void *ptr, \ - unsigned long val, \ - int size) \ +#define __CMPWAIT_TIMEOUT_CASE(w, sfx, sz) \ +static inline void __cmpwait_case_timeout_##sz(volatile void *ptr, \ + unsigned long val, \ + u64 timeout_ns) \ +{ \ + unsigned long tmp; \ + u64 ecycles = __delay_cycles() + \ + NSECS_TO_CYCLES(timeout_ns); \ + asm volatile( \ + " sevl\n" \ + " wfe\n" \ + " ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \ + " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ + " cbnz %" #w "[tmp], 2f\n" \ + " msr s0_3_c1_c0_0, %[ecycles]\n" \ + "2:" \ + : [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr) \ + : [val] "r" (val), [ecycles] "r" (ecycles)); \ +} + +__CMPWAIT_TIMEOUT_CASE(w, b, 8); +__CMPWAIT_TIMEOUT_CASE(w, h, 16); +__CMPWAIT_TIMEOUT_CASE(w, , 32); +__CMPWAIT_TIMEOUT_CASE( , , 64); + +#undef __CMPWAIT_TIMEOUT_CASE + +#define __CMPWAIT_GEN(timeout, sfx) \ +static __always_inline void __cmpwait##timeout##sfx(volatile void *ptr, \ + unsigned long val, \ + u64 timeout_ns, \ + int size) \ { \ switch (size) { \ case 1: \ - return __cmpwait_case##sfx##_8(ptr, (u8)val); \ + return __cmpwait_case##timeout##sfx##_8(ptr, (u8)val, \ + timeout_ns); \ case 2: \ - return __cmpwait_case##sfx##_16(ptr, (u16)val); \ + return __cmpwait_case##timeout##sfx##_16(ptr, (u16)val, \ + timeout_ns); \ case 4: \ - return __cmpwait_case##sfx##_32(ptr, val); \ + return __cmpwait_case##timeout##sfx##_32(ptr, val, \ + timeout_ns); \ case 8: \ - return __cmpwait_case##sfx##_64(ptr, val); \ + return __cmpwait_case##timeout##sfx##_64(ptr, val, \ + timeout_ns); \ default: \ BUILD_BUG(); \ } \ @@ -256,11 +290,15 @@ static __always_inline void __cmpwait##sfx(volatile void *ptr, \ unreachable(); \ } -__CMPWAIT_GEN() +__CMPWAIT_GEN( , ) +__CMPWAIT_GEN(_timeout, ) #undef __CMPWAIT_GEN -#define __cmpwait_relaxed(ptr, val) \ - __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr))) +#define __cmpwait_relaxed_timeout(ptr, val, timeout_ns) 
\ + __cmpwait_timeout((ptr), (unsigned long)(val), timeout_ns, sizeof(*(ptr))) + +#define __cmpwait_relaxed(ptr, val) \ + __cmpwait((ptr), (unsigned long)(val), 0, sizeof(*(ptr))) #endif /* __ASM_CMPXCHG_H */ -- 2.31.1 In preparation for defining smp_cond_load_acquire_timeout(), remove the private copy. Lacking this, the rqspinlock code falls back to using smp_cond_load_acquire(). Cc: Kumar Kartikeya Dwivedi Cc: Alexei Starovoitov Cc: bpf@vger.kernel.org Reviewed-by: Catalin Marinas Reviewed-by: Haris Okanovic Acked-by: Kumar Kartikeya Dwivedi Signed-off-by: Ankur Arora --- arch/arm64/include/asm/rqspinlock.h | 85 ----------------------------- 1 file changed, 85 deletions(-) diff --git a/arch/arm64/include/asm/rqspinlock.h b/arch/arm64/include/asm/rqspinlock.h index 9ea0a74e5892..a385603436e9 100644 --- a/arch/arm64/include/asm/rqspinlock.h +++ b/arch/arm64/include/asm/rqspinlock.h @@ -3,91 +3,6 @@ #define _ASM_RQSPINLOCK_H #include - -/* - * Hardcode res_smp_cond_load_acquire implementations for arm64 to a custom - * version based on [0]. In rqspinlock code, our conditional expression involves - * checking the value _and_ additionally a timeout. However, on arm64, the - * WFE-based implementation may never spin again if no stores occur to the - * locked byte in the lock word. As such, we may be stuck forever if - * event-stream based unblocking is not available on the platform for WFE spin - * loops (arch_timer_evtstrm_available). - * - * Once support for smp_cond_load_acquire_timewait [0] lands, we can drop this - * copy-paste. - * - * While we rely on the implementation to amortize the cost of sampling - * cond_expr for us, it will not happen when event stream support is - * unavailable, time_expr check is amortized. This is not the common case, and - * it would be difficult to fit our logic in the time_expr_ns >= time_limit_ns - * comparison, hence just let it be. In case of event-stream, the loop is woken - * up at microsecond granularity. 
- * - * [0]: https://lore.kernel.org/lkml/20250203214911.898276-1-ankur.a.arora@oracle.com - */ - -#ifndef smp_cond_load_acquire_timewait - -#define smp_cond_time_check_count 200 - -#define __smp_cond_load_relaxed_spinwait(ptr, cond_expr, time_expr_ns, \ - time_limit_ns) ({ \ - typeof(ptr) __PTR = (ptr); \ - __unqual_scalar_typeof(*ptr) VAL; \ - unsigned int __count = 0; \ - for (;;) { \ - VAL = READ_ONCE(*__PTR); \ - if (cond_expr) \ - break; \ - cpu_relax(); \ - if (__count++ < smp_cond_time_check_count) \ - continue; \ - if ((time_expr_ns) >= (time_limit_ns)) \ - break; \ - __count = 0; \ - } \ - (typeof(*ptr))VAL; \ -}) - -#define __smp_cond_load_acquire_timewait(ptr, cond_expr, \ - time_expr_ns, time_limit_ns) \ -({ \ - typeof(ptr) __PTR = (ptr); \ - __unqual_scalar_typeof(*ptr) VAL; \ - for (;;) { \ - VAL = smp_load_acquire(__PTR); \ - if (cond_expr) \ - break; \ - __cmpwait_relaxed(__PTR, VAL); \ - if ((time_expr_ns) >= (time_limit_ns)) \ - break; \ - } \ - (typeof(*ptr))VAL; \ -}) - -#define smp_cond_load_acquire_timewait(ptr, cond_expr, \ - time_expr_ns, time_limit_ns) \ -({ \ - __unqual_scalar_typeof(*ptr) _val; \ - int __wfe = arch_timer_evtstrm_available(); \ - \ - if (likely(__wfe)) { \ - _val = __smp_cond_load_acquire_timewait(ptr, cond_expr, \ - time_expr_ns, \ - time_limit_ns); \ - } else { \ - _val = __smp_cond_load_relaxed_spinwait(ptr, cond_expr, \ - time_expr_ns, \ - time_limit_ns); \ - smp_acquire__after_ctrl_dep(); \ - } \ - (typeof(*ptr))_val; \ -}) - -#endif - -#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1) - #include #endif /* _ASM_RQSPINLOCK_H */ -- 2.31.1 Add the acquire variant of smp_cond_load_relaxed_timeout(). This reuses the relaxed variant, with additional LOAD->LOAD ordering. 
Cc: Arnd Bergmann Cc: Will Deacon Cc: Catalin Marinas Cc: Peter Zijlstra Cc: linux-arch@vger.kernel.org Reviewed-by: Catalin Marinas Reviewed-by: Haris Okanovic Tested-by: Haris Okanovic Signed-off-by: Ankur Arora --- include/asm-generic/barrier.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index e5a6a1c04649..68d9e7108f4a 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -342,6 +342,32 @@ do { \ }) #endif +/** + * smp_cond_load_acquire_timeout() - (Spin) wait for cond with ACQUIRE ordering + * until a timeout expires. + * @ptr: pointer to the variable to wait on. + * @cond_expr: boolean expression to wait for. + * @time_expr_ns: monotonic expression that evaluates to time in ns or, + * on failure, returns a negative value. + * @timeout_ns: timeout value in ns + * (Both of the above are assumed to be compatible with s64.) + * + * Equivalent to using smp_cond_load_acquire() on the condition variable with + * a timeout. + */ +#ifndef smp_cond_load_acquire_timeout +#define smp_cond_load_acquire_timeout(ptr, cond_expr, \ + time_expr_ns, timeout_ns) \ +({ \ + __unqual_scalar_typeof(*ptr) _val; \ + _val = smp_cond_load_relaxed_timeout(ptr, cond_expr, \ + time_expr_ns, \ + timeout_ns); \ + smp_acquire__after_ctrl_dep(); \ + (typeof(*ptr))_val; \ +}) +#endif + /* * pmem_wmb() ensures that all stores for which the modification * are written to persistent storage by preceding instructions have -- 2.31.1 Add atomic load wrappers, atomic_cond_read_*_timeout() and atomic64_cond_read_*_timeout() for the cond-load timeout interfaces. Also add a short description for the atomic_cond_read_{relaxed,acquire}(), and the atomic_cond_read_{relaxed,acquire}_timeout() interfaces. 
Cc: Will Deacon Cc: Peter Zijlstra Cc: Boqun Feng Acked-by: Catalin Marinas Signed-off-by: Ankur Arora --- Documentation/atomic_t.txt | 14 +++++++++----- include/linux/atomic.h | 10 ++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt index bee3b1bca9a7..0e53f6ccb558 100644 --- a/Documentation/atomic_t.txt +++ b/Documentation/atomic_t.txt @@ -16,6 +16,10 @@ Non-RMW ops: atomic_read(), atomic_set() atomic_read_acquire(), atomic_set_release() +Non-RMW, non-atomic_t ops: + + atomic_cond_read_{relaxed,acquire}() + atomic_cond_read_{relaxed,acquire}_timeout() RMW atomic operations: @@ -79,11 +83,11 @@ SEMANTICS Non-RMW ops: -The non-RMW ops are (typically) regular LOADs and STOREs and are canonically -implemented using READ_ONCE(), WRITE_ONCE(), smp_load_acquire() and -smp_store_release() respectively. Therefore, if you find yourself only using -the Non-RMW operations of atomic_t, you do not in fact need atomic_t at all -and are doing it wrong. +The non-RMW ops are (typically) regular, or conditional LOADs and STOREs and +are canonically implemented using READ_ONCE(), WRITE_ONCE(), +smp_load_acquire() and smp_store_release() respectively. Therefore, if you +find yourself only using the Non-RMW operations of atomic_t, you do not in +fact need atomic_t at all and are doing it wrong. A note for the implementation of atomic_set{}() is that it must not break the atomicity of the RMW ops. 
That is: diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 8dd57c3a99e9..5bcb86e07784 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -31,6 +31,16 @@ #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) +#define atomic_cond_read_acquire_timeout(v, c, e, t) \ + smp_cond_load_acquire_timeout(&(v)->counter, (c), (e), (t)) +#define atomic_cond_read_relaxed_timeout(v, c, e, t) \ + smp_cond_load_relaxed_timeout(&(v)->counter, (c), (e), (t)) + +#define atomic64_cond_read_acquire_timeout(v, c, e, t) \ + smp_cond_load_acquire_timeout(&(v)->counter, (c), (e), (t)) +#define atomic64_cond_read_relaxed_timeout(v, c, e, t) \ + smp_cond_load_relaxed_timeout(&(v)->counter, (c), (e), (t)) + /* * The idea here is to build acquire/release variants by adding explicit * barriers on top of the relaxed variant. In the case where the relaxed -- 2.31.1 Add the atomic long wrappers for the cond-load timeout interfaces. 
Cc: Will Deacon Cc: Peter Zijlstra Cc: Boqun Feng Acked-by: Catalin Marinas Signed-off-by: Ankur Arora --- include/linux/atomic/atomic-long.h | 18 +++++++++++------- scripts/atomic/gen-atomic-long.sh | 16 ++++++++++------ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h index 6a4e47d2db35..553b6b0e0258 100644 --- a/include/linux/atomic/atomic-long.h +++ b/include/linux/atomic/atomic-long.h @@ -11,14 +11,18 @@ #ifdef CONFIG_64BIT typedef atomic64_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) -#define atomic_long_cond_read_acquire atomic64_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define atomic_long_cond_read_acquire atomic64_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#define atomic_long_cond_read_acquire_timeout atomic64_cond_read_acquire_timeout +#define atomic_long_cond_read_relaxed_timeout atomic64_cond_read_relaxed_timeout #else typedef atomic_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) -#define atomic_long_cond_read_acquire atomic_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define atomic_long_cond_read_acquire atomic_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#define atomic_long_cond_read_acquire_timeout atomic_cond_read_acquire_timeout +#define atomic_long_cond_read_relaxed_timeout atomic_cond_read_relaxed_timeout #endif /** @@ -1809,4 +1813,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v) } #endif /* _LINUX_ATOMIC_LONG_H */ -// 4b882bf19018602c10816c52f8b4ae280adc887b +// 79c1f4acb5774376ceed559843d5d9ed1348df99 diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh index 9826be3ba986..874643dc74bd 100755 --- a/scripts/atomic/gen-atomic-long.sh +++ 
b/scripts/atomic/gen-atomic-long.sh @@ -79,14 +79,18 @@ cat << EOF #ifdef CONFIG_64BIT typedef atomic64_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) -#define atomic_long_cond_read_acquire atomic64_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define atomic_long_cond_read_acquire atomic64_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#define atomic_long_cond_read_acquire_timeout atomic64_cond_read_acquire_timeout +#define atomic_long_cond_read_relaxed_timeout atomic64_cond_read_relaxed_timeout #else typedef atomic_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) -#define atomic_long_cond_read_acquire atomic_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define atomic_long_cond_read_acquire atomic_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#define atomic_long_cond_read_acquire_timeout atomic_cond_read_acquire_timeout +#define atomic_long_cond_read_relaxed_timeout atomic_cond_read_relaxed_timeout #endif EOF -- 2.31.1 check_timeout() gets the current time value and depending on how much time has passed, checks for deadlock or times out, returning 0 or -errno on deadlock or timeout. Switch this out to a clock style interface, where it functions as a clock in the "lock-domain", returning the current time until a deadlock or timeout occurs. Once a deadlock or timeout has occurred, it stops functioning as a clock and returns error. Also adjust the RES_CHECK_TIMEOUT macro to discard the clock value when updating the explicit return status. 
Cc: bpf@vger.kernel.org Cc: Kumar Kartikeya Dwivedi Cc: Alexei Starovoitov Acked-by: Kumar Kartikeya Dwivedi Signed-off-by: Ankur Arora --- kernel/bpf/rqspinlock.c | 45 +++++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 15 deletions(-) diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c index e4e338cdb437..0ec17ebb67c1 100644 --- a/kernel/bpf/rqspinlock.c +++ b/kernel/bpf/rqspinlock.c @@ -196,8 +196,12 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask) return 0; } -static noinline int check_timeout(rqspinlock_t *lock, u32 mask, - struct rqspinlock_timeout *ts) +/* + * Returns current monotonic time in ns on success or, negative errno + * value on failure due to timeout expiration or detection of deadlock. + */ +static noinline s64 clock_deadlock(rqspinlock_t *lock, u32 mask, + struct rqspinlock_timeout *ts) { u64 prev = ts->cur; u64 time; @@ -207,7 +211,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask, return -EDEADLK; ts->cur = ktime_get_mono_fast_ns(); ts->timeout_end = ts->cur + ts->duration; - return 0; + return (s64)ts->cur; } time = ktime_get_mono_fast_ns(); @@ -219,11 +223,15 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask, * checks. */ if (prev + NSEC_PER_MSEC < time) { + int ret; ts->cur = time; - return check_deadlock_ABBA(lock, mask); + ret = check_deadlock_ABBA(lock, mask); + if (ret) + return ret; + } - return 0; + return (s64)time; } /* @@ -231,15 +239,22 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask, * as the macro does internal amortization for us. */ #ifndef res_smp_cond_load_acquire -#define RES_CHECK_TIMEOUT(ts, ret, mask) \ - ({ \ - if (!(ts).spin++) \ - (ret) = check_timeout((lock), (mask), &(ts)); \ - (ret); \ +#define RES_CHECK_TIMEOUT(ts, ret, mask) \ + ({ \ + s64 __timeval_err = 0; \ + if (!(ts).spin++) \ + __timeval_err = clock_deadlock((lock), (mask), &(ts)); \ + (ret) = __timeval_err < 0 ? 
__timeval_err : 0; \ + __timeval_err; \ }) #else -#define RES_CHECK_TIMEOUT(ts, ret, mask) \ - ({ (ret) = check_timeout((lock), (mask), &(ts)); }) +#define RES_CHECK_TIMEOUT(ts, ret, mask) \ + ({ \ + s64 __timeval_err; \ + __timeval_err = clock_deadlock((lock), (mask), &(ts)); \ + (ret) = __timeval_err < 0 ? __timeval_err : 0; \ + __timeval_err; \ + }) #endif /* @@ -281,7 +296,7 @@ int __lockfunc resilient_tas_spin_lock(rqspinlock_t *lock) val = atomic_read(&lock->val); if (val || !atomic_try_cmpxchg(&lock->val, &val, 1)) { - if (RES_CHECK_TIMEOUT(ts, ret, ~0u)) + if (RES_CHECK_TIMEOUT(ts, ret, ~0u) < 0) goto out; cpu_relax(); goto retry; @@ -406,7 +421,7 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val) */ if (val & _Q_LOCKED_MASK) { RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT); - res_smp_cond_load_acquire(&lock->locked, !VAL || RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK)); + res_smp_cond_load_acquire(&lock->locked, !VAL || RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK) < 0); } if (ret) { @@ -568,7 +583,7 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val) */ RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT * 2); val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK) || - RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_PENDING_MASK)); + RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_PENDING_MASK) < 0); /* Disable queue destruction when we detect deadlocks. */ if (ret == -EDEADLK) { -- 2.31.1 Switch out the conditional load interfaces used by rqspinlock to smp_cond_read_acquire_timeout() and its wrapper, atomic_cond_read_acquire_timeout(). Both these handle the timeout and amortize as needed, so use the non-amortized RES_CHECK_TIMEOUT. RES_CHECK_TIMEOUT does double duty here -- presenting the current clock value, the timeout/deadlock error from clock_deadlock() to the cond-load and, returning the error value via ret. 
For correctness, we need to ensure that the error case of the cond-load interface always agrees with that in clock_deadlock(). For the most part, this is fine because there's no independent clock, or double reads from the clock in cond-load -- either of which could lead to its internal state going out of sync from that of clock_deadlock(). There is, however, an edge case where clock_deadlock() checks for: if (time > ts->timeout_end) return -ETIMEDOUT; while smp_cond_load_acquire_timeout() checks for: __time_now = (time_expr_ns); if (__time_now <= 0 || __time_now >= __time_end) { VAL = READ_ONCE(*__PTR); break; } This runs into a problem when (__time_now == __time_end) since clock_deadlock() does not treat it as a timeout condition but the second clause in the conditional above does. So, add an equality check in clock_deadlock(). Finally, redefine SMP_TIMEOUT_POLL_COUNT to be 16k to be similar to the spin-count used in the amortized version. We only do this for non-arm64 as that uses a waiting implementation. Cc: bpf@vger.kernel.org Cc: Kumar Kartikeya Dwivedi Cc: Alexei Starovoitov Acked-by: Kumar Kartikeya Dwivedi Signed-off-by: Ankur Arora --- kernel/bpf/rqspinlock.c | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c index 0ec17ebb67c1..e5e27266b813 100644 --- a/kernel/bpf/rqspinlock.c +++ b/kernel/bpf/rqspinlock.c @@ -215,7 +215,7 @@ static noinline s64 clock_deadlock(rqspinlock_t *lock, u32 mask, } time = ktime_get_mono_fast_ns(); - if (time > ts->timeout_end) + if (time >= ts->timeout_end) return -ETIMEDOUT; /* @@ -235,11 +235,10 @@ static noinline s64 clock_deadlock(rqspinlock_t *lock, u32 mask, } /* - * Do not amortize with spins when res_smp_cond_load_acquire is defined, - * as the macro does internal amortization for us. + * Spin amortized version of RES_CHECK_TIMEOUT. Used when busy-waiting in + * atomic_try_cmpxchg(). 
*/ -#ifndef res_smp_cond_load_acquire -#define RES_CHECK_TIMEOUT(ts, ret, mask) \ +#define RES_CHECK_TIMEOUT_AMORTIZED(ts, ret, mask) \ ({ \ s64 __timeval_err = 0; \ if (!(ts).spin++) \ @@ -247,7 +246,7 @@ static noinline s64 clock_deadlock(rqspinlock_t *lock, u32 mask, (ret) = __timeval_err < 0 ? __timeval_err : 0; \ __timeval_err; \ }) -#else + #define RES_CHECK_TIMEOUT(ts, ret, mask) \ ({ \ s64 __timeval_err; \ @@ -255,7 +254,6 @@ static noinline s64 clock_deadlock(rqspinlock_t *lock, u32 mask, (ret) = __timeval_err < 0 ? __timeval_err : 0; \ __timeval_err; \ }) -#endif /* * Initialize the 'spin' member. @@ -269,6 +267,17 @@ static noinline s64 clock_deadlock(rqspinlock_t *lock, u32 mask, */ #define RES_RESET_TIMEOUT(ts, _duration) ({ (ts).timeout_end = 0; (ts).duration = _duration; }) +/* + * Limit how often we invoke clock_deadlock() while spin-waiting in + * smp_cond_load_acquire_timeout() or atomic_cond_read_acquire_timeout(). + * + * We only override the default value not superceding ARM64's override. + */ +#ifndef CONFIG_ARM64 +#undef SMP_TIMEOUT_POLL_COUNT +#define SMP_TIMEOUT_POLL_COUNT (16*1024) +#endif + /* * Provide a test-and-set fallback for cases when queued spin lock support is * absent from the architecture. 
@@ -296,7 +305,7 @@ int __lockfunc resilient_tas_spin_lock(rqspinlock_t *lock) val = atomic_read(&lock->val); if (val || !atomic_try_cmpxchg(&lock->val, &val, 1)) { - if (RES_CHECK_TIMEOUT(ts, ret, ~0u) < 0) + if (RES_CHECK_TIMEOUT_AMORTIZED(ts, ret, ~0u) < 0) goto out; cpu_relax(); goto retry; @@ -319,12 +328,6 @@ EXPORT_SYMBOL_GPL(resilient_tas_spin_lock); */ static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]); -#ifndef res_smp_cond_load_acquire -#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire(v, c) -#endif - -#define res_atomic_cond_read_acquire(v, c) res_smp_cond_load_acquire(&(v)->counter, (c)) - /** * resilient_queued_spin_lock_slowpath - acquire the queued spinlock * @lock: Pointer to queued spinlock structure @@ -421,7 +424,9 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val) */ if (val & _Q_LOCKED_MASK) { RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT); - res_smp_cond_load_acquire(&lock->locked, !VAL || RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK) < 0); + smp_cond_load_acquire_timeout(&lock->locked, !VAL, + RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK), + ts.duration); } if (ret) { @@ -582,8 +587,9 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val) * us. */ RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT * 2); - val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK) || - RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_PENDING_MASK) < 0); + val = atomic_cond_read_acquire_timeout(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK), + RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_PENDING_MASK), + ts.duration); /* Disable queue destruction when we detect deadlocks. */ if (ret == -EDEADLK) { -- 2.31.1 Add tif_bitset_relaxed_wait() (and tif_need_resched_relaxed_wait() which wraps it) which takes the thread_info bit and timeout duration as parameters and waits until the bit is set or for the expiration of the timeout. The wait is implemented via smp_cond_load_relaxed_timeout(). 
smp_cond_load_relaxed_timeout() essentially provides the pattern used in poll_idle() where we spin in a loop waiting for the flag to change until a timeout occurs. tif_need_resched_relaxed_wait() allows us to abstract out the internals of waiting, scheduler specific details etc. Placed in linux/sched/idle.h instead of linux/thread_info.h to work around recursive include hell. Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Rafael J. Wysocki Cc: Daniel Lezcano Cc: linux-pm@vger.kernel.org Signed-off-by: Ankur Arora --- include/linux/sched/idle.h | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h index 8465ff1f20d1..ddee9b019895 100644 --- a/include/linux/sched/idle.h +++ b/include/linux/sched/idle.h @@ -3,6 +3,7 @@ #define _LINUX_SCHED_IDLE_H #include +#include enum cpu_idle_type { __CPU_NOT_IDLE = 0, @@ -113,4 +114,32 @@ static __always_inline void current_clr_polling(void) } #endif +/* + * Caller needs to make sure that the thread context cannot be preempted + * or migrated, so current_thread_info() cannot change from under us. + * + * This also allows us to safely stay in the local_clock domain. + */ +static __always_inline bool tif_bitset_relaxed_wait(int tif, u64 timeout_ns) +{ + unsigned long flags; + + flags = smp_cond_load_relaxed_timeout(&current_thread_info()->flags, + (VAL & BIT(tif)), + local_clock_noinstr(), + timeout_ns); + return flags & BIT(tif); +} + +/** + * tif_need_resched_relaxed_wait() - Wait for need-resched being set + * with no ordering guarantees until a timeout expires. + * + * @timeout_ns: timeout value. + */ +static __always_inline bool tif_need_resched_relaxed_wait(u64 timeout_ns) +{ + return tif_bitset_relaxed_wait(TIF_NEED_RESCHED, timeout_ns); +} + #endif /* _LINUX_SCHED_IDLE_H */ -- 2.31.1 The inner loop in poll_idle() polls over the thread_info flags, waiting to see if the thread has TIF_NEED_RESCHED set. 
The loop exits once the condition is met, or if the poll time limit has been exceeded. To minimize the number of instructions executed in each iteration, the time check is rate-limited. In addition, each loop iteration executes cpu_relax() which on certain platforms provides a hint to the pipeline that the loop busy-waits, allowing the processor to reduce power consumption. Switch over to tif_need_resched_relaxed_wait() instead, since that provides exactly that. However, since we want to minimize power consumption in idle, building of cpuidle/poll_state.c continues to depend on CONFIG_ARCH_HAS_CPU_RELAX as that serves as an indicator that the platform supports an optimized version of tif_need_resched_relaxed_wait() (via smp_cond_load_relaxed_timeout()). Cc: Rafael J. Wysocki Cc: Daniel Lezcano Cc: linux-pm@vger.kernel.org Suggested-by: Rafael J. Wysocki Acked-by: Rafael J. Wysocki (Intel) Signed-off-by: Ankur Arora --- drivers/cpuidle/poll_state.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c index c7524e4c522a..7443b3e971ba 100644 --- a/drivers/cpuidle/poll_state.c +++ b/drivers/cpuidle/poll_state.c @@ -6,41 +6,22 @@ #include #include #include -#include -#include #include #include #include -#define POLL_IDLE_RELAX_COUNT 200 - static int __cpuidle poll_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - u64 time_start; - - time_start = local_clock_noinstr(); - dev->poll_time_limit = false; raw_local_irq_enable(); if (!current_set_polling_and_test()) { - unsigned int loop_count = 0; u64 limit; limit = cpuidle_poll_time(drv, dev); - while (!need_resched()) { - cpu_relax(); - if (loop_count++ < POLL_IDLE_RELAX_COUNT) - continue; - - loop_count = 0; - if (local_clock_noinstr() - time_start > limit) { - dev->poll_time_limit = true; - break; - } - } + dev->poll_time_limit = !tif_need_resched_relaxed_wait(limit); } raw_local_irq_disable(); -- 
2.31.1 This enables the barrier tests to be built as a module. Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Ankur Arora --- arch/arm64/lib/delay.c | 2 ++ drivers/clocksource/arm_arch_timer.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/arm64/lib/delay.c b/arch/arm64/lib/delay.c index c660a7ea26dd..dfb102ce3009 100644 --- a/arch/arm64/lib/delay.c +++ b/arch/arm64/lib/delay.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -30,6 +31,7 @@ u64 notrace __delay_cycles(void) guard(preempt_notrace)(); return __arch_counter_get_cntvct_stable(); } +EXPORT_SYMBOL_IF_KUNIT(__delay_cycles); void __delay(unsigned long cycles) { diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 90aeff44a276..1de63e1a2cd2 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -896,6 +897,7 @@ bool arch_timer_evtstrm_available(void) */ return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available); } +EXPORT_SYMBOL_IF_KUNIT(arch_timer_evtstrm_available); static struct arch_timer_kvm_info arch_timer_kvm_info; -- 2.31.1 Add a success and failure case for smp_cond_load_relaxed_timeout(). Both test cases wait on some state in smp_cond_load_relaxed_timeout(). In the success case we spawn a kthread that pokes the bit. Signed-off-by: Ankur Arora --- lib/Kconfig.debug | 10 +++ lib/tests/Makefile | 1 + lib/tests/barrier-timeout-test.c | 125 +++++++++++++++++++++++++++++++ 3 files changed, 136 insertions(+) create mode 100644 lib/tests/barrier-timeout-test.c diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 93f356d2b3d9..dcd2d60a9391 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2398,6 +2398,16 @@ config FPROBE_SANITY_TEST Say N if you are unsure. 
+config BARRIER_TIMEOUT_TEST + tristate "KUnit tests for smp_cond_load_relaxed_timeout()" + depends on KUNIT + default KUNIT_ALL_TESTS + help + Builds KUnit tests that validate wake-up and timeout handling paths + in smp_cond_load_relaxed_timeout(). + + Say N if you are unsure. + config BACKTRACE_SELF_TEST tristate "Self test for the backtrace code" depends on DEBUG_KERNEL diff --git a/lib/tests/Makefile b/lib/tests/Makefile index 05f74edbc62b..3504d677b7b8 100644 --- a/lib/tests/Makefile +++ b/lib/tests/Makefile @@ -20,6 +20,7 @@ CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE) obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o +obj-$(CONFIG_BARRIER_TIMEOUT_TEST) += barrier-timeout-test.o obj-$(CONFIG_GLOB_KUNIT_TEST) += glob_kunit.o obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o diff --git a/lib/tests/barrier-timeout-test.c b/lib/tests/barrier-timeout-test.c new file mode 100644 index 000000000000..d72200daa0f2 --- /dev/null +++ b/lib/tests/barrier-timeout-test.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit tests exercising smp_cond_load_relaxed_timeout(). + * + * Copyright (c) 2026, Oracle Corp. 
+ * Author: Ankur Arora + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +struct clock_state { + s64 start_time; + s64 end_time; +}; + +#define TIMEOUT_MSEC 2 +#define TEST_FLAG_VAL BIT(2) +static unsigned int flag; + +static s64 basic_clock(struct clock_state *clk) +{ + clk->end_time = local_clock(); + return clk->end_time; +} + +static void update_flags(void) +{ + WRITE_ONCE(flag, TEST_FLAG_VAL); +} + +static s64 mock_clock(struct clock_state *clk) +{ + s64 clk_mid = clk->start_time + (TIMEOUT_MSEC * NSEC_PER_MSEC)/2; + + clk->end_time = local_clock(); + if (clk->end_time >= clk_mid) + update_flags(); + return clk->end_time; +} + +typedef s64 (*clkfn_t)(struct clock_state *); + +static void test_smp_cond_relaxed_timeout(struct kunit *test, + clkfn_t clock, bool succeeds) +{ + struct clock_state clk = { + .start_time = local_clock(), + .end_time = local_clock(), + }; + s64 runtime, timeout_ns = TIMEOUT_MSEC * NSEC_PER_MSEC; + unsigned int result; + + result = smp_cond_load_relaxed_timeout(&flag, + (VAL & TEST_FLAG_VAL), + clock(&clk), + timeout_ns); + + runtime = clk.end_time - clk.start_time; + KUNIT_EXPECT_EQ(test, (bool)(result & TEST_FLAG_VAL), succeeds); + KUNIT_EXPECT_EQ(test, runtime <= timeout_ns, succeeds); +} + +static int smp_cond_threadfn(void *data) +{ + udelay(TIMEOUT_MSEC * USEC_PER_MSEC / 4); + + /* + * Update flags after a delay to give smp_cond_relaxed_timeout() + * time to get started. 
+ */ + update_flags(); + return 0; +} + +static void smp_cond_relaxed_timeout_succeeds(struct kunit *test) +{ + struct task_struct *task; + + flag = 0; + + task = kthread_run(smp_cond_threadfn, &flag, "smp_cond_thread"); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, task); + test_smp_cond_relaxed_timeout(test, &basic_clock, true); + + kthread_stop(task); +} + +static void smp_cond_relaxed_timeout_mocked(struct kunit *test) +{ + flag = 0; + test_smp_cond_relaxed_timeout(test, &mock_clock, true); +} + +static void smp_cond_relaxed_timeout_expires(struct kunit *test) +{ + flag = 0; + test_smp_cond_relaxed_timeout(test, &basic_clock, false); +} + +static struct kunit_case barrier_timeout_test_cases[] = { + KUNIT_CASE(smp_cond_relaxed_timeout_mocked), + KUNIT_CASE(smp_cond_relaxed_timeout_succeeds), + KUNIT_CASE(smp_cond_relaxed_timeout_expires), + {} +}; + +static struct kunit_suite barrier_timeout_test_suite = { + .name = "smp-cond-load-relaxed-timeout", + .test_cases = barrier_timeout_test_cases, +}; + +kunit_test_suite(barrier_timeout_test_suite); + +MODULE_DESCRIPTION("KUnit tests for smp_cond_load_relaxed_timeout()"); +MODULE_LICENSE("GPL"); -- 2.31.1