Add support for Clang's capability analysis for local_lock_t and local_trylock_t. Signed-off-by: Marco Elver --- v3: * Switch to DECLARE_LOCK_GUARD_1_ATTRS() (suggested by Peter) * __assert -> __assume rename * Rework __this_cpu_local_lock helper * Support local_trylock_t --- .../dev-tools/capability-analysis.rst | 2 +- include/linux/local_lock.h | 45 +++++++----- include/linux/local_lock_internal.h | 71 ++++++++++++++---- lib/test_capability-analysis.c | 73 +++++++++++++++++++ 4 files changed, 156 insertions(+), 35 deletions(-) diff --git a/Documentation/dev-tools/capability-analysis.rst b/Documentation/dev-tools/capability-analysis.rst index 7a4c2238c910..9fb964e94920 100644 --- a/Documentation/dev-tools/capability-analysis.rst +++ b/Documentation/dev-tools/capability-analysis.rst @@ -82,7 +82,7 @@ Supported Kernel Primitives Currently the following synchronization primitives are supported: `raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`, -`bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`. +`bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`, `local_lock_t`. For capabilities with an initialization function (e.g., `spin_lock_init()`), calling this function on the capability instance before initializing any diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h index 2ba846419524..cfdca5bee89e 100644 --- a/include/linux/local_lock.h +++ b/include/linux/local_lock.h @@ -13,13 +13,13 @@ * local_lock - Acquire a per CPU local lock * @lock: The lock variable */ -#define local_lock(lock) __local_lock(this_cpu_ptr(lock)) +#define local_lock(lock) __local_lock(__this_cpu_local_lock(lock)) /** * local_lock_irq - Acquire a per CPU local lock and disable interrupts * @lock: The lock variable */ -#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock)) +#define local_lock_irq(lock) __local_lock_irq(__this_cpu_local_lock(lock)) /** * local_lock_irqsave - Acquire a per CPU local lock, save and disable @@ -28,19 +28,19 @@ * @flags: Storage for interrupt flags */ #define local_lock_irqsave(lock, flags) \ - __local_lock_irqsave(this_cpu_ptr(lock), flags) + __local_lock_irqsave(__this_cpu_local_lock(lock), flags) /** * local_unlock - Release a per CPU local lock * @lock: The lock variable */ -#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock)) +#define local_unlock(lock) __local_unlock(__this_cpu_local_lock(lock)) /** * local_unlock_irq - Release a per CPU local lock and enable interrupts * @lock: The lock variable */ -#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock)) +#define local_unlock_irq(lock) __local_unlock_irq(__this_cpu_local_lock(lock)) /** * local_unlock_irqrestore - Release a per CPU local lock and restore @@ -49,7 +49,7 @@ * @flags: Interrupt flags to restore */ #define local_unlock_irqrestore(lock, flags) \ - __local_unlock_irqrestore(this_cpu_ptr(lock), flags) + __local_unlock_irqrestore(__this_cpu_local_lock(lock), flags) /** * local_lock_init - Runtime initialize a lock instance @@ -64,7 +64,7 @@ * locking constrains it will _always_ fail to acquire the lock in NMI or * HARDIRQ context on PREEMPT_RT. */ -#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock)) +#define local_trylock(lock) __local_trylock(__this_cpu_local_lock(lock)) /** * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable @@ -77,27 +77,32 @@ * HARDIRQ context on PREEMPT_RT. 
*/ #define local_trylock_irqsave(lock, flags) \ - __local_trylock_irqsave(this_cpu_ptr(lock), flags) - -DEFINE_GUARD(local_lock, local_lock_t __percpu*, - local_lock(_T), - local_unlock(_T)) -DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*, - local_lock_irq(_T), - local_unlock_irq(_T)) + __local_trylock_irqsave(__this_cpu_local_lock(lock), flags) + +DEFINE_LOCK_GUARD_1(local_lock, local_lock_t __percpu, + local_lock(_T->lock), + local_unlock(_T->lock)) +DEFINE_LOCK_GUARD_1(local_lock_irq, local_lock_t __percpu, + local_lock_irq(_T->lock), + local_unlock_irq(_T->lock)) DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu, local_lock_irqsave(_T->lock, _T->flags), local_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags) #define local_lock_nested_bh(_lock) \ - __local_lock_nested_bh(this_cpu_ptr(_lock)) + __local_lock_nested_bh(__this_cpu_local_lock(_lock)) #define local_unlock_nested_bh(_lock) \ - __local_unlock_nested_bh(this_cpu_ptr(_lock)) + __local_unlock_nested_bh(__this_cpu_local_lock(_lock)) -DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*, - local_lock_nested_bh(_T), - local_unlock_nested_bh(_T)) +DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu, + local_lock_nested_bh(_T->lock), + local_unlock_nested_bh(_T->lock)) + +DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __assumes_cap(_T), /* */) +DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __assumes_cap(_T), /* */) +DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __assumes_cap(_T), /* */) +DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __assumes_cap(_T), /* */) #endif diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index 4c0e117d2d08..22ffaf06d9eb 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -10,18 +10,20 @@ #ifndef CONFIG_PREEMPT_RT -typedef struct { +struct_with_capability(local_lock) { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; struct task_struct *owner; #endif -} local_lock_t; +}; +typedef struct local_lock local_lock_t; /* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */ -typedef struct { +struct_with_capability(local_trylock) { local_lock_t llock; u8 acquired; -} local_trylock_t; +}; +typedef struct local_trylock local_trylock_t; #ifdef CONFIG_DEBUG_LOCK_ALLOC # define LOCAL_LOCK_DEBUG_INIT(lockname) \ @@ -81,9 +83,14 @@ do { \ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \ LD_LOCK_PERCPU); \ local_lock_debug_init(lock); \ + __assume_cap(lock); \ } while (0) -#define __local_trylock_init(lock) __local_lock_init(lock.llock) +#define __local_trylock_init(lock) \ +do { \ + __local_lock_init(lock.llock); \ + __assume_cap(lock); \ +} while (0) #define __spinlock_nested_bh_init(lock) \ do { \ @@ -94,6 +101,7 @@ do { \ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \ LD_LOCK_NORMAL); \ local_lock_debug_init(lock); \ + __assume_cap(lock); \ } while (0) #define __local_lock_acquire(lock) \ @@ -116,22 +124,25 @@ do { \ do { \ preempt_disable(); \ __local_lock_acquire(lock); \ + __acquire(lock); \ } while (0) #define __local_lock_irq(lock) \ do { \ local_irq_disable(); \ __local_lock_acquire(lock); \ + __acquire(lock); \ } while (0) #define __local_lock_irqsave(lock, flags) \ do { \ local_irq_save(flags); \ __local_lock_acquire(lock); \ + __acquire(lock); \ } while (0) #define __local_trylock(lock) \ - ({ \ + __try_acquire_cap(lock, ({ \ local_trylock_t *tl; \ \ preempt_disable(); \ @@ -145,10 +156,10 @@ do { \ (local_lock_t *)tl); \ } \ !!tl; \ - }) + })) #define __local_trylock_irqsave(lock, flags) \ 
- ({ \ + __try_acquire_cap(lock, ({ \ local_trylock_t *tl; \ \ local_irq_save(flags); \ @@ -162,7 +173,7 @@ do { \ (local_lock_t *)tl); \ } \ !!tl; \ - }) + })) #define __local_lock_release(lock) \ do { \ @@ -182,18 +193,21 @@ do { \ #define __local_unlock(lock) \ do { \ + __release(lock); \ __local_lock_release(lock); \ preempt_enable(); \ } while (0) #define __local_unlock_irq(lock) \ do { \ + __release(lock); \ __local_lock_release(lock); \ local_irq_enable(); \ } while (0) #define __local_unlock_irqrestore(lock, flags) \ do { \ + __release(lock); \ __local_lock_release(lock); \ local_irq_restore(flags); \ } while (0) @@ -202,13 +216,19 @@ do { \ do { \ lockdep_assert_in_softirq(); \ local_lock_acquire((lock)); \ + __acquire(lock); \ } while (0) #define __local_unlock_nested_bh(lock) \ - local_lock_release((lock)) + do { \ + __release(lock); \ + local_lock_release((lock)); \ + } while (0) #else /* !CONFIG_PREEMPT_RT */ +#include + /* * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the * critical section while staying preemptible. @@ -263,7 +283,7 @@ do { \ } while (0) #define __local_trylock(lock) \ - ({ \ + __try_acquire_cap(lock, capability_unsafe(({ \ int __locked; \ \ if (in_nmi() | in_hardirq()) { \ @@ -275,13 +295,36 @@ do { \ migrate_enable(); \ } \ __locked; \ - }) + }))) #define __local_trylock_irqsave(lock, flags) \ - ({ \ + __try_acquire_cap(lock, ({ \ typecheck(unsigned long, flags); \ flags = 0; \ __local_trylock(lock); \ - }) + })) + +#endif /* CONFIG_PREEMPT_RT */ +#if defined(WARN_CAPABILITY_ANALYSIS) +/* + * Because the compiler only knows about the base per-CPU variable, use this + * helper function to make the compiler think we lock/unlock the @base variable, + * and hide the fact we actually pass the per-CPU instance to lock/unlock + * functions. 
+ */ +static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base) + __returns_cap(base) __attribute__((overloadable)) +{ + return this_cpu_ptr(base); +} +#ifndef CONFIG_PREEMPT_RT +static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base) + __returns_cap(base) __attribute__((overloadable)) +{ + return this_cpu_ptr(base); +} #endif /* CONFIG_PREEMPT_RT */ +#else /* WARN_CAPABILITY_ANALYSIS */ +#define __this_cpu_local_lock(base) this_cpu_ptr(base) +#endif /* WARN_CAPABILITY_ANALYSIS */ diff --git a/lib/test_capability-analysis.c b/lib/test_capability-analysis.c index 3c6dad0ba065..e506dadb3933 100644 --- a/lib/test_capability-analysis.c +++ b/lib/test_capability-analysis.c @@ -6,7 +6,9 @@ #include #include +#include #include +#include #include #include #include @@ -450,3 +452,74 @@ static void __used test_srcu_guard(struct test_srcu_data *d) guard(srcu)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); } + +struct test_local_lock_data { + local_lock_t lock; + int counter __guarded_by(&lock); +}; + +static DEFINE_PER_CPU(struct test_local_lock_data, test_local_lock_data) = { + .lock = INIT_LOCAL_LOCK(lock), +}; + +static void __used test_local_lock_init(struct test_local_lock_data *d) +{ + local_lock_init(&d->lock); + d->counter = 0; +} + +static void __used test_local_lock(void) +{ + unsigned long flags; + + local_lock(&test_local_lock_data.lock); + this_cpu_add(test_local_lock_data.counter, 1); + local_unlock(&test_local_lock_data.lock); + + local_lock_irq(&test_local_lock_data.lock); + this_cpu_add(test_local_lock_data.counter, 1); + local_unlock_irq(&test_local_lock_data.lock); + + local_lock_irqsave(&test_local_lock_data.lock, flags); + this_cpu_add(test_local_lock_data.counter, 1); + local_unlock_irqrestore(&test_local_lock_data.lock, flags); + + local_lock_nested_bh(&test_local_lock_data.lock); + this_cpu_add(test_local_lock_data.counter, 1); + local_unlock_nested_bh(&test_local_lock_data.lock); +} + +static void __used test_local_lock_guard(void) +{ + { guard(local_lock)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); } + { guard(local_lock_irq)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); } + { guard(local_lock_irqsave)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); } + { guard(local_lock_nested_bh)(&test_local_lock_data.lock); this_cpu_add(test_local_lock_data.counter, 1); } +} + +struct test_local_trylock_data { + local_trylock_t lock; + int counter __guarded_by(&lock); +}; + +static DEFINE_PER_CPU(struct test_local_trylock_data, test_local_trylock_data) = { + .lock = INIT_LOCAL_TRYLOCK(lock), +}; + +static void __used test_local_trylock_init(struct test_local_trylock_data *d) +{ + local_trylock_init(&d->lock); + d->counter = 0; +} + +static void __used test_local_trylock(void) +{ + local_lock(&test_local_trylock_data.lock); + this_cpu_add(test_local_trylock_data.counter, 1); + local_unlock(&test_local_trylock_data.lock); + + if (local_trylock(&test_local_trylock_data.lock)) { + this_cpu_add(test_local_trylock_data.counter, 1); + local_unlock(&test_local_trylock_data.lock); + } +} -- 2.51.0.384.g4c02a37b29-goog
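
Illustrative usage (not part of this patch): with the annotations above, a per-CPU
data structure can mark its fields __guarded_by() the local lock, and the analysis
then warns about any access outside local_lock()/local_unlock(), a successful
local_trylock(), or one of the guard scopes. The names below (my_stats,
my_stats_inc, my_stats_try_inc) are made up for illustration only; the pattern
simply mirrors the new cases in lib/test_capability-analysis.c.

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct my_stats {
		local_trylock_t lock;
		/* Accesses without the lock held are flagged by the analysis. */
		int events __guarded_by(&lock);
	};

	static DEFINE_PER_CPU(struct my_stats, my_stats) = {
		.lock = INIT_LOCAL_TRYLOCK(lock),
	};

	static void my_stats_inc(void)
	{
		local_lock(&my_stats.lock);
		this_cpu_add(my_stats.events, 1);
		local_unlock(&my_stats.lock);
	}

	static bool my_stats_try_inc(void)
	{
		if (!local_trylock(&my_stats.lock))
			return false;
		this_cpu_add(my_stats.events, 1);
		local_unlock(&my_stats.lock);
		return true;
	}

For plain local_lock_t users the scoped forms work the same way, e.g.
guard(local_lock)(&my_stats.lock), now that the guards are declared via
DEFINE_LOCK_GUARD_1() and annotated with DECLARE_LOCK_GUARD_1_ATTRS().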