As part of adding some additional lock asserts in mm, we wish to be able to determine if a read/write semaphore is write-locked, so add rwsem_is_write_locked() to do the write-lock equivalent of rwsem_is_locked(). While we're here, update rwsem_assert_held[_write]_nolockdep() to utilise the rwsem_is_[write_]locked() helpers directly to reduce code duplication, and also update rwsem_is_locked() to take a const rwsem and return a boolean. This patch also updates the CONFIG_PREEMPT_RT helpers to do the same thing there. Signed-off-by: Lorenzo Stoakes --- include/linux/rwsem.h | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index f1aaf676a874..b25b7944ad99 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -70,19 +70,24 @@ struct rw_semaphore { #define RWSEM_WRITER_LOCKED (1UL << 0) #define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) -static inline int rwsem_is_locked(struct rw_semaphore *sem) +static inline bool rwsem_is_locked(const struct rw_semaphore *sem) { return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE; } +static inline bool rwsem_is_write_locked(const struct rw_semaphore *sem) +{ + return atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED; +} + static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem) { - WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE); + WARN_ON(!rwsem_is_locked(sem)); } static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem) { - WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED)); + WARN_ON(!rwsem_is_write_locked(sem)); } /* Common initializer macros and functions */ @@ -174,11 +179,16 @@ do { \ __init_rwsem((sem), #sem, &__key); \ } while (0) -static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem) +static __always_inline bool rwsem_is_locked(const struct rw_semaphore *sem) { return 
rw_base_is_locked(&sem->rwbase); } +static __always_inline bool rwsem_is_write_locked(const struct rw_semaphore *sem) +{ + return rw_base_is_write_locked(&sem->rwbase); +} + static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem) { WARN_ON(!rwsem_is_locked(sem)); @@ -186,7 +196,7 @@ static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphor static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem) { - WARN_ON(!rw_base_is_write_locked(&sem->rwbase)); + WARN_ON(!rwsem_is_write_locked(sem)); } static __always_inline int rwsem_is_contended(struct rw_semaphore *sem) -- 2.52.0 Add vma_is_read_locked(), vma_is_write_locked() and vma_is_locked() helpers and utilise them in vma_assert_locked() and vma_assert_write_locked(). We need to test mmap lock state to correctly test vma write lock state, so add mmap_is_locked() and mmap_is_write_locked() so we can explicitly provide means by which to check mmap_lock state also. These functions will intentionally not be defined if CONFIG_PER_VMA_LOCK is not set, as they would not make any sense in a context where VMA locks do not exist. We are careful in invoking __is_vma_write_locked() - this function asserts the mmap write lock, so we check that this lock is held before invoking the function so vma_is_write_locked() can be used in situations where we don't want an assert failure. While we're here, we also update __is_vma_write_locked() to accept a const vm_area_struct pointer so we can consistently have const VMA parameters for these helpers. As part of this change we also move mmap_lock_is_contended() up in include/linux/mmap_lock.h so we group predicates based on mmap lock state together. This lays the groundwork for a subsequent change that allows for asserting that either the mmap lock or VMA lock is held. 
Suggested-by: Suren Baghdasaryan Signed-off-by: Lorenzo Stoakes --- include/linux/mmap_lock.h | 50 +++++++++++++++++++++++++++++---------- 1 file changed, 38 insertions(+), 12 deletions(-) diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index b50416fbba20..9f6932ffaaa0 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -66,6 +66,22 @@ static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) #endif /* CONFIG_TRACING */ + +static inline bool mmap_lock_is_contended(struct mm_struct *mm) +{ + return rwsem_is_contended(&mm->mmap_lock); +} + +static inline bool mmap_is_locked(const struct mm_struct *mm) +{ + return rwsem_is_locked(&mm->mmap_lock); +} + +static inline bool mmap_is_write_locked(const struct mm_struct *mm) +{ + return rwsem_is_write_locked(&mm->mmap_lock); +} + static inline void mmap_assert_locked(const struct mm_struct *mm) { rwsem_assert_held(&mm->mmap_lock); @@ -183,7 +199,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) } /* WARNING! Can only be used if mmap_lock is expected to be write-locked */ -static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq) +static inline bool __is_vma_write_locked(const struct vm_area_struct *vma, + unsigned int *mm_lock_seq) { mmap_assert_write_locked(vma->vm_mm); @@ -236,19 +253,33 @@ int vma_start_write_killable(struct vm_area_struct *vma) return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE); } -static inline void vma_assert_write_locked(struct vm_area_struct *vma) +static inline bool vma_is_read_locked(const struct vm_area_struct *vma) +{ + return refcount_read(&vma->vm_refcnt) > 1; +} + +static inline bool vma_is_write_locked(struct vm_area_struct *vma) { unsigned int mm_lock_seq; - VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); + /* __is_vma_write_locked() requires the mmap write lock. 
*/ + return mmap_is_write_locked(vma->vm_mm) && + __is_vma_write_locked(vma, &mm_lock_seq); } -static inline void vma_assert_locked(struct vm_area_struct *vma) +static inline bool vma_is_locked(struct vm_area_struct *vma) { - unsigned int mm_lock_seq; + return vma_is_read_locked(vma) || vma_is_write_locked(vma); } + +static inline void vma_assert_write_locked(struct vm_area_struct *vma) +{ + VM_BUG_ON_VMA(!vma_is_write_locked(vma), vma); +} - VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 && - !__is_vma_write_locked(vma, &mm_lock_seq), vma); +static inline void vma_assert_locked(struct vm_area_struct *vma) +{ + VM_BUG_ON_VMA(!vma_is_locked(vma), vma); } static inline bool vma_is_attached(struct vm_area_struct *vma) @@ -432,9 +463,4 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) up_read_non_owner(&mm->mmap_lock); } -static inline int mmap_lock_is_contended(struct mm_struct *mm) -{ - return rwsem_is_contended(&mm->mmap_lock); -} - #endif /* _LINUX_MMAP_LOCK_H */ -- 2.52.0 Sometimes we wish to assert that a VMA is stable, that is - the VMA cannot be changed underneath us. This will be the case if EITHER the VMA lock or the mmap lock is held. In order to be able to do so this patch adds a vma_is_stabilised() predicate. We specify this differently based on whether CONFIG_PER_VMA_LOCK is specified - if it is then naturally we check whether either a VMA lock or the mmap lock is held, otherwise we need only check the mmap lock. Note that we only trigger the assert if CONFIG_DEBUG_VM is set, as failing to hold either lock would indicate a programmatic error, so a release kernel runtime assert doesn't make much sense. There are a couple places in the kernel where we already do this check - the anon_vma_name() helper in mm/madvise.c and vma_flag_set_atomic() in include/linux/mm.h, which we update to use vma_assert_stabilised(). These were in fact implemented incorrectly - if neither the mmap lock nor the VMA lock were held, these asserts did not fire. 
However since these asserts are debug-only, and a large number of test configurations will have CONFIG_PER_VMA_LOCK set, it has likely had no real-world impact. This change corrects this mistake at any rate. Signed-off-by: Lorenzo Stoakes --- include/linux/mm.h | 4 +--- include/linux/mmap_lock.h | 23 ++++++++++++++++++++++- mm/madvise.c | 4 +--- 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 44a2a9c0a92f..8707059f4d37 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1008,9 +1008,7 @@ static inline void vma_flag_set_atomic(struct vm_area_struct *vma, { unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags); - /* mmap read lock/VMA read lock must be held. */ - if (!rwsem_is_locked(&vma->vm_mm->mmap_lock)) - vma_assert_locked(vma); + vma_assert_stabilised(vma); if (__vma_flag_atomic_valid(vma, bit)) set_bit((__force int)bit, bitmap); diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 9f6932ffaaa0..711885cb5372 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -66,7 +66,6 @@ static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) #endif /* CONFIG_TRACING */ - static inline bool mmap_lock_is_contended(struct mm_struct *mm) { return rwsem_is_contended(&mm->mmap_lock); @@ -272,6 +271,11 @@ static inline bool vma_is_locked(struct vm_area_struct *vma) return vma_is_read_locked(vma) || vma_is_write_locked(vma); } +static inline bool vma_is_stabilised(struct vm_area_struct *vma) +{ + return vma_is_locked(vma) || mmap_is_locked(vma->vm_mm); +} + static inline void vma_assert_write_locked(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!vma_is_write_locked(vma), vma); @@ -358,6 +362,11 @@ static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, return NULL; } +static inline bool vma_is_stabilised(struct vm_area_struct *vma) +{ + return mmap_is_locked(vma->vm_mm); +} + static inline void 
vma_assert_locked(struct vm_area_struct *vma) { mmap_assert_locked(vma->vm_mm); @@ -463,4 +472,16 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) up_read_non_owner(&mm->mmap_lock); } +/** + * vma_assert_stabilised() - assert that this VMA cannot be changed from + * underneath us either by having a VMA or mmap lock held. + * @vma: The VMA whose stability we wish to assess. + * + * Note that this will only trigger an assert if CONFIG_DEBUG_VM is set. + */ +static inline void vma_assert_stabilised(struct vm_area_struct *vma) +{ + VM_BUG_ON_VMA(!vma_is_stabilised(vma), vma); +} + #endif /* _LINUX_MMAP_LOCK_H */ diff --git a/mm/madvise.c b/mm/madvise.c index 4bf4c8c38fd3..1f3040688f04 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -109,9 +109,7 @@ void anon_vma_name_free(struct kref *kref) struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) { - if (!rwsem_is_locked(&vma->vm_mm->mmap_lock)) - vma_assert_locked(vma); - + vma_assert_stabilised(vma); return vma->anon_name; } -- 2.52.0