Rather than have the callers handle both the rmap lock release and unmapping the VMA on error, handle it within the mmap_action_complete() logic where it makes sense to do so, being careful not to unlock twice. This simplifies the logic and makes it harder to make a mistake with this, while retaining correct behaviour with regard to avoiding deadlocks. Also replace the call_action_complete() function with a direct invocation of mmap_action_complete() as the abstraction is no longer required. Also update the VMA tests to reflect this change. Signed-off-by: Lorenzo Stoakes (Oracle) --- mm/internal.h | 19 +++++++++++++++ mm/util.c | 41 +++++++++++++++------------------ mm/vma.c | 26 +-------------------- tools/testing/vma/include/dup.h | 8 +------ 4 files changed, 40 insertions(+), 54 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 0256ca44115a..760fbff9c430 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1817,6 +1817,25 @@ static inline int io_remap_pfn_range_prepare(struct vm_area_desc *desc) return 0; } +/* + * When we succeed an mmap action or just before we unmap a VMA on error, we + * need to ensure any rmap lock held is released. On unmap it's required to + * avoid a deadlock. 
+ */ +static inline void maybe_rmap_unlock_action(struct vm_area_struct *vma, + struct mmap_action *action) +{ + struct file *file; + + if (!action->hide_from_rmap_until_complete) + return; + + VM_WARN_ON_ONCE(vma_is_anonymous(vma)); + file = vma->vm_file; + i_mmap_unlock_write(file->f_mapping); + action->hide_from_rmap_until_complete = false; +} + #ifdef CONFIG_MMU_NOTIFIER static inline int clear_flush_young_ptes_notify(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, unsigned int nr) diff --git a/mm/util.c b/mm/util.c index 182f0f5cc400..a187f3ab4437 100644 --- a/mm/util.c +++ b/mm/util.c @@ -1219,13 +1219,7 @@ int compat_vma_mmap(struct file *file, struct vm_area_struct *vma) action->hide_from_rmap_until_complete = false; set_vma_from_desc(vma, &desc); - err = mmap_action_complete(vma, action); - if (err) { - const size_t len = vma_pages(vma) << PAGE_SHIFT; - - do_munmap(current->mm, vma->vm_start, len, NULL); - } - return err; + return mmap_action_complete(vma, action); } EXPORT_SYMBOL(compat_vma_mmap); @@ -1320,26 +1314,30 @@ void snapshot_page(struct page_snapshot *ps, const struct page *page) static int mmap_action_finish(struct vm_area_struct *vma, struct mmap_action *action, int err) { + size_t len; + + if (!err && action->success_hook) + err = action->success_hook(vma); + + /* do_munmap() might take rmap lock, so release if held. */ + maybe_rmap_unlock_action(vma, action); + if (!err) + return 0; + /* * If an error occurs, unmap the VMA altogether and return an error. We * only clear the newly allocated VMA, since this function is only * invoked if we do NOT merge, so we only clean up the VMA we created. */ - if (err) { - if (action->error_hook) { - /* We may want to filter the error. */ - err = action->error_hook(err); - - /* The caller should not clear the error. 
*/ - VM_WARN_ON_ONCE(!err); - } - return err; + len = vma_pages(vma) << PAGE_SHIFT; + do_munmap(current->mm, vma->vm_start, len, NULL); + if (action->error_hook) { + /* We may want to filter the error. */ + err = action->error_hook(err); + /* The caller should not clear the error. */ + VM_WARN_ON_ONCE(!err); } - - if (action->success_hook) - return action->success_hook(vma); - - return 0; + return err; } #ifdef CONFIG_MMU @@ -1377,7 +1375,6 @@ EXPORT_SYMBOL(mmap_action_prepare); */ int mmap_action_complete(struct vm_area_struct *vma, struct mmap_action *action) - { int err = 0; diff --git a/mm/vma.c b/mm/vma.c index 6105f54cea61..86f947c90442 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -2730,30 +2730,6 @@ static bool can_set_ksm_flags_early(struct mmap_state *map) return false; } -static int call_action_complete(struct mmap_state *map, - struct mmap_action *action, - struct vm_area_struct *vma) -{ - int err; - - err = mmap_action_complete(vma, action); - - /* If we held the file rmap we need to release it. 
*/ - if (action->hide_from_rmap_until_complete) { - struct file *file = vma->vm_file; - - i_mmap_unlock_write(file->f_mapping); - } - - if (err) { - const size_t len = vma_pages(vma) << PAGE_SHIFT; - - do_munmap(current->mm, vma->vm_start, len, NULL); - } - - return err; -} - static unsigned long __mmap_region(struct file *file, unsigned long addr, unsigned long len, vma_flags_t vma_flags, unsigned long pgoff, struct list_head *uf) @@ -2805,7 +2781,7 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr, __mmap_complete(&map, vma); if (have_mmap_prepare && allocated_new) { - error = call_action_complete(&map, &desc.action, vma); + error = mmap_action_complete(vma, &desc.action); if (error) return error; diff --git a/tools/testing/vma/include/dup.h b/tools/testing/vma/include/dup.h index c62d3998e922..bf8cffe1ea58 100644 --- a/tools/testing/vma/include/dup.h +++ b/tools/testing/vma/include/dup.h @@ -1296,13 +1296,7 @@ static inline int compat_vma_mmap(struct file *file, struct vm_area_struct *vma) action->hide_from_rmap_until_complete = false; set_vma_from_desc(vma, &desc); - err = mmap_action_complete(vma, action); - if (err) { - const size_t len = vma_pages(vma) << PAGE_SHIFT; - - do_munmap(current->mm, vma->vm_start, len, NULL); - } - return err; + return mmap_action_complete(vma, action); } static inline void vma_iter_init(struct vma_iterator *vmi, -- 2.53.0