The ceiling and the tree search limit need to be different arguments to
support an upcoming change to the handling of a failed fork attempt.

No functional changes intended.

Signed-off-by: Liam R. Howlett
---
 mm/internal.h | 4 +++-
 mm/memory.c   | 7 ++++---
 mm/mmap.c     | 2 +-
 mm/vma.c      | 3 ++-
 4 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 45b725c3dc030..f9a278ac76d83 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -444,7 +444,9 @@ void folio_activate(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
-		   unsigned long ceiling, bool mm_wr_locked);
+		   unsigned long ceiling, unsigned long tree_max,
+		   bool mm_wr_locked);
+
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
 struct zap_details;
diff --git a/mm/memory.c b/mm/memory.c
index 0ba4f6b718471..3346514562bba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -371,7 +371,8 @@ void free_pgd_range(struct mmu_gather *tlb,
 
 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
-		   unsigned long ceiling, bool mm_wr_locked)
+		   unsigned long ceiling, unsigned long tree_max,
+		   bool mm_wr_locked)
 {
	struct unlink_vma_file_batch vb;
 
@@ -385,7 +386,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
	 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
	 * be 0. This will underflow and is okay.
	 */
-	next = mas_find(mas, ceiling - 1);
+	next = mas_find(mas, tree_max - 1);
	if (unlikely(xa_is_zero(next)))
		next = NULL;
 
@@ -405,7 +406,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
	 */
	while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
		vma = next;
-		next = mas_find(mas, ceiling - 1);
+		next = mas_find(mas, tree_max - 1);
		if (unlikely(xa_is_zero(next)))
			next = NULL;
		if (mm_wr_locked)
diff --git a/mm/mmap.c b/mm/mmap.c
index 0995a48b46d59..eba2bc81bc749 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1311,7 +1311,7 @@ void exit_mmap(struct mm_struct *mm)
	mt_clear_in_rcu(&mm->mm_mt);
	vma_iter_set(&vmi, vma->vm_end);
	free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
-		      USER_PGTABLES_CEILING, true);
+		      USER_PGTABLES_CEILING, USER_PGTABLES_CEILING, true);
	tlb_finish_mmu(&tlb);
 
	/*
diff --git a/mm/vma.c b/mm/vma.c
index fd270345c25d3..aa75ca8618609 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -486,6 +486,7 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		   /* mm_wr_locked = */ true);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
@@ -1232,7 +1233,7 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
-		      vms->unmap_end, mm_wr_locked);
+		      vms->unmap_end, vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
 }
-- 
2.47.2
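
A minimal sketch (not part of this patch) of how a failed-fork path might
use the split arguments once the follow-up change lands: page tables can
still be freed all the way up to USER_PGTABLES_CEILING, while the maple
tree walk is capped at the last address that was actually populated. The
helper name free_pgtables_on_failed_fork() and the tree_limit parameter
are hypothetical and only illustrate the intended calling convention.

static void free_pgtables_on_failed_fork(struct mmu_gather *tlb,
					 struct ma_state *mas,
					 struct vm_area_struct *first_vma,
					 unsigned long tree_limit)
{
	/*
	 * ceiling stays at USER_PGTABLES_CEILING so all page tables are
	 * torn down, while tree_max (tree_limit here) stops the VMA walk
	 * at the range the partially completed fork actually inserted
	 * into the tree.
	 */
	free_pgtables(tlb, mas, first_vma, FIRST_USER_ADDRESS,
		      USER_PGTABLES_CEILING, tree_limit,
		      /* mm_wr_locked = */ true);
}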