The unmap_region() calls need to pass through the page table limit for a
future patch.

No functional changes intended.

Signed-off-by: Liam R. Howlett
---
 mm/vma.c | 5 +++--
 mm/vma.h | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/vma.c b/mm/vma.c
index aa75ca8618609..39f3b55a020b2 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -474,7 +474,7 @@ void remove_vma(struct vm_area_struct *vma)
  * Called with the mm semaphore held.
  */
 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
-		unsigned long vma_min, unsigned long vma_max,
+		unsigned long vma_min, unsigned long vma_max, unsigned long pg_max,
 		struct vm_area_struct *prev, struct vm_area_struct *next)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -487,7 +487,7 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
 	mas_set(mas, vma->vm_end);
 	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 		      next ? next->vm_start : USER_PGTABLES_CEILING,
-		      next ? next->vm_start : USER_PGTABLES_CEILING,
+		      pg_max,
 		      /* mm_wr_locked = */ true);
 	tlb_finish_mmu(&tlb);
 }
@@ -2420,6 +2420,7 @@ static int __mmap_new_file_vma(struct mmap_state *map,
 		vma_iter_set(vmi, vma->vm_end);
 		/* Undo any partial mapping done by a device driver. */
 		unmap_region(&vmi->mas, vma, vma->vm_start, vma->vm_end,
+			     map->next ? map->next->vm_start : USER_PGTABLES_CEILING,
 			     map->prev, map->next);
 
 		return error;
diff --git a/mm/vma.h b/mm/vma.h
index 336dae295853e..ba203c0c1d89d 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -281,7 +281,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 void remove_vma(struct vm_area_struct *vma);
 
 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
-		unsigned long min, unsigned long max,
+		unsigned long min, unsigned long max, unsigned long pg_max,
 		struct vm_area_struct *prev, struct vm_area_struct *next);
 
 /* We are about to modify the VMA's flags. */
-- 
2.47.2
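
For reference, the patch moves the choice of the page table limit out to the caller: __mmap_new_file_vma() now computes it as map->next ? map->next->vm_start : USER_PGTABLES_CEILING and passes it to unmap_region() as pg_max, which forwards it to free_pgtables(). The standalone sketch below models only that selection logic; fake_vma, pick_pg_max and the USER_PGTABLES_CEILING value are illustrative stand-ins for this sketch, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-ins only -- not the kernel's definitions. */
#define USER_PGTABLES_CEILING 0UL	/* generic default; architectures may override */

struct fake_vma {
	unsigned long vm_start;
	unsigned long vm_end;
};

/*
 * Caller-side choice of the page table limit (pg_max): the start of the
 * next VMA when one exists, otherwise the user page table ceiling.
 */
static unsigned long pick_pg_max(const struct fake_vma *next)
{
	return next ? next->vm_start : USER_PGTABLES_CEILING;
}

int main(void)
{
	struct fake_vma next = {
		.vm_start = 0x7f0000000000UL,
		.vm_end   = 0x7f0000200000UL,
	};

	printf("pg_max with a next vma:    %#lx\n", pick_pg_max(&next));
	printf("pg_max without a next vma: %#lx\n", pick_pg_max(NULL));
	return 0;
}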