From: Jinliang Zheng

Since commit 6eb82f994026 ("x86/mm: Pre-allocate P4D/PUD pages for
vmalloc area"), kernel mappings in the vmalloc area no longer need to be
synchronized on x86_64, and commit 58a18fe95e83 ("x86/mm/64: Do not sync
vmalloc/ioremap mappings") stopped doing so. However, commit
6659d0279980 ("x86/mm/64: define ARCH_PAGE_TABLE_SYNC_MASK and
arch_sync_kernel_mappings()") reintroduced the synchronization, because
the vmalloc paths test the same ARCH_PAGE_TABLE_SYNC_MASK that it
defines.

Introduce ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC and check it, instead of
ARCH_PAGE_TABLE_SYNC_MASK, in the vmalloc and __apply_to_page_range()
paths, so the vmalloc area is no longer synchronized unnecessarily.
Architectures that still need the vmalloc sync (arm with
CONFIG_VMAP_STACK, 32-bit x86) define the new mask to the existing one;
everywhere else it defaults to 0.

Fixes: 6659d0279980 ("x86/mm/64: define ARCH_PAGE_TABLE_SYNC_MASK and arch_sync_kernel_mappings()")
Signed-off-by: Jinliang Zheng
---
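A note for reviewers, illustration only and not part of the patch: the
sketch below is a minimal user-space model of the gating this change
relies on. The PGTBL_PMD_MODIFIED bit value, the demo addresses, and the
stand-in functions are all invented for the example; the real
definitions live in include/linux/pgtable.h and mm/vmalloc.c.

/*
 * Illustration only; not kernel code. The bit value and addresses are
 * made up; the real enum pgtbl_mod_mask is in include/linux/pgtable.h.
 */
#include <stdio.h>

#define PGTBL_PMD_MODIFIED			(1U << 0)	/* demo value */

/*
 * Models x86_64 after this patch: the generic 0 default applies,
 * because the vmalloc P4D/PUD pages are pre-allocated.
 */
#define ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC	0

/* Stand-in for the arch hook. */
static void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	printf("sync kernel mappings [%#lx, %#lx)\n", start, end);
}

/* Stand-in for vmap_range_noflush() and friends in mm/vmalloc.c. */
static void vmap_model(unsigned long start, unsigned long end)
{
	unsigned int mask = 0;

	mask |= PGTBL_PMD_MODIFIED;	/* pretend the walk touched a PMD */

	/*
	 * With the mask defined to 0, this test is constant-false and
	 * the compiler elides the call entirely.
	 */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC)
		arch_sync_kernel_mappings(start, end);
}

int main(void)
{
	vmap_model(0x1000UL, 0x2000UL);	/* prints nothing in this model */
	return 0;
}

Redefining ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC to PGTBL_PMD_MODIFIED in
the model makes the call fire again, matching the per-arch behavior of
arm with CONFIG_VMAP_STACK and 32-bit x86 in the diff below.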
 arch/arm/include/asm/page.h                 | 3 ++-
 arch/x86/include/asm/pgtable-2level_types.h | 3 ++-
 arch/x86/include/asm/pgtable-3level_types.h | 3 ++-
 include/linux/pgtable.h                     | 4 ++++
 mm/memory.c                                 | 2 +-
 mm/vmalloc.c                                | 6 +++---
 6 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ef11b721230e..764afc1d0aba 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -167,7 +167,8 @@ extern void copy_page(void *to, const void *from);
 #else
 #include <asm/pgtable-2level-types.h>
 #ifdef CONFIG_VMAP_STACK
-#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
+#define ARCH_PAGE_TABLE_SYNC_MASK		PGTBL_PMD_MODIFIED
+#define ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC	ARCH_PAGE_TABLE_SYNC_MASK
 #endif
 #endif
 
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index 54690bd4ddbe..650b12c25c0c 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -18,7 +18,8 @@ typedef union {
 } pte_t;
 #endif	/* !__ASSEMBLER__ */
 
-#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
+#define ARCH_PAGE_TABLE_SYNC_MASK		PGTBL_PMD_MODIFIED
+#define ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC	ARCH_PAGE_TABLE_SYNC_MASK
 
 /*
  * Traditional i386 two-level paging structure:
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 580b09bf6a45..272d946a3c7d 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -27,7 +27,8 @@ typedef union {
 } pmd_t;
 #endif	/* !__ASSEMBLER__ */
 
-#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
+#define ARCH_PAGE_TABLE_SYNC_MASK		PGTBL_PMD_MODIFIED
+#define ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC	ARCH_PAGE_TABLE_SYNC_MASK
 
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 2b80fd456c8b..53b97c5773ba 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1476,6 +1476,10 @@ static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned
 #define ARCH_PAGE_TABLE_SYNC_MASK 0
 #endif
 
+#ifndef ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC
+#define ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC 0
+#endif
+
 /*
  * There is no default implementation for arch_sync_kernel_mappings(). It is
  * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
diff --git a/mm/memory.c b/mm/memory.c
index 0ba4f6b71847..cd2488043f8f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3170,7 +3170,7 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 			break;
 	} while (pgd++, addr = next, addr != end);
 
-	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+	if (mask & ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC)
 		arch_sync_kernel_mappings(start, start + size);
 
 	return err;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5edd536ba9d2..2fe2480de5dc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -311,7 +311,7 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
 			break;
 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
 
-	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+	if (mask & ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC)
 		arch_sync_kernel_mappings(start, end);
 
 	return err;
@@ -484,7 +484,7 @@ void __vunmap_range_noflush(unsigned long start, unsigned long end)
 		vunmap_p4d_range(pgd, addr, next, &mask);
 	} while (pgd++, addr = next, addr != end);
 
-	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+	if (mask & ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC)
 		arch_sync_kernel_mappings(start, end);
 }
 
@@ -629,7 +629,7 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
 			break;
 	} while (pgd++, addr = next, addr != end);
 
-	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+	if (mask & ARCH_PAGE_TABLE_SYNC_MASK_VMALLOC)
 		arch_sync_kernel_mappings(start, end);
 
 	return err;
-- 
2.49.0