The tmpfs.rst doc references the has_transparent_hugepage() helper, which is an implementation detail in the kernel and not relevant for users wishing to properly configure THP support for tmpfs. Remove it. Acked-by: David Hildenbrand (Red Hat) Signed-off-by: Luiz Capitulino --- Documentation/filesystems/tmpfs.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Documentation/filesystems/tmpfs.rst b/Documentation/filesystems/tmpfs.rst index d677e0428c3f..46fc986c3388 100644 --- a/Documentation/filesystems/tmpfs.rst +++ b/Documentation/filesystems/tmpfs.rst @@ -109,9 +109,8 @@ noswap Disables swap. Remounts must respect the original settings. ====== =========================================================== tmpfs also supports Transparent Huge Pages which requires a kernel -configured with CONFIG_TRANSPARENT_HUGEPAGE and with huge supported for -your system (has_transparent_hugepage(), which is architecture specific). -The mount options for this are: +configured with CONFIG_TRANSPARENT_HUGEPAGE and with huge pages +supported for your system. The mount options for this are: ================ ============================================================== huge=never Do not allocate huge pages. This is the default. -- 2.52.0 Currently, we have two helpers that check for PMD-sized pages but have different names and slightly different semantics: - has_transparent_hugepage(): the name suggests it checks if THP is enabled, but it actually checks if the CPU supports PMD-sized pages. It may perform a hardware check, so it can't be used in fast paths - thp_disabled_by_hw(): the name suggests it checks if THP is disabled by the hardware, but it just returns a cached value acquired with has_transparent_hugepage(). This way, this helper can be called from fast paths This commit introduces a new helper called pgtable_has_pmd_leaves() which is intended to replace both has_transparent_hugepage() and thp_disabled_by_hw(). 
pgtable_has_pmd_leaves() has very clear semantics: it returns true if the CPU supports PMD-sized pages and false otherwise. It always returns a cached value, so it can be used in fast paths. The new helper requires an initialization step that needs to be performed very early during boot because there are pgtable_has_pmd_leaves() users that want to call it from __setup() handlers. This initialization is performed by init_arch_has_pmd_leaves(), called early from start_kernel(). The next commits will convert users of both has_transparent_hugepage() and thp_disabled_by_hw() to pgtable_has_pmd_leaves(). Signed-off-by: Luiz Capitulino --- include/linux/pgtable.h | 7 +++++++ init/main.c | 1 + mm/memory.c | 8 ++++++++ 3 files changed, 16 insertions(+) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 652f287c1ef6..6733f90a1da4 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -2017,6 +2017,13 @@ static inline const char *pgtable_level_to_str(enum pgtable_level level) } } +extern bool __arch_has_pmd_leaves; +static inline bool pgtable_has_pmd_leaves(void) +{ + return __arch_has_pmd_leaves; +} +void __init init_arch_has_pmd_leaves(void); + #endif /* !__ASSEMBLY__ */ #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT) diff --git a/init/main.c b/init/main.c index b84818ad9685..ad1209fffcde 100644 --- a/init/main.c +++ b/init/main.c @@ -1036,6 +1036,7 @@ void start_kernel(void) smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ early_numa_node_init(); boot_cpu_hotplug_init(); + init_arch_has_pmd_leaves(); print_kernel_cmdline(saved_command_line); /* parameters may set static keys */ diff --git a/mm/memory.c b/mm/memory.c index 2a55edc48a65..79bd59d5243f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -177,6 +177,14 @@ static int __init init_zero_pfn(void) } early_initcall(init_zero_pfn); +bool __arch_has_pmd_leaves __read_mostly; +EXPORT_SYMBOL(__arch_has_pmd_leaves); + +void __init init_arch_has_pmd_leaves(void) +{ + 
__arch_has_pmd_leaves = has_transparent_hugepage(); +} + void mm_trace_rss_stat(struct mm_struct *mm, int member) { trace_rss_stat(mm, member); -- 2.52.0 dax_align_valid() uses has_transparent_hugepage() to check if PMD-sized pages are supported, use pgtable_has_pmd_leaves() instead. Acked-by: David Hildenbrand (Red Hat) Signed-off-by: Luiz Capitulino --- drivers/dax/dax-private.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h index 0867115aeef2..10aeaec9e789 100644 --- a/drivers/dax/dax-private.h +++ b/drivers/dax/dax-private.h @@ -117,7 +117,7 @@ static inline bool dax_align_valid(unsigned long align) { if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) return true; - if (align == PMD_SIZE && has_transparent_hugepage()) + if (align == PMD_SIZE && pgtable_has_pmd_leaves()) return true; if (align == PAGE_SIZE) return true; -- 2.52.0 igt_can_allocate_thp() uses has_transparent_hugepage() to check if PMD-sized pages are supported, use pgtable_has_pmd_leaves() instead. Since igt_can_allocate_thp() wants to use PMD-sized pages with THP, also check if THP is built-in with IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE). 
Signed-off-by: Luiz Capitulino --- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index bd08605a1611..dcd1f1141513 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1316,7 +1316,9 @@ typedef struct drm_i915_gem_object * static inline bool igt_can_allocate_thp(struct drm_i915_private *i915) { - return i915->mm.gemfs && has_transparent_hugepage(); + return i915->mm.gemfs && + IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + pgtable_has_pmd_leaves(); } static struct drm_i915_gem_object * -- 2.52.0 nd_pfn_supported_alignments() and nd_pfn_default_alignment() use has_transparent_hugepage() to check if PMD-sized pages are supported, use pgtable_has_pmd_leaves() instead. Since both functions want to use PMD-sized pages with THP, also check if THP is built-in with IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE). 
Signed-off-by: Luiz Capitulino --- drivers/nvdimm/pfn_devs.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 42b172fc5576..7ee8ec50e72d 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -94,7 +94,8 @@ static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments) alignments[0] = PAGE_SIZE; - if (has_transparent_hugepage()) { + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + pgtable_has_pmd_leaves()) { alignments[1] = HPAGE_PMD_SIZE; if (has_transparent_pud_hugepage()) alignments[2] = HPAGE_PUD_SIZE; @@ -109,7 +110,8 @@ static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments) static unsigned long nd_pfn_default_alignment(void) { - if (has_transparent_hugepage()) + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + pgtable_has_pmd_leaves()) return HPAGE_PMD_SIZE; return PAGE_SIZE; } -- 2.52.0 debug_vm_pgtable calls has_transparent_hugepage() in multiple places to check if PMD-sized pages are supported, use pgtable_has_pmd_leaves() instead. Signed-off-by: Luiz Capitulino --- mm/debug_vm_pgtable.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index ae9b9310d96f..ec02bafd9d45 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -177,7 +177,7 @@ static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) unsigned long val = idx, *ptr = &val; pmd_t pmd; - if (!has_transparent_hugepage()) + if (!pgtable_has_pmd_leaves()) return; pr_debug("Validating PMD basic (%pGv)\n", ptr); @@ -222,7 +222,7 @@ static void __init pmd_advanced_tests(struct pgtable_debug_args *args) pmd_t pmd; unsigned long vaddr = args->vaddr; - if (!has_transparent_hugepage()) + if (!pgtable_has_pmd_leaves()) return; page = (args->pmd_pfn != ULONG_MAX) ? 
pfn_to_page(args->pmd_pfn) : NULL; @@ -283,7 +283,7 @@ static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { pmd_t pmd; - if (!has_transparent_hugepage()) + if (!pgtable_has_pmd_leaves()) return; pr_debug("Validating PMD leaf\n"); @@ -688,7 +688,7 @@ static void __init pmd_protnone_tests(struct pgtable_debug_args *args) if (!IS_ENABLED(CONFIG_NUMA_BALANCING)) return; - if (!has_transparent_hugepage()) + if (!pgtable_has_pmd_leaves()) return; pr_debug("Validating PMD protnone\n"); @@ -737,7 +737,7 @@ static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) if (!pgtable_supports_soft_dirty()) return; - if (!has_transparent_hugepage()) + if (!pgtable_has_pmd_leaves()) return; pr_debug("Validating PMD soft dirty\n"); @@ -754,7 +754,7 @@ static void __init pmd_leaf_soft_dirty_tests(struct pgtable_debug_args *args) !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION)) return; - if (!has_transparent_hugepage()) + if (!pgtable_has_pmd_leaves()) return; pr_debug("Validating PMD swap soft dirty\n"); @@ -825,7 +825,7 @@ static void __init pmd_softleaf_tests(struct pgtable_debug_args *args) swp_entry_t arch_entry; pmd_t pmd1, pmd2; - if (!has_transparent_hugepage()) + if (!pgtable_has_pmd_leaves()) return; pr_debug("Validating PMD swap\n"); @@ -906,7 +906,7 @@ static void __init pmd_thp_tests(struct pgtable_debug_args *args) { pmd_t pmd; - if (!has_transparent_hugepage()) + if (!pgtable_has_pmd_leaves()) return; pr_debug("Validating PMD based THP\n"); @@ -993,7 +993,7 @@ static void __init destroy_args(struct pgtable_debug_args *args) } if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - has_transparent_hugepage() && + pgtable_has_pmd_leaves() && args->pmd_pfn != ULONG_MAX) { if (args->is_contiguous_page) { free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER)); @@ -1253,7 +1253,7 @@ static int __init init_args(struct pgtable_debug_args *args) } if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - has_transparent_hugepage()) { + pgtable_has_pmd_leaves()) { 
page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER); if (page) { args->pmd_pfn = page_to_pfn(page); -- 2.52.0 Shmem uses has_transparent_hugepage() to check if PMD-sized pages are supported, use pgtable_has_pmd_leaves() instead. Signed-off-by: Luiz Capitulino --- mm/shmem.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index b329b5302c48..ad5825667b49 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -689,7 +689,8 @@ static int shmem_parse_huge(const char *str) else return -EINVAL; - if (!has_transparent_hugepage() && + if (!(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + pgtable_has_pmd_leaves()) && huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) return -EINVAL; @@ -4655,7 +4656,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) ctx->huge = result.uint_32; if (ctx->huge != SHMEM_HUGE_NEVER && !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - has_transparent_hugepage())) + pgtable_has_pmd_leaves())) goto unsupported_parameter; ctx->seen |= SHMEM_SEEN_HUGE; break; @@ -5439,7 +5440,7 @@ void __init shmem_init(void) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) + if (pgtable_has_pmd_leaves() && shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; else shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */ -- 2.52.0 Now that all has_transparent_hugepage() callers have been converted to pgtable_has_pmd_leaves(), rename has_transparent_hugepage() to arch_has_pmd_leaves() since that's what the helper checks for. arch_has_pmd_leaves() is supposed to be called only by init_arch_has_pmd_leaves(). The only temporary exception is hugepage_init() which will be converted in a future commit. 
Signed-off-by: Luiz Capitulino --- arch/mips/include/asm/pgtable.h | 4 ++-- arch/mips/mm/tlb-r4k.c | 4 ++-- arch/powerpc/include/asm/book3s/64/hash-4k.h | 2 +- arch/powerpc/include/asm/book3s/64/hash-64k.h | 2 +- arch/powerpc/include/asm/book3s/64/pgtable.h | 10 +++++----- arch/powerpc/include/asm/book3s/64/radix.h | 2 +- arch/powerpc/mm/book3s64/hash_pgtable.c | 4 ++-- arch/s390/include/asm/pgtable.h | 4 ++-- arch/x86/include/asm/pgtable.h | 4 ++-- include/linux/pgtable.h | 4 ++-- mm/huge_memory.c | 2 +- mm/memory.c | 2 +- 12 files changed, 22 insertions(+), 22 deletions(-) diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 9c06a612d33a..0080724a7df5 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -615,8 +615,8 @@ unsigned long io_remap_pfn_range_pfn(unsigned long pfn, unsigned long size); /* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/ #define pmdp_establish generic_pmdp_establish -#define has_transparent_hugepage has_transparent_hugepage -extern int has_transparent_hugepage(void); +#define arch_has_pmd_leaves arch_has_pmd_leaves +extern int arch_has_pmd_leaves(void); static inline int pmd_trans_huge(pmd_t pmd) { diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 44a662536148..4fcc8195a130 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -432,7 +432,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, #ifdef CONFIG_TRANSPARENT_HUGEPAGE -int has_transparent_hugepage(void) +int arch_has_pmd_leaves(void) { static unsigned int mask = -1; @@ -448,7 +448,7 @@ int has_transparent_hugepage(void) } return mask == PM_HUGE_MASK; } -EXPORT_SYMBOL(has_transparent_hugepage); +EXPORT_SYMBOL(arch_has_pmd_leaves); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 8e5bd9902bed..6744c2287199 100644 --- 
a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -165,7 +165,7 @@ extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); -extern int hash__has_transparent_hugepage(void); +extern int hash__arch_has_pmd_leaves(void); #endif #endif /* !__ASSEMBLER__ */ diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index 7deb3a66890b..9392aba5e5dc 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -278,7 +278,7 @@ extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); -extern int hash__has_transparent_hugepage(void); +extern int hash__arch_has_pmd_leaves(void); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* __ASSEMBLER__ */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index aac8ce30cd3b..6ed036b3d3c2 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1094,14 +1094,14 @@ static inline void update_mmu_cache_pud(struct vm_area_struct *vma, { } -extern int hash__has_transparent_hugepage(void); -static inline int has_transparent_hugepage(void) +extern int hash__arch_has_pmd_leaves(void); +static inline int arch_has_pmd_leaves(void) { if (radix_enabled()) - return radix__has_transparent_hugepage(); - return hash__has_transparent_hugepage(); + return radix__arch_has_pmd_leaves(); + return hash__arch_has_pmd_leaves(); } -#define has_transparent_hugepage has_transparent_hugepage 
+#define arch_has_pmd_leaves arch_has_pmd_leaves static inline int has_transparent_pud_hugepage(void) { diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index da954e779744..c884a119cbd9 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -298,7 +298,7 @@ extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm, pud_t radix__pudp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pud_t *pudp); -static inline int radix__has_transparent_hugepage(void) +static inline int radix__arch_has_pmd_leaves(void) { /* For radix 2M at PMD level means thp */ if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT) diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index 82d31177630b..1dec64bf0c75 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -366,7 +366,7 @@ pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm, return old_pmd; } -int hash__has_transparent_hugepage(void) +int hash__arch_has_pmd_leaves(void) { if (!mmu_has_feature(MMU_FTR_16M_PAGE)) @@ -395,7 +395,7 @@ int hash__has_transparent_hugepage(void) return 1; } -EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage); +EXPORT_SYMBOL_GPL(hash__arch_has_pmd_leaves); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index bca9b29778c3..4398855d558e 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1866,8 +1866,8 @@ static inline int pmd_trans_huge(pmd_t pmd) return pmd_leaf(pmd); } -#define has_transparent_hugepage has_transparent_hugepage -static inline int has_transparent_hugepage(void) +#define arch_has_pmd_leaves arch_has_pmd_leaves +static inline int arch_has_pmd_leaves(void) { return cpu_has_edat1() ? 
1 : 0; } diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index e33df3da6980..08d109280e36 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -313,8 +313,8 @@ static inline int pud_trans_huge(pud_t pud) } #endif -#define has_transparent_hugepage has_transparent_hugepage -static inline int has_transparent_hugepage(void) +#define arch_has_pmd_leaves arch_has_pmd_leaves +static inline int arch_has_pmd_leaves(void) { return boot_cpu_has(X86_FEATURE_PSE); } diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 6733f90a1da4..b4d10ea9e45a 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -2039,8 +2039,8 @@ void __init init_arch_has_pmd_leaves(void); #endif #endif -#ifndef has_transparent_hugepage -#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE) +#ifndef arch_has_pmd_leaves +#define arch_has_pmd_leaves() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE) #endif #ifndef has_transparent_pud_hugepage diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 40cf59301c21..b80a897b9b6f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -905,7 +905,7 @@ static int __init hugepage_init(void) int err; struct kobject *hugepage_kobj; - if (!has_transparent_hugepage()) { + if (!arch_has_pmd_leaves()) { transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED; return -EINVAL; } diff --git a/mm/memory.c b/mm/memory.c index 79bd59d5243f..e816d4b53bc0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -182,7 +182,7 @@ EXPORT_SYMBOL(__arch_has_pmd_leaves); void __init init_arch_has_pmd_leaves(void) { - __arch_has_pmd_leaves = has_transparent_hugepage(); + __arch_has_pmd_leaves = arch_has_pmd_leaves(); } void mm_trace_rss_stat(struct mm_struct *mm, int member) -- 2.52.0 Despite its name, thp_disabled_by_hw() only checks whether the architecture supports PMD-sized pages. 
It returns true when TRANSPARENT_HUGEPAGE_UNSUPPORTED is set, which occurs if the architecture implements arch_has_pmd_leaves() and that function returns false. Since pgtable_has_pmd_leaves() provides the same semantics, use it instead. Signed-off-by: Luiz Capitulino --- include/linux/huge_mm.h | 7 ------- mm/huge_memory.c | 6 ++---- mm/memory.c | 2 +- mm/shmem.c | 2 +- 4 files changed, 4 insertions(+), 13 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index a4d9f964dfde..e291a650b10f 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -47,7 +47,6 @@ vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, bool write); enum transparent_hugepage_flag { - TRANSPARENT_HUGEPAGE_UNSUPPORTED, TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, @@ -352,12 +351,6 @@ static inline bool vma_thp_disabled(struct vm_area_struct *vma, return mm_flags_test(MMF_DISABLE_THP_EXCEPT_ADVISED, vma->vm_mm); } -static inline bool thp_disabled_by_hw(void) -{ - /* If the hardware/firmware marked hugepage support disabled. */ - return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED); -} - unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b80a897b9b6f..1e5ea2e47f79 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -122,7 +122,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, if (!vma->vm_mm) /* vdso */ return 0; - if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags, forced_collapse)) + if (!pgtable_has_pmd_leaves() || vma_thp_disabled(vma, vm_flags, forced_collapse)) return 0; /* khugepaged doesn't collapse DAX vma, but page fault is fine. 
*/ @@ -905,10 +905,8 @@ static int __init hugepage_init(void) int err; struct kobject *hugepage_kobj; - if (!arch_has_pmd_leaves()) { - transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED; + if (!pgtable_has_pmd_leaves()) return -EINVAL; - } /* * hugepages can't be allocated by the buddy allocator */ diff --git a/mm/memory.c b/mm/memory.c index e816d4b53bc0..c35df4c477c1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5383,7 +5383,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *pa * PMD mappings if THPs are disabled. As we already have a THP, * behave as if we are forcing a collapse. */ - if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags, + if (!pgtable_has_pmd_leaves() || vma_thp_disabled(vma, vma->vm_flags, /* forced_collapse=*/ true)) return ret; diff --git a/mm/shmem.c b/mm/shmem.c index ad5825667b49..6b350e336f8c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1800,7 +1800,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, vm_flags_t vm_flags = vma ? vma->vm_flags : 0; unsigned int global_orders; - if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags, shmem_huge_force))) + if (!pgtable_has_pmd_leaves() || (vma && vma_thp_disabled(vma, vm_flags, shmem_huge_force))) return 0; global_orders = shmem_huge_global_enabled(inode, index, write_end, -- 2.52.0 If PMD-sized pages are not supported on an architecture (i.e. the arch implements arch_has_pmd_leaves() and it returns false) then the current code disables all THP, including mTHP. This commit fixes this by allowing mTHP to be always enabled for all archs. When PMD-sized pages are not supported, the PMD-sized sysfs entry won't be created and PMD-sized mappings will be disallowed at page-fault time. 
Signed-off-by: Luiz Capitulino --- mm/huge_memory.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1e5ea2e47f79..882331592928 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -115,6 +115,9 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, else supported_orders = THP_ORDERS_ALL_FILE_DEFAULT; + if (!pgtable_has_pmd_leaves()) + supported_orders &= ~(BIT(PMD_ORDER) | BIT(PUD_ORDER)); + orders &= supported_orders; if (!orders) return 0; @@ -122,7 +125,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, if (!vma->vm_mm) /* vdso */ return 0; - if (!pgtable_has_pmd_leaves() || vma_thp_disabled(vma, vm_flags, forced_collapse)) + if (vma_thp_disabled(vma, vm_flags, forced_collapse)) return 0; /* khugepaged doesn't collapse DAX vma, but page fault is fine. */ @@ -806,6 +809,9 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) } orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT; + if (!pgtable_has_pmd_leaves()) + orders &= ~(BIT(PMD_ORDER) | BIT(PUD_ORDER)); + order = highest_order(orders); while (orders) { thpsize = thpsize_create(order, *hugepage_kobj); @@ -905,9 +911,6 @@ static int __init hugepage_init(void) int err; struct kobject *hugepage_kobj; - if (!pgtable_has_pmd_leaves()) - return -EINVAL; - /* * hugepages can't be allocated by the buddy allocator */ -- 2.52.0 Historically, THP support on x86 checked the PSE feature bit to enable THP. On 64-bit, this check is redundant since PSE is always enabled by default for compatibility. On 32-bit, PSE can enable 2 MiB or 4 MiB page sizes so it must be checked. To clean this up, this commit: 1. Drops arch_has_pmd_leaves() from common x86 code. For 64-bit, we assume PMD-sized pages are always supported 2. 
Checks for PSE only on 32-bit by implementing arch_has_pmd_leaves() Signed-off-by: Luiz Capitulino --- arch/x86/include/asm/pgtable.h | 6 ------ arch/x86/include/asm/pgtable_32.h | 6 ++++++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 08d109280e36..55b88de5178f 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -313,12 +313,6 @@ static inline int pud_trans_huge(pud_t pud) } #endif -#define arch_has_pmd_leaves arch_has_pmd_leaves -static inline int arch_has_pmd_leaves(void) -{ - return boot_cpu_has(X86_FEATURE_PSE); -} - #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP static inline bool pmd_special(pmd_t pmd) { diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index b612cc57a4d3..3bd51cfa431e 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -45,6 +45,12 @@ do { \ flush_tlb_one_kernel((vaddr)); \ } while (0) +#define arch_has_pmd_leaves arch_has_pmd_leaves +static inline int arch_has_pmd_leaves(void) +{ + return boot_cpu_has(X86_FEATURE_PSE); +} + #endif /* !__ASSEMBLER__ */ /* -- 2.52.0