Add pte_clrhuge() helper function for architectures that enable
ARCH_SUPPORTS_HUGE_PFNMAP to clear huge page attributes from PTE
entries.

This function provides the inverse operation of pte_mkhuge() and will
be needed for upcoming huge page splitting, where PTE entries derived
from huge page mappings need to have their huge page attributes
cleared.

Future work will refactor pfn_pte() to automatically filter huge bits,
removing the need for pte_clrhuge() across all architectures.

Signed-off-by: Yin Tirui
---
 arch/arm64/include/asm/pgtable.h | 8 ++++++++
 arch/riscv/include/asm/pgtable.h | 5 +++++
 2 files changed, 13 insertions(+)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index abd2dee416b3..244755bad46f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -366,6 +366,14 @@ static inline pte_t pte_mkinvalid(pte_t pte)
 	return pte;
 }
 
+static inline pte_t pte_clrhuge(pte_t pte)
+{
+	pteval_t mask = PTE_TYPE_MASK & ~PTE_VALID;
+	pteval_t val = PTE_TYPE_PAGE & ~PTE_VALID;
+
+	return __pte((pte_val(pte) & ~mask) | val);
+}
+
 static inline pmd_t pmd_mkcont(pmd_t pmd)
 {
 	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 815067742939..b0a20ddf780a 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -455,6 +455,11 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	return pte;
 }
 
+static inline pte_t pte_clrhuge(pte_t pte)
+{
+	return pte;
+}
+
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
 #define pte_leaf_size(pte)	(pte_napot(pte) ?				\
 					napot_cont_size(napot_cont_order(pte)) :\
-- 
2.43.0
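
[Editor's note, not part of the patch] On architectures where the huge
attribute is a single flag bit in the PTE (x86's _PAGE_PSE, for
example), the helper reduces to clearing that bit. A minimal sketch,
with EXAMPLE_PTE_HUGE standing in for whatever bit a given
architecture uses:

	/*
	 * Sketch only: EXAMPLE_PTE_HUGE is a placeholder for the
	 * architecture's huge/leaf marker bit.
	 */
	static inline pte_t pte_clrhuge(pte_t pte)
	{
		return __pte(pte_val(pte) & ~EXAMPLE_PTE_HUGE);
	}

Architectures whose PTE format carries no huge bit at all, like the
riscv stub above, can simply return the pte unchanged.
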
Add PMD-level huge page support to remap_pfn_range(), automatically
creating huge mappings when prerequisites are satisfied (size,
alignment, architecture support, etc.) and falling back to normal page
mappings otherwise.

Implement special huge PMD splitting by utilizing the pgtable
deposit/withdraw mechanism. When splitting is needed, the deposited
pgtable is withdrawn and populated with individual PTEs created from
the original huge mapping, using pte_clrhuge() to clear huge page
attributes.

Update arch_needs_pgtable_deposit() to return true when PMD pfnmap
support is enabled, ensuring proper pgtable management for huge pfnmap
operations.

Signed-off-by: Yin Tirui
---
 include/linux/pgtable.h |  6 +++++-
 mm/huge_memory.c        | 26 +++++++++++++++++++-------
 mm/memory.c             | 40 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 64 insertions(+), 8 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 25a7257052ff..9ae015cb67a0 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1025,7 +1025,11 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
 					     pmd_t *pmdp);
 #endif
 
 #ifndef arch_needs_pgtable_deposit
-#define arch_needs_pgtable_deposit() (false)
+#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
+static inline bool arch_needs_pgtable_deposit(void)
+{
+	return IS_ENABLED(CONFIG_ARCH_SUPPORTS_PMD_PFNMAP);
+}
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9c38a95e9f09..b5eecd8fc1bf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2857,14 +2857,26 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 
 	if (!vma_is_anonymous(vma)) {
 		old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
-		/*
-		 * We are going to unmap this huge page. So
-		 * just go ahead and zap it
-		 */
-		if (arch_needs_pgtable_deposit())
-			zap_deposited_table(mm, pmd);
-		if (!vma_is_dax(vma) && vma_is_special_huge(vma))
+		if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
+			pte_t entry;
+
+			pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+			if (unlikely(!pgtable))
+				return;
+			pmd_populate(mm, &_pmd, pgtable);
+			pte = pte_offset_map(&_pmd, haddr);
+			entry = pte_clrhuge(pfn_pte(pmd_pfn(old_pmd), pmd_pgprot(old_pmd)));
+			set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
+			pte_unmap(pte);
+
+			smp_wmb(); /* make pte visible before pmd */
+			pmd_populate(mm, pmd, pgtable);
 			return;
+		} else if (arch_needs_pgtable_deposit()) {
+			/* Zap for the non-special mappings. */
+			zap_deposited_table(mm, pmd);
+		}
+
 		if (unlikely(is_pmd_migration_entry(old_pmd))) {
 			swp_entry_t entry;
 
diff --git a/mm/memory.c b/mm/memory.c
index 0ba4f6b71847..4e8f2248a86f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2705,6 +2705,40 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	return err;
 }
 
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static int remap_try_huge_pmd(struct mm_struct *mm, pmd_t *pmd,
+			      unsigned long addr, unsigned long end,
+			      unsigned long pfn, pgprot_t prot)
+{
+	pgtable_t pgtable;
+	spinlock_t *ptl;
+
+	if ((end - addr) != PMD_SIZE)
+		return 0;
+
+	if (!IS_ALIGNED(addr, PMD_SIZE))
+		return 0;
+
+	if (!IS_ALIGNED(pfn, HPAGE_PMD_NR))
+		return 0;
+
+	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
+		return 0;
+
+	pgtable = pte_alloc_one(mm);
+	if (unlikely(!pgtable))
+		return 0;
+
+	mm_inc_nr_ptes(mm);
+	ptl = pmd_lock(mm, pmd);
+	set_pmd_at(mm, addr, pmd, pmd_mkspecial(pmd_mkhuge(pfn_pmd(pfn, prot))));
+	pgtable_trans_huge_deposit(mm, pmd, pgtable);
+	spin_unlock(ptl);
+
+	return 1;
+}
+#endif
+
 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 			unsigned long addr, unsigned long end,
 			unsigned long pfn, pgprot_t prot)
@@ -2720,6 +2754,12 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 	VM_BUG_ON(pmd_trans_huge(*pmd));
 	do {
 		next = pmd_addr_end(addr, end);
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+		if (remap_try_huge_pmd(mm, pmd, addr, next,
+				       pfn + (addr >> PAGE_SHIFT), prot)) {
+			continue;
+		}
+#endif
 		err = remap_pte_range(mm, pmd, addr, next,
 				pfn + (addr >> PAGE_SHIFT), prot);
 		if (err)
-- 
2.43.0
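
[Editor's note, not part of the series] For context, a hypothetical
driver ->mmap handler shows how the new path would be reached: when the
VMA covers exactly one PMD_SIZE-aligned block and the pfn is
HPAGE_PMD_NR-aligned, remap_pfn_range() can now install a single
special huge PMD instead of a page table full of PTEs. EXAMPLE_PHYS_BASE
and example_mmap() are made-up names for illustration only:

	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Assumed, illustration-only physical region (e.g. a device carveout). */
	#define EXAMPLE_PHYS_BASE	0x100000000UL

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;
		unsigned long pfn = EXAMPLE_PHYS_BASE >> PAGE_SHIFT;

		/*
		 * If userspace mapped a PMD_SIZE-long, PMD_SIZE-aligned window,
		 * remap_pmd_range() takes the remap_try_huge_pmd() path and
		 * deposits a pgtable for a possible later split; otherwise it
		 * falls back to remap_pte_range() as before.
		 */
		return remap_pfn_range(vma, vma->vm_start, pfn, size,
				       vma->vm_page_prot);
	}
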