In the current kernel, there is spurious fault fixing support for PTEs,
but not for huge PMDs, because no architecture has needed it so far.
However, the next patch in this series changes the write protection
fault handling logic on arm64, so that some stale huge PMD entries may
remain in the TLB.  These entries need to be flushed via a huge PMD
spurious fault fixing mechanism.

So, make touch_pmd() return whether the PMD entry was changed, and add
a flush_tlb_fix_spurious_fault_pmd() hook (a no-op by default) that
huge_pmd_set_accessed() calls on write faults when the entry was not
changed, mirroring the PTE-level logic in handle_pte_fault().

Signed-off-by: Huang Ying
Cc: Catalin Marinas
Cc: Will Deacon
Cc: Andrew Morton
Cc: David Hildenbrand
Cc: Lorenzo Stoakes
Cc: Vlastimil Babka
Cc: Zi Yan
Cc: Baolin Wang
Cc: Ryan Roberts
Cc: Yang Shi
Cc: "Christoph Lameter (Ampere)"
Cc: Dev Jain
Cc: Barry Song
Cc: Anshuman Khandual
Cc: Yicong Yang
Cc: Kefeng Wang
Cc: Kevin Brodsky
Cc: Yin Fengwei
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
---
 include/linux/pgtable.h |  4 ++++
 mm/huge_memory.c        | 22 +++++++++++++++++-----
 mm/internal.h           |  4 ++--
 3 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 32e8457ad535..341622ec80e4 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1232,6 +1232,10 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 #define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
 #endif
 
+#ifndef flush_tlb_fix_spurious_fault_pmd
+#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) do { } while (0)
+#endif
+
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1b81680b4225..8533457c52b7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1641,17 +1641,22 @@ vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
 EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-	       pmd_t *pmd, bool write)
+/* Returns whether the PMD entry was changed */
+int touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+	      pmd_t *pmd, bool write)
 {
+	int changed;
 	pmd_t _pmd;
 
 	_pmd = pmd_mkyoung(*pmd);
 	if (write)
 		_pmd = pmd_mkdirty(_pmd);
-	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-				  pmd, _pmd, write))
+	changed = pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
+					pmd, _pmd, write);
+	if (changed)
 		update_mmu_cache_pmd(vma, addr, pmd);
+
+	return changed;
 }
 
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -1849,7 +1854,14 @@ void huge_pmd_set_accessed(struct vm_fault *vmf)
 	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
 		goto unlock;
 
-	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
+	if (!touch_pmd(vmf->vma, vmf->address, vmf->pmd, write)) {
+		/* See the corresponding comments in handle_pte_fault(). */
+		if (vmf->flags & FAULT_FLAG_TRIED)
+			goto unlock;
+		if (vmf->flags & FAULT_FLAG_WRITE)
+			flush_tlb_fix_spurious_fault_pmd(vmf->vma, vmf->address,
+							 vmf->pmd);
+	}
 
 unlock:
 	spin_unlock(vmf->ptl);

diff --git a/mm/internal.h b/mm/internal.h
index 1561fc2ff5b8..8b58ab00a7cd 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1402,8 +1402,8 @@ int __must_check try_grab_folio(struct folio *folio, int refs,
  */
 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
 	       pud_t *pud, bool write);
-void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-	       pmd_t *pmd, bool write);
+int touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+	      pmd_t *pmd, bool write);
 
 /*
  * Parses a string with mem suffixes into its order. Useful to parse kernel

--
2.39.5
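For illustration (not part of the patch): an architecture that can leave
stale huge PMD entries in its TLB would override the new hook in its
asm/pgtable.h, so that the generic no-op fallback above is never seen.
A minimal sketch, assuming a flush of the PMD-sized range around the
faulting address is sufficient on the architecture in question; the
override below is hypothetical, though flush_tlb_range(),
HPAGE_PMD_MASK and HPAGE_PMD_SIZE are existing kernel symbols:

/*
 * Hypothetical per-arch override (sketch only): flush the whole
 * huge-page range mapped by the possibly-stale PMD entry.  Whether
 * a range flush is the right granularity is an arch-specific choice.
 */
#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp)		\
	flush_tlb_range((vma), (address) & HPAGE_PMD_MASK,		\
			((address) & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE)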