In the current kernel, there is spurious fault fixing support for pte,
but not for huge pmd, because no architecture needs it.  However, the
next patch in this series will change the write protection fault
handling logic on arm64, so that some stale huge pmd entries may remain
in the TLB.  These entries need to be flushed via the huge pmd spurious
fault fixing mechanism.

Signed-off-by: Huang Ying
Cc: Catalin Marinas
Cc: Will Deacon
Cc: Andrew Morton
Cc: David Hildenbrand
Cc: Lorenzo Stoakes
Cc: Vlastimil Babka
Cc: Zi Yan
Cc: Baolin Wang
Cc: Ryan Roberts
Cc: Yang Shi
Cc: "Christoph Lameter (Ampere)"
Cc: Dev Jain
Cc: Barry Song
Cc: Anshuman Khandual
Cc: Yicong Yang
Cc: Kefeng Wang
Cc: Kevin Brodsky
Cc: Yin Fengwei
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
---
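Note (illustration only, not part of this patch): an architecture that
needs the new hook can override the generic no-op from its
<asm/pgtable.h>, in the same way the pte-level
flush_tlb_fix_spurious_fault() above it is handled.  The sketch below
assumes the architecture can simply reuse flush_tlb_page(); the actual
arm64 definition is added by the next patch in the series and may
differ:

	/* Sketch of a per-arch override in <asm/pgtable.h>; the generic
	 * fallback is wrapped in #ifndef, so this definition wins. */
	#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp)	\
		flush_tlb_page(vma, address)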
 include/linux/pgtable.h |  4 ++++
 mm/huge_memory.c        | 25 +++++++++++++++++++++----
 mm/internal.h           |  4 ++--
 3 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 2b80fd456c8b..d7604ad34d36 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1232,6 +1232,10 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 #define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
 #endif
 
+#ifndef flush_tlb_fix_spurious_fault_pmd
+#define flush_tlb_fix_spurious_fault_pmd(vma, address, ptep) do { } while (0)
+#endif
+
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9c38a95e9f09..0e2ef6b007c2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1650,8 +1650,8 @@ vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
 EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-	       pmd_t *pmd, bool write)
+int touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+	      pmd_t *pmd, bool write)
 {
 	pmd_t _pmd;
 
@@ -1659,8 +1659,12 @@ void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
 	if (write)
 		_pmd = pmd_mkdirty(_pmd);
 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-				  pmd, _pmd, write))
+				  pmd, _pmd, write)) {
 		update_mmu_cache_pmd(vma, addr, pmd);
+		return 1;
+	}
+
+	return 0;
 }
 
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -1857,7 +1861,20 @@ void huge_pmd_set_accessed(struct vm_fault *vmf)
 	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
 		goto unlock;
 
-	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
+	if (!touch_pmd(vmf->vma, vmf->address, vmf->pmd, write)) {
+		/* Skip spurious TLB flush for retried page fault */
+		if (vmf->flags & FAULT_FLAG_TRIED)
+			goto unlock;
+		/*
+		 * This is needed only for protection faults but the arch code
+		 * is not yet telling us if this is a protection fault or not.
+		 * This still avoids useless tlb flushes for .text page faults
+		 * with threads.
+		 */
+		if (vmf->flags & FAULT_FLAG_WRITE)
+			flush_tlb_fix_spurious_fault_pmd(vmf->vma, vmf->address,
+							 vmf->pmd);
+	}
 
 unlock:
 	spin_unlock(vmf->ptl);
diff --git a/mm/internal.h b/mm/internal.h
index 45b725c3dc03..743ce97c7248 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1406,8 +1406,8 @@ int __must_check try_grab_folio(struct folio *folio, int refs,
  */
 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
 	       pud_t *pud, bool write);
-void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-	       pmd_t *pmd, bool write);
+int touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+	      pmd_t *pmd, bool write);
 
 /*
  * Parses a string with mem suffixes into its order.  Useful to parse kernel
-- 
2.39.5