Currently, folio_referenced_one() always checks the young flag of each
PTE sequentially, which is inefficient for large folios. The
inefficiency is especially noticeable when reclaiming clean file-backed
large folios, where folio_referenced() shows up as a significant
performance hotspot.

Moreover, the Arm64 architecture, which supports contiguous PTEs,
already has an optimization to clear the young flags for PTEs within a
contiguous range. However, this is not sufficient: the batching can be
extended to the entire large folio, which might exceed a single
contiguous range (CONT_PTE_SIZE).

Introduce a new API, clear_flush_young_ptes(), to allow batched
checking of the young flags and flushing of TLB entries, thereby
improving performance during large folio reclamation. It will be
overridden by architectures that implement a more efficient batched
operation in the following patches.

Signed-off-by: Baolin Wang
---
 include/linux/mmu_notifier.h |  9 +++++----
 include/linux/pgtable.h      | 35 +++++++++++++++++++++++++++++++++++
 mm/rmap.c                    | 29 +++++++++++++++++++++++++++--
 3 files changed, 67 insertions(+), 6 deletions(-)

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index d1094c2d5fb6..dbbdcef4abf1 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -515,16 +515,17 @@ static inline void mmu_notifier_range_init_owner(
 	range->owner = owner;
 }
 
-#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
+#define ptep_clear_flush_young_notify(__vma, __address, __ptep, __nr)	\
 ({									\
 	int __young;							\
 	struct vm_area_struct *___vma = __vma;				\
 	unsigned long ___address = __address;				\
-	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
+	unsigned int ___nr = __nr;					\
+	__young = clear_flush_young_ptes(___vma, ___address, __ptep, ___nr); \
 	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
 						  ___address,		\
 						  ___address +		\
-							PAGE_SIZE);	\
+						  ___nr * PAGE_SIZE);	\
 	__young;							\
 })
 
@@ -650,7 +651,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 
 #define mmu_notifier_range_update_to_read_only(r) false
 
-#define ptep_clear_flush_young_notify ptep_clear_flush_young
+#define ptep_clear_flush_young_notify clear_flush_young_ptes
 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
 #define ptep_clear_young_notify ptep_test_and_clear_young
 #define pmdp_clear_young_notify pmdp_test_and_clear_young
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 2f0dd3a4ace1..fcf7a7820061 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1087,6 +1087,41 @@ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 }
 #endif
 
+#ifndef clear_flush_young_ptes
+/**
+ * clear_flush_young_ptes - Clear the access bit and perform a TLB flush
+ *		for PTEs that map consecutive pages of the same folio.
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries for which to clear the access bit.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_clear_flush_young().
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep,
+					 unsigned int nr)
+{
+	int young;
+
+	young = ptep_clear_flush_young(vma, addr, ptep);
+	while (--nr) {
+		ptep++;
+		addr += PAGE_SIZE;
+		young |= ptep_clear_flush_young(vma, addr, ptep);
+	}
+
+	return young;
+}
+#endif
+
 /*
  * On some architectures hardware does not set page access bit when accessing
  * memory page, it is responsibility of software setting this bit. It brings
diff --git a/mm/rmap.c b/mm/rmap.c
index d6799afe1114..a0fc05f5966f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -827,9 +827,11 @@ static bool folio_referenced_one(struct folio *folio,
 	struct folio_referenced_arg *pra = arg;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	int ptes = 0, referenced = 0;
+	unsigned int nr;
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		address = pvmw.address;
+		nr = 1;
 
 		if (vma->vm_flags & VM_LOCKED) {
 			ptes++;
@@ -874,9 +876,24 @@ static bool folio_referenced_one(struct folio *folio,
 			if (lru_gen_look_around(&pvmw))
 				referenced++;
 		} else if (pvmw.pte) {
+			if (folio_test_large(folio)) {
+				unsigned long end_addr =
+					pmd_addr_end(address, vma->vm_end);
+				unsigned int max_nr =
+					(end_addr - address) >> PAGE_SHIFT;
+				pte_t pteval = ptep_get(pvmw.pte);
+
+				nr = folio_pte_batch(folio, pvmw.pte,
+						     pteval, max_nr);
+			}
+
+			ptes += nr;
 			if (ptep_clear_flush_young_notify(vma, address,
-						pvmw.pte))
+						pvmw.pte, nr))
 				referenced++;
+			/* Skip the batched PTEs */
+			pvmw.pte += nr - 1;
+			pvmw.address += (nr - 1) * PAGE_SIZE;
 		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 			if (pmdp_clear_flush_young_notify(vma, address,
 							  pvmw.pmd))
@@ -886,7 +903,15 @@ static bool folio_referenced_one(struct folio *folio,
 			WARN_ON_ONCE(1);
 		}
 
-		pra->mapcount--;
+		pra->mapcount -= nr;
+		/*
+		 * If we are sure that we batched the entire folio,
+		 * we can just optimize and stop right here.
+		 */
+		if (ptes == pvmw.nr_pages) {
+			page_vma_mapped_walk_done(&pvmw);
+			break;
+		}
 	}
 
 	if (referenced)
--
2.47.3
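[For reference, a minimal sketch of how a caller is expected to use the
new API. The helper name folio_was_referenced() is hypothetical and
only for illustration; 'nr' is assumed to come from folio_pte_batch(),
as in the rmap hunk above.]

	/*
	 * Hypothetical caller: test and clear young across all 'nr' PTEs
	 * mapping consecutive pages of one large folio, with a single
	 * (potentially batched) TLB flush, instead of nr separate calls
	 * to ptep_clear_flush_young().
	 */
	static bool folio_was_referenced(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 unsigned int nr)
	{
		return clear_flush_young_ptes(vma, addr, ptep, nr) != 0;
	}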
Factor out the contpte block's address and ptep alignment into a new
helper, which will be reused in the following patch. No functional
changes.

Signed-off-by: Baolin Wang
---
 arch/arm64/mm/contpte.c | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 589bcf878938..e4ddeb46f25d 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -26,6 +26,26 @@ static inline pte_t *contpte_align_down(pte_t *ptep)
 	return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
 }
 
+static inline pte_t *contpte_align_addr_ptep(unsigned long *start,
+					     unsigned long *end, pte_t *ptep,
+					     unsigned int nr)
+{
+	/*
+	 * Note: caller must ensure these nr PTEs are consecutive (present)
+	 * PTEs that map consecutive pages of the same large folio within a
+	 * single VMA and a single page table.
+	 */
+	if (pte_cont(__ptep_get(ptep + nr - 1)))
+		*end = ALIGN(*end, CONT_PTE_SIZE);
+
+	if (pte_cont(__ptep_get(ptep))) {
+		*start = ALIGN_DOWN(*start, CONT_PTE_SIZE);
+		ptep = contpte_align_down(ptep);
+	}
+
+	return ptep;
+}
+
 static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep, unsigned int nr)
 {
@@ -569,14 +589,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
 	unsigned long start = addr;
 	unsigned long end = start + nr * PAGE_SIZE;
 
-	if (pte_cont(__ptep_get(ptep + nr - 1)))
-		end = ALIGN(end, CONT_PTE_SIZE);
-
-	if (pte_cont(__ptep_get(ptep))) {
-		start = ALIGN_DOWN(start, CONT_PTE_SIZE);
-		ptep = contpte_align_down(ptep);
-	}
-
+	ptep = contpte_align_addr_ptep(&start, &end, ptep, nr);
 	__clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags);
 }
 EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
--
2.47.3
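[To make the helper's effect concrete, a worked example, assuming 4K
pages so that CONT_PTES == 16 and CONT_PTE_SIZE == 64K; the addresses
are made up for illustration.]

	/*
	 * A batch of nr == 8 PTEs starting at addr 0x124000, where both
	 * the first and last PTE are contpte-mapped, is widened to the
	 * enclosing 64K-aligned blocks:
	 *
	 *	*start = ALIGN_DOWN(0x124000, 0x10000)		-> 0x120000
	 *	*end   = ALIGN(0x124000 + 8 * 0x1000, 0x10000)	-> 0x130000
	 *
	 * and the returned ptep is aligned down by the same four entries,
	 * so callers always operate on whole contiguous blocks.
	 */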
Currently, contpte_ptep_test_and_clear_young() and
contpte_ptep_clear_flush_young() only clear the young flag and flush
TLBs for PTEs within the contiguous range. To support batched PTE
operations for large folios of other sizes in the following patches,
add a new parameter that specifies the number of PTEs mapping
consecutive pages of the same large folio in a single VMA and a single
page table.

While we are at it, rename the functions to maintain consistency with
the other contpte_*() functions.

Signed-off-by: Baolin Wang
---
 arch/arm64/include/asm/pgtable.h | 12 ++++++------
 arch/arm64/mm/contpte.c          | 33 ++++++++++++++++++--------------
 2 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 445e18e92221..d5fbe72e820a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1648,10 +1648,10 @@ extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
 extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
 				unsigned long addr, pte_t *ptep,
 				unsigned int nr, int full);
-extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
-				unsigned long addr, pte_t *ptep);
-extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
-				unsigned long addr, pte_t *ptep);
+int contpte_test_and_clear_young_ptes(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep, unsigned int nr);
+int contpte_clear_flush_young_ptes(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep, unsigned int nr);
 extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, unsigned int nr);
 extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
@@ -1823,7 +1823,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 	if (likely(!pte_valid_cont(orig_pte)))
 		return __ptep_test_and_clear_young(vma, addr, ptep);
 
-	return contpte_ptep_test_and_clear_young(vma, addr, ptep);
+	return contpte_test_and_clear_young_ptes(vma, addr, ptep, CONT_PTES);
 }
 
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
@@ -1835,7 +1835,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 	if (likely(!pte_valid_cont(orig_pte)))
 		return __ptep_clear_flush_young(vma, addr, ptep);
 
-	return contpte_ptep_clear_flush_young(vma, addr, ptep);
+	return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
 }
 
 #define wrprotect_ptes wrprotect_ptes
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index e4ddeb46f25d..b929a455103f 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -508,8 +508,9 @@ pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(contpte_get_and_clear_full_ptes);
 
-int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
-				unsigned long addr, pte_t *ptep)
+int contpte_test_and_clear_young_ptes(struct vm_area_struct *vma,
+				      unsigned long addr, pte_t *ptep,
+				      unsigned int nr)
 {
 	/*
 	 * ptep_clear_flush_young() technically requires us to clear the access
@@ -518,41 +519,45 @@ int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
 	 * contig range when the range is covered by a single folio, we can get
 	 * away with clearing young for the whole contig range here, so we avoid
 	 * having to unfold.
+	 *
+	 * The 'nr' means consecutive (present) PTEs that map consecutive pages
+	 * of the same large folio in a single VMA and a single page table.
 	 */
+	unsigned long end = addr + nr * PAGE_SIZE;
 	int young = 0;
-	int i;
 
-	ptep = contpte_align_down(ptep);
-	addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
-
-	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
+	ptep = contpte_align_addr_ptep(&addr, &end, ptep, nr);
+	for (; addr != end; ptep++, addr += PAGE_SIZE)
 		young |= __ptep_test_and_clear_young(vma, addr, ptep);
 
 	return young;
 }
-EXPORT_SYMBOL_GPL(contpte_ptep_test_and_clear_young);
+EXPORT_SYMBOL_GPL(contpte_test_and_clear_young_ptes);
 
-int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
-				unsigned long addr, pte_t *ptep)
+int contpte_clear_flush_young_ptes(struct vm_area_struct *vma,
+				   unsigned long addr, pte_t *ptep,
+				   unsigned int nr)
 {
 	int young;
 
-	young = contpte_ptep_test_and_clear_young(vma, addr, ptep);
+	young = contpte_test_and_clear_young_ptes(vma, addr, ptep, nr);
 
 	if (young) {
+		unsigned long end = addr + nr * PAGE_SIZE;
+
+		contpte_align_addr_ptep(&addr, &end, ptep, nr);
 		/*
 		 * See comment in __ptep_clear_flush_young(); same rationale for
 		 * eliding the trailing DSB applies here.
 		 */
-		addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
-		__flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE,
+		__flush_tlb_range_nosync(vma->vm_mm, addr, end,
 					 PAGE_SIZE, true, 3);
 	}
 
 	return young;
 }
-EXPORT_SYMBOL_GPL(contpte_ptep_clear_flush_young);
+EXPORT_SYMBOL_GPL(contpte_clear_flush_young_ptes);
 
 void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 			    pte_t *ptep, unsigned int nr)
--
2.47.3
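[Note that the pre-existing single-entry entry points keep their
behaviour: after this change, a contpte ptep_clear_flush_young() is
simply the nr == CONT_PTES case of the new helper, as the pgtable.h
hunks above show. Expressed as a sketch, with a purely illustrative
wrapper name:]

	/* The old per-CONT-block semantics, via the new interface. */
	static inline int clear_flush_young_one_cont_block(
			struct vm_area_struct *vma, unsigned long addr,
			pte_t *ptep)
	{
		return contpte_clear_flush_young_ptes(vma, addr, ptep,
						      CONT_PTES);
	}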
Implement the Arm64 architecture-specific clear_flush_young_ptes() to
enable batched checking of young flags and TLB flushing, improving
performance during large folio reclamation.

Performance testing: Allocate 10G of clean file-backed folios by
mmap() in a memory cgroup, and try to reclaim 8G of file-backed folios
via the memory.reclaim interface. I observe a 33% performance
improvement on my Arm64 32-core server (and a 10%+ improvement on my
X86 machine). Meanwhile, the hotspot folio_check_references() dropped
from approximately 35% to around 5%.

W/o patchset:
real	0m1.518s
user	0m0.000s
sys	0m1.518s

W/ patchset:
real	0m1.018s
user	0m0.000s
sys	0m1.018s

Signed-off-by: Baolin Wang
---
 arch/arm64/include/asm/pgtable.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index d5fbe72e820a..84a6bdabb1f9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1838,6 +1838,17 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 	return contpte_clear_flush_young_ptes(vma, addr, ptep, CONT_PTES);
 }
 
+#define clear_flush_young_ptes clear_flush_young_ptes
+static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep,
+					 unsigned int nr)
+{
+	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
+		return __ptep_clear_flush_young(vma, addr, ptep);
+
+	return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
+}
+
 #define wrprotect_ptes wrprotect_ptes
 static __always_inline void wrprotect_ptes(struct mm_struct *mm,
 		unsigned long addr, pte_t *ptep, unsigned int nr)
--
2.47.3
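[For reproducibility, one plausible way to set up the test above. This
is a sketch only: the file name, its pre-created 10G size, and running
the program inside a memory cgroup before writing to memory.reclaim are
assumptions based on the description, and error handling is omitted.]

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define TEST_SZ	(10UL << 30)	/* 10G of file-backed mappings */

	int main(void)
	{
		int fd = open("testfile", O_RDONLY);	/* pre-sized 10G file */
		char *p = mmap(NULL, TEST_SZ, PROT_READ, MAP_SHARED, fd, 0);

		/* Read-fault every page so the page cache folios stay clean. */
		for (unsigned long off = 0; off < TEST_SZ; off += 4096)
			(void)*(volatile char *)(p + off);

		pause();	/* keep the mapping alive during reclaim */
		return 0;
	}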
Similar to folio_referenced_one(), we can apply batched unmapping to
file-backed large folios to optimize the performance of file folio
reclamation. Barry previously implemented batched unmapping for
lazyfree anonymous large folios [1], and did not further optimize
anonymous or file-backed large folios at that stage.

For file-backed large folios, batched unmapping support is relatively
straightforward, as we only need to clear the consecutive (present)
PTE entries.

Performance testing: Allocate 10G of clean file-backed folios by
mmap() in a memory cgroup, and try to reclaim 8G of file-backed folios
via the memory.reclaim interface. I observe a 75% performance
improvement on my Arm64 32-core server (and a 50%+ improvement on my
X86 machine) with this patch.

W/o patch:
real	0m1.018s
user	0m0.000s
sys	0m1.018s

W/ patch:
real	0m0.249s
user	0m0.000s
sys	0m0.249s

[1] https://lore.kernel.org/all/20250214093015.51024-4-21cnbao@gmail.com/T/#u

Acked-by: Barry Song
Signed-off-by: Baolin Wang
---
 mm/rmap.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index a0fc05f5966f..7482121d4e92 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1862,9 +1862,10 @@ static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
 	end_addr = pmd_addr_end(addr, vma->vm_end);
 	max_nr = (end_addr - addr) >> PAGE_SHIFT;
 
-	/* We only support lazyfree batching for now ... */
-	if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
+	/* We only support lazyfree or file folios batching for now ... */
+	if (folio_test_anon(folio) && folio_test_swapbacked(folio))
 		return 1;
+
 	if (pte_unused(pte))
 		return 1;
 
@@ -2230,7 +2231,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 	 *
 	 * See Documentation/mm/mmu_notifier.rst
 	 */
-	dec_mm_counter(mm, mm_counter_file(folio));
+	add_mm_counter(mm, mm_counter_file(folio), -nr_pages);
 	}
 discard:
 	if (unlikely(folio_test_hugetlb(folio))) {
--
2.47.3
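[As a closing note, the predicate change in folio_unmap_pte_batch()
can be summarized as follows (a sketch of the logic in the hunk above):]

	/*
	 * anon && swapbacked	-> no batching (regular anon, unchanged)
	 * anon && !swapbacked	-> batch (lazyfree anon, as before [1])
	 * !anon		-> batch (file-backed, new in this patch)
	 */

[The mm counter update is the matching generalization: for
nr_pages == 1, add_mm_counter(mm, mm_counter_file(folio), -nr_pages)
is exactly the old dec_mm_counter(mm, mm_counter_file(folio)).]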