Currently, contpte_ptep_test_and_clear_young() and
contpte_ptep_clear_flush_young() only clear the young flag and flush TLBs
for PTEs within the contiguous range. To support batch PTE operations for
other-sized large folios in the following patches, add a new parameter to
specify the number of PTEs that map consecutive pages of the same large
folio in a single VMA and a single page table. While we are at it, rename
the functions to maintain consistency with other contpte_*() functions.

Signed-off-by: Baolin Wang
---
 arch/arm64/include/asm/pgtable.h | 12 ++++++------
 arch/arm64/mm/contpte.c          | 33 +++++++++++++++++--------------
 2 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 445e18e92221..5e9ff16146c3 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1648,10 +1648,10 @@ extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
 extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
 				unsigned long addr, pte_t *ptep,
 				unsigned int nr, int full);
-extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
-				unsigned long addr, pte_t *ptep);
-extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
-				unsigned long addr, pte_t *ptep);
+int contpte_test_and_clear_young_ptes(struct vm_area_struct *vma,
+				unsigned long addr, pte_t *ptep, unsigned int nr);
+int contpte_clear_flush_young_ptes(struct vm_area_struct *vma,
+				unsigned long addr, pte_t *ptep, unsigned int nr);
 extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, unsigned int nr);
 extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
@@ -1823,7 +1823,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 	if (likely(!pte_valid_cont(orig_pte)))
 		return __ptep_test_and_clear_young(vma, addr, ptep);
 
-	return contpte_ptep_test_and_clear_young(vma, addr, ptep);
+	return contpte_test_and_clear_young_ptes(vma, addr, ptep, 1);
 }
 
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
@@ -1835,7 +1835,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 	if (likely(!pte_valid_cont(orig_pte)))
 		return __ptep_clear_flush_young(vma, addr, ptep);
 
-	return contpte_ptep_clear_flush_young(vma, addr, ptep);
+	return contpte_clear_flush_young_ptes(vma, addr, ptep, 1);
 }
 
 #define wrprotect_ptes wrprotect_ptes
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index e4ddeb46f25d..b929a455103f 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -508,8 +508,9 @@ pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(contpte_get_and_clear_full_ptes);
 
-int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
-					unsigned long addr, pte_t *ptep)
+int contpte_test_and_clear_young_ptes(struct vm_area_struct *vma,
+					unsigned long addr, pte_t *ptep,
+					unsigned int nr)
 {
 	/*
 	 * ptep_clear_flush_young() technically requires us to clear the access
@@ -518,41 +519,45 @@ int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
 	 * contig range when the range is covered by a single folio, we can get
 	 * away with clearing young for the whole contig range here, so we avoid
 	 * having to unfold.
+	 *
+	 * The 'nr' means consecutive (present) PTEs that map consecutive pages
+	 * of the same large folio in a single VMA and a single page table.
 	 */
+	unsigned long end = addr + nr * PAGE_SIZE;
 	int young = 0;
-	int i;
 
-	ptep = contpte_align_down(ptep);
-	addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
-
-	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
+	ptep = contpte_align_addr_ptep(&addr, &end, ptep, nr);
+	for (; addr != end; ptep++, addr += PAGE_SIZE)
 		young |= __ptep_test_and_clear_young(vma, addr, ptep);
 
 	return young;
 }
-EXPORT_SYMBOL_GPL(contpte_ptep_test_and_clear_young);
+EXPORT_SYMBOL_GPL(contpte_test_and_clear_young_ptes);
 
-int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
-					unsigned long addr, pte_t *ptep)
+int contpte_clear_flush_young_ptes(struct vm_area_struct *vma,
+					unsigned long addr, pte_t *ptep,
+					unsigned int nr)
 {
 	int young;
 
-	young = contpte_ptep_test_and_clear_young(vma, addr, ptep);
+	young = contpte_test_and_clear_young_ptes(vma, addr, ptep, nr);
 
 	if (young) {
+		unsigned long end = addr + nr * PAGE_SIZE;
+
+		contpte_align_addr_ptep(&addr, &end, ptep, nr);
 		/*
 		 * See comment in __ptep_clear_flush_young(); same rationale for
 		 * eliding the trailing DSB applies here.
 		 */
-		addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
-		__flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE,
+		__flush_tlb_range_nosync(vma->vm_mm, addr, end,
 					 PAGE_SIZE, true, 3);
 	}
 
 	return young;
 }
-EXPORT_SYMBOL_GPL(contpte_ptep_clear_flush_young);
+EXPORT_SYMBOL_GPL(contpte_clear_flush_young_ptes);
 
 void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, unsigned int nr)
-- 
2.47.3
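
For illustration only, not part of the patch: with the new 'nr' parameter, an
arm64-internal caller that already knows how many consecutive present PTEs of
one large folio it is scanning could clear the young flag for the whole batch
with a single call, rather than once per PTE. The wrapper below is a
hypothetical sketch under that assumption; only contpte_clear_flush_young_ptes()
comes from this patch, folio_nr_pages() is the existing core-mm helper, and the
pte_valid_cont() fast path used by ptep_clear_flush_young() above is elided
(the caller is assumed to hold the page table lock).

static inline int folio_clear_flush_young(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  struct folio *folio)
{
	/*
	 * Hypothetical wrapper: 'nr' consecutive PTEs map consecutive pages
	 * of this folio in a single VMA and a single page table.
	 */
	unsigned int nr = folio_nr_pages(folio);

	/*
	 * One batched call clears the young flag for all 'nr' PTEs and, if
	 * any were young, issues a single ranged TLB flush for the
	 * (contpte-aligned) range instead of one flush per call.
	 */
	return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
}

The benefit of the batch is visible in contpte_clear_flush_young_ptes() above:
all young bits are cleared first, and __flush_tlb_range_nosync() is then issued
once for the whole [addr, end) range rather than once per CONT_PTE block.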