Factor out the contpte block's address and ptep alignment into a new
helper, which will be reused in the following patch. No functional
changes.

Signed-off-by: Baolin Wang
---
 arch/arm64/mm/contpte.c | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 589bcf878938..e4ddeb46f25d 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -26,6 +26,26 @@ static inline pte_t *contpte_align_down(pte_t *ptep)
 	return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES);
 }
 
+static inline pte_t *contpte_align_addr_ptep(unsigned long *start,
+					     unsigned long *end, pte_t *ptep,
+					     unsigned int nr)
+{
+	/*
+	 * Note: caller must ensure these nr PTEs are consecutive (present)
+	 * PTEs that map consecutive pages of the same large folio within a
+	 * single VMA and a single page table.
+	 */
+	if (pte_cont(__ptep_get(ptep + nr - 1)))
+		*end = ALIGN(*end, CONT_PTE_SIZE);
+
+	if (pte_cont(__ptep_get(ptep))) {
+		*start = ALIGN_DOWN(*start, CONT_PTE_SIZE);
+		ptep = contpte_align_down(ptep);
+	}
+
+	return ptep;
+}
+
 static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr,
 					pte_t *ptep, unsigned int nr)
 {
@@ -569,14 +589,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
 	unsigned long start = addr;
 	unsigned long end = start + nr * PAGE_SIZE;
-	if (pte_cont(__ptep_get(ptep + nr - 1)))
-		end = ALIGN(end, CONT_PTE_SIZE);
-
-	if (pte_cont(__ptep_get(ptep))) {
-		start = ALIGN_DOWN(start, CONT_PTE_SIZE);
-		ptep = contpte_align_down(ptep);
-	}
-
+	ptep = contpte_align_addr_ptep(&start, &end, ptep, nr);
 	__clear_young_dirty_ptes(vma, start, ptep,
 				 (end - start) / PAGE_SIZE, flags);
 }
 EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
-- 
2.47.3
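
For intuition, below is a small userspace model of the alignment math the new
helper performs. It is an illustrative sketch only, not kernel code: the 4K
page size and CONT_PTES = 16 are assumptions for the example, and the booleans
stand in for the pte_cont() checks on the first and last PTE of the range.

/* Userspace model of the alignment in contpte_align_addr_ptep(). */
#include <stdio.h>

#define PAGE_SIZE	0x1000UL			/* assumed: 4K pages */
#define CONT_PTES	16				/* assumed: 16 PTEs per block */
#define CONT_PTE_SIZE	(CONT_PTES * PAGE_SIZE)		/* 64K contpte block */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	/* A 4-page sub-range starting mid-way through a 64K contpte block. */
	unsigned long start = 0x400000UL + 4 * PAGE_SIZE;	/* 0x404000 */
	unsigned long end = start + 4 * PAGE_SIZE;		/* 0x408000 */

	/* Stand-ins for pte_cont() on the last and first PTE of the range. */
	int last_is_cont = 1, first_is_cont = 1;

	if (last_is_cont)
		end = ALIGN(end, CONT_PTE_SIZE);	/* round end up */
	if (first_is_cont)
		start = ALIGN_DOWN(start, CONT_PTE_SIZE); /* round start down */

	printf("start=%#lx end=%#lx nr=%lu\n",
	       start, end, (end - start) / PAGE_SIZE);
	return 0;
}

Running this prints start=0x400000 end=0x410000 nr=16: the 4-page sub-range is
expanded to cover the whole 64K contpte block, which is what
contpte_clear_young_dirty_ptes() relies on when it passes the adjusted start
and (end - start) / PAGE_SIZE to __clear_young_dirty_ptes().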