In the current mremap_folio_pte_batch():

1) pte_batch_hint() always returns a single PTE on non-arm64 machines,
   which is not efficient.
2) It then needs to look up the folio just to be able to call
   folio_pte_batch().

With the newly added can_pte_batch_count(), call it directly instead of
folio_pte_batch(), and rename mremap_folio_pte_batch() to
mremap_pte_batch() to match.

Signed-off-by: Zhang Qilong
---
 mm/mremap.c | 16 +++-------------
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index bd7314898ec5..d11f93f1622f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -169,27 +169,17 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 		pte = pte_swp_mksoft_dirty(pte);
 #endif
 	return pte;
 }
 
-static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
+static int mremap_pte_batch(struct vm_area_struct *vma, unsigned long addr,
 		pte_t *ptep, pte_t pte, int max_nr)
 {
-	struct folio *folio;
-
 	if (max_nr == 1)
 		return 1;
 
-	/* Avoid expensive folio lookup if we stand no chance of benefit. */
-	if (pte_batch_hint(ptep, pte) == 1)
-		return 1;
-
-	folio = vm_normal_folio(vma, addr, pte);
-	if (!folio || !folio_test_large(folio))
-		return 1;
-
-	return folio_pte_batch(folio, ptep, pte, max_nr);
+	return can_pte_batch_count(vma, ptep, &pte, max_nr, 0);
 }
 
 static int move_ptes(struct pagetable_move_control *pmc,
 		unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
 {
@@ -278,11 +268,11 @@ static int move_ptes(struct pagetable_move_control *pmc,
 			 * make sure the physical page stays valid until
 			 * the TLB entry for the old mapping has been
 			 * flushed.
 			 */
 			if (pte_present(old_pte)) {
-				nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
+				nr_ptes = mremap_pte_batch(vma, old_addr, old_ptep,
 						old_pte, max_nr_ptes);
 				force_flush = true;
 			}
 			pte = get_and_clear_ptes(mm, old_addr, old_ptep, nr_ptes);
 			pte = move_pte(pte, old_addr, new_addr);
-- 
2.43.0
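
For context: can_pte_batch_count() is introduced earlier in this series,
so its definition is not visible in this patch. The sketch below is only
a hypothetical illustration of the semantics this caller relies on,
pieced together from the open-coded logic being removed; the in-tree
helper may differ (notably, it takes no address, so it cannot use
vm_normal_folio() the way the removed code did):

	/*
	 * Hypothetical sketch, not the in-tree implementation: returns
	 * how many consecutive PTEs starting at @ptep can be batched,
	 * in the range [1, max_nr].
	 */
	static inline int can_pte_batch_count(struct vm_area_struct *vma,
			pte_t *ptep, pte_t *ptentp, int max_nr, fpb_t flags)
	{
		struct folio *folio;

		/* Cheap hint first; returns 1 on non-arm64 today. */
		if (pte_batch_hint(ptep, *ptentp) == 1)
			return 1;

		/*
		 * Assumed folio lookup via the PFN, since no address is
		 * passed in. The removed code's vm_normal_folio() call
		 * also screened out special mappings; the real helper
		 * must handle that some other way.
		 */
		folio = pfn_folio(pte_pfn(*ptentp));
		if (!folio_test_large(folio))
			return 1;

		return folio_pte_batch(folio, ptep, *ptentp, max_nr);
	}

Either way, the caller only depends on the return value being a valid
batch size, which is what lets mremap_pte_batch() shrink to a thin
wrapper.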