Currently, DAMON virtual address operations take mmap_read_lock around
page table walks, which can cause unnecessary contention under high
concurrency.

Introduce damon_va_walk_page_range(), which first tries to acquire a
per-VMA lock. If the VMA is found and the requested range is fully
contained within it, the page table walk proceeds under the per-VMA
lock instead of mmap_read_lock. This optimization is particularly
effective for damon_va_young() and damon_va_mkold(), which are called
frequently and typically operate within a single VMA.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
v2: avoid handling VMAs with VM_PFNMAP in the per-VMA path, as found
    in Sashiko's review.
v1: https://lore.kernel.org/linux-mm/20260511132546.1973270-1-wangkefeng.wang@huawei.com/

 mm/damon/vaddr.c | 69 +++++++++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 43 insertions(+), 26 deletions(-)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 1b0ebe3b6951..d27147603564 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -237,6 +237,35 @@ static void damon_va_update(struct damon_ctx *ctx)
 	}
 }
 
+static void damon_va_walk_page_range(struct mm_struct *mm, unsigned long start,
+		unsigned long end, struct mm_walk_ops *ops, void *private)
+{
+	struct vm_area_struct *vma;
+
+	vma = lock_vma_under_rcu(mm, start);
+	if (!vma)
+		goto lock_mmap;
+
+	if (end > vma->vm_end) {
+		vma_end_read(vma);
+		goto lock_mmap;
+	}
+
+	if (!(vma->vm_flags & VM_PFNMAP)) {
+		ops->walk_lock = PGWALK_VMA_RDLOCK_VERIFY;
+		walk_page_range_vma(vma, start, end, ops, private);
+	}
+
+	vma_end_read(vma);
+	return;
+
+lock_mmap:
+	mmap_read_lock(mm);
+	ops->walk_lock = PGWALK_RDLOCK;
+	walk_page_range(mm, start, end, ops, private);
+	mmap_read_unlock(mm);
+}
+
 static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 		unsigned long next, struct mm_walk *walk)
 {
@@ -315,17 +344,14 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
 #define damon_mkold_hugetlb_entry NULL
 #endif /* CONFIG_HUGETLB_PAGE */
 
-static const struct mm_walk_ops damon_mkold_ops = {
-	.pmd_entry = damon_mkold_pmd_entry,
-	.hugetlb_entry = damon_mkold_hugetlb_entry,
-	.walk_lock = PGWALK_RDLOCK,
-};
-
 static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
 {
-	mmap_read_lock(mm);
-	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
-	mmap_read_unlock(mm);
+	struct mm_walk_ops damon_mkold_ops = {
+		.pmd_entry = damon_mkold_pmd_entry,
+		.hugetlb_entry = damon_mkold_hugetlb_entry,
+	};
+
+	damon_va_walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
 }
 
 /*
@@ -445,12 +471,6 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
 #define damon_young_hugetlb_entry NULL
 #endif /* CONFIG_HUGETLB_PAGE */
 
-static const struct mm_walk_ops damon_young_ops = {
-	.pmd_entry = damon_young_pmd_entry,
-	.hugetlb_entry = damon_young_hugetlb_entry,
-	.walk_lock = PGWALK_RDLOCK,
-};
-
 static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
 		unsigned long *folio_sz)
 {
@@ -459,9 +479,12 @@ static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
 		.young = false,
 	};
 
-	mmap_read_lock(mm);
-	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
-	mmap_read_unlock(mm);
+	struct mm_walk_ops damon_young_ops = {
+		.pmd_entry = damon_young_pmd_entry,
+		.hugetlb_entry = damon_young_hugetlb_entry,
+	};
+
+	damon_va_walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
 
 	return arg.young;
 }
@@ -750,7 +773,6 @@ static unsigned long damos_va_migrate(struct damon_target *target,
 	struct mm_walk_ops walk_ops = {
 		.pmd_entry = damos_va_migrate_pmd_entry,
 		.pte_entry = NULL,
-		.walk_lock = PGWALK_RDLOCK,
 	};
 
 	use_target_nid = dests->nr_dests == 0;
@@ -768,9 +790,7 @@ static unsigned long damos_va_migrate(struct damon_target *target,
 	if (!mm)
 		goto free_lists;
 
-	mmap_read_lock(mm);
-	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
-	mmap_read_unlock(mm);
+	damon_va_walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
 	mmput(mm);
 
 	for (int i = 0; i < nr_dests; i++) {
@@ -862,7 +882,6 @@ static unsigned long damos_va_stat(struct damon_target *target,
 	struct mm_struct *mm;
 	struct mm_walk_ops walk_ops = {
 		.pmd_entry = damos_va_stat_pmd_entry,
-		.walk_lock = PGWALK_RDLOCK,
 	};
 
 	priv.scheme = s;
@@ -875,9 +894,7 @@ static unsigned long damos_va_stat(struct damon_target *target,
 	if (!mm)
 		return 0;
 
-	mmap_read_lock(mm);
-	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
-	mmap_read_unlock(mm);
+	damon_va_walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
 	mmput(mm);
 	return 0;
 }
-- 
2.27.0
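
Not part of the patch, but for context: a minimal sketch of what a call
site looks like with the new helper (example_mkold() is a made-up name;
the mm_walk_ops entries mirror damon_va_mkold() in the diff above).
Callers now build on-stack mm_walk_ops and leave .walk_lock unset,
because damon_va_walk_page_range() selects PGWALK_VMA_RDLOCK_VERIFY or
PGWALK_RDLOCK depending on which lock it manages to take; with
!CONFIG_PER_VMA_LOCK, lock_vma_under_rcu() returns NULL, so the helper
simply falls back to mmap_read_lock.

/* Illustrative only -- mirrors damon_va_mkold() in the diff above. */
static void example_mkold(struct mm_struct *mm, unsigned long addr)
{
	struct mm_walk_ops ops = {
		.pmd_entry = damon_mkold_pmd_entry,
		.hugetlb_entry = damon_mkold_hugetlb_entry,
		/* .walk_lock left unset: chosen by the helper. */
	};

	/* Single-address walk; [addr, addr + 1) always fits in one VMA. */
	damon_va_walk_page_range(mm, addr, addr + 1, &ops, NULL);
}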