From: Vernon Yang

Add an mm_khugepaged_scan event to track the total time of a full scan
and the total number of pages scanned by khugepaged.

Signed-off-by: Vernon Yang
Acked-by: David Hildenbrand (Red Hat)
Reviewed-by: Barry Song
Reviewed-by: Lance Yang
Reviewed-by: Dev Jain
---
 include/trace/events/huge_memory.h | 25 +++++++++++++++++++++++++
 mm/khugepaged.c                    |  2 ++
 2 files changed, 27 insertions(+)

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 4e41bff31888..384e29f6bef0 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -237,5 +237,30 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
 		__print_symbolic(__entry->result, SCAN_STATUS))
 );
 
+TRACE_EVENT(mm_khugepaged_scan,
+
+	TP_PROTO(struct mm_struct *mm, unsigned int progress,
+		 bool full_scan_finished),
+
+	TP_ARGS(mm, progress, full_scan_finished),
+
+	TP_STRUCT__entry(
+		__field(struct mm_struct *, mm)
+		__field(unsigned int, progress)
+		__field(bool, full_scan_finished)
+	),
+
+	TP_fast_assign(
+		__entry->mm = mm;
+		__entry->progress = progress;
+		__entry->full_scan_finished = full_scan_finished;
+	),
+
+	TP_printk("mm=%p, progress=%u, full_scan_finished=%d",
+		  __entry->mm,
+		  __entry->progress,
+		  __entry->full_scan_finished)
+);
+
 #endif /* __HUGE_MEMORY_H */
 #include <trace/define_trace.h>
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fa1e57fd2c46..4049234e1c8b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2536,6 +2536,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result
 		collect_mm_slot(slot);
 	}
 
+	trace_mm_khugepaged_scan(mm, progress, khugepaged_scan.mm_slot == NULL);
+
 	return progress;
 }
 
-- 
2.51.0

From: Vernon Yang

Currently, each scan always increases "progress" by HPAGE_PMD_NR, even
if only a single PTE/PMD entry is scanned.

- When only a single PTE entry is scanned, here is a detailed example:

static int hpage_collapse_scan_pmd()
{
	for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, addr += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		...
		if (pte_uffd_wp(pteval)) {	<-- first scan hit
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
	}
}

  During the first iteration, if pte_uffd_wp(pteval) is true, the loop
  exits directly, so in practice only one PTE is scanned before
  termination. Here, "progress += 1" reflects the actual number of PTEs
  scanned, whereas previously "progress" was always increased by
  HPAGE_PMD_NR.

- When the memory has already been collapsed to a PMD, here is a
  detailed example:

  The following data was traced by bpftrace on a desktop system. After
  the system had been left idle for 10 minutes after booting, a lot of
  SCAN_PMD_MAPPED or SCAN_NO_PTE_TABLE results were observed during a
  full scan by khugepaged. From trace_mm_khugepaged_scan_pmd and
  trace_mm_khugepaged_scan_file, the following statuses were observed,
  with their frequency next to them:

  SCAN_SUCCEED          : 1
  SCAN_EXCEED_SHARED_PTE: 2
  SCAN_PMD_MAPPED       : 142
  SCAN_NO_PTE_TABLE     : 178
  total progress size   : 674 MB
  Total time            : 419 seconds, including khugepaged_scan_sleep_millisecs

The khugepaged_scan list holds every task whose memory may be collapsed
into hugepages; as long as a task is not destroyed, khugepaged never
removes it from the list. This leads to a situation where a task has
already collapsed all of its memory regions into hugepages, yet
khugepaged keeps scanning it, which wastes CPU time for no benefit. In
addition, because of khugepaged_scan_sleep_millisecs (default 10s),
scanning a large number of such useless tasks delays the scan of tasks
that are actually worth scanning.
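To make the intended accounting change concrete before the numbers,
here is a simplified user-space sketch (illustrative only; the function
name and the HPAGE_PMD_NR value are assumptions, not the kernel code;
the real change lives in hpage_collapse_scan_pmd() and
khugepaged_scan_mm_slot() in the diff below):

#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* assuming 4K base pages and a 2M PMD */

/*
 * Old accounting: every PMD range charged HPAGE_PMD_NR against the
 * pages_to_scan budget, even when it was skipped almost immediately.
 * New accounting: a PMD-mapped range (or one without a PTE table)
 * costs 1, and a partially scanned range costs the number of PTE
 * entries actually examined.
 */
static unsigned int scan_cost(bool skipped_early, unsigned int ptes_examined)
{
	return skipped_early ? 1 : ptes_examined;
}

int main(void)
{
	printf("old cost: %u, new cost: %u\n", HPAGE_PMD_NR, scan_cost(true, 0));
	return 0;
}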
After applying this patch, memory that is SCAN_PMD_MAPPED or
SCAN_NO_PTE_TABLE is simply skipped; the results are as follows:

  SCAN_EXCEED_SHARED_PTE: 2
  SCAN_PMD_MAPPED       : 147
  SCAN_NO_PTE_TABLE     : 173
  total progress size   : 45 MB
  Total time            : 20 seconds

Signed-off-by: Vernon Yang
---
 mm/khugepaged.c | 38 ++++++++++++++++++++++++++++----------
 1 file changed, 28 insertions(+), 10 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 4049234e1c8b..8b68ae3bc2c5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -68,7 +68,10 @@ enum scan_result {
 static struct task_struct *khugepaged_thread __read_mostly;
 static DEFINE_MUTEX(khugepaged_mutex);
 
-/* default scan 8*HPAGE_PMD_NR ptes (or vmas) every 10 second */
+/*
+ * default scan 8*HPAGE_PMD_NR ptes, pmd_mapped, no_pte_table or vmas
+ * every 10 seconds.
+ */
 static unsigned int khugepaged_pages_to_scan __read_mostly;
 static unsigned int khugepaged_pages_collapsed;
 static unsigned int khugepaged_full_scans;
@@ -1240,7 +1243,8 @@ static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long a
 }
 
 static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long start_addr, bool *mmap_locked,
+		struct vm_area_struct *vma, unsigned long start_addr,
+		bool *mmap_locked, unsigned int *cur_progress,
 		struct collapse_control *cc)
 {
 	pmd_t *pmd;
@@ -1256,19 +1260,27 @@ static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
 	VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
 
 	result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
-	if (result != SCAN_SUCCEED)
+	if (result != SCAN_SUCCEED) {
+		if (cur_progress)
+			*cur_progress = 1;
 		goto out;
+	}
 
 	memset(cc->node_load, 0, sizeof(cc->node_load));
 	nodes_clear(cc->alloc_nmask);
 	pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
 	if (!pte) {
+		if (cur_progress)
+			*cur_progress = 1;
 		result = SCAN_NO_PTE_TABLE;
 		goto out;
 	}
 
 	for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
 	     _pte++, addr += PAGE_SIZE) {
+		if (cur_progress)
+			*cur_progress += 1;
+
 		pte_t pteval = ptep_get(_pte);
 		if (pte_none_or_zero(pteval)) {
 			++none_or_zero;
@@ -2288,8 +2300,9 @@ static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
 	return result;
 }
 
-static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
-		struct file *file, pgoff_t start, struct collapse_control *cc)
+static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm,
+		unsigned long addr, struct file *file, pgoff_t start,
+		unsigned int *cur_progress, struct collapse_control *cc)
 {
 	struct folio *folio = NULL;
 	struct address_space *mapping = file->f_mapping;
@@ -2378,6 +2391,8 @@ static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, unsigned
 			cond_resched_rcu();
 		}
 	}
+	if (cur_progress)
+		*cur_progress = HPAGE_PMD_NR;
 	rcu_read_unlock();
 
 	if (result == SCAN_SUCCEED) {
@@ -2457,6 +2472,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result
 
 		while (khugepaged_scan.address < hend) {
 			bool mmap_locked = true;
+			unsigned int cur_progress = 0;
 
 			cond_resched();
 			if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
@@ -2473,7 +2489,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result
 				mmap_read_unlock(mm);
 				mmap_locked = false;
 				*result = hpage_collapse_scan_file(mm,
-					khugepaged_scan.address, file, pgoff, cc);
+					khugepaged_scan.address, file, pgoff,
+					&cur_progress, cc);
 				fput(file);
 				if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
 					mmap_read_lock(mm);
@@ -2487,7 +2504,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result
 				}
 			} else {
 				*result = hpage_collapse_scan_pmd(mm, vma,
-					khugepaged_scan.address, &mmap_locked, cc);
+					khugepaged_scan.address, &mmap_locked,
+					&cur_progress, cc);
 			}
 
 			if (*result == SCAN_SUCCEED)
@@ -2495,7 +2513,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result
 
 			/* move to next address */
 			khugepaged_scan.address += HPAGE_PMD_SIZE;
-			progress += HPAGE_PMD_NR;
+			progress += cur_progress;
 			if (!mmap_locked)
 				/*
 				 * We released mmap_lock so break loop. Note
@@ -2818,7 +2836,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
 			mmap_locked = false;
 			*lock_dropped = true;
 			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
-							  cc);
+							  NULL, cc);
 			if (result == SCAN_PAGE_DIRTY_OR_WRITEBACK &&
 			    !triggered_wb &&
 			    mapping_can_writeback(file->f_mapping)) {
@@ -2833,7 +2851,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
 			fput(file);
 		} else {
 			result = hpage_collapse_scan_pmd(mm, vma, addr,
-							 &mmap_locked, cc);
+							 &mmap_locked, NULL, cc);
 		}
 		if (!mmap_locked)
 			*lock_dropped = true;
-- 
2.51.0

From: Vernon Yang

Add a folio_test_lazyfree() function to identify lazy-free folios and
improve code readability.

Signed-off-by: Vernon Yang
Acked-by: David Hildenbrand (Red Hat)
Reviewed-by: Lance Yang
Reviewed-by: Dev Jain
Reviewed-by: Barry Song
---
 include/linux/page-flags.h | 5 +++++
 mm/rmap.c                  | 2 +-
 mm/vmscan.c                | 5 ++---
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f7a0e4af0c73..415e9f2ef616 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -724,6 +724,11 @@ static __always_inline bool folio_test_anon(const struct folio *folio)
 	return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
 }
 
+static __always_inline bool folio_test_lazyfree(const struct folio *folio)
+{
+	return folio_test_anon(folio) && !folio_test_swapbacked(folio);
+}
+
 static __always_inline bool PageAnonNotKsm(const struct page *page)
 {
 	unsigned long flags = (unsigned long)page_folio(page)->mapping;
diff --git a/mm/rmap.c b/mm/rmap.c
index c67a374bb1a3..e3f799c99057 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2049,7 +2049,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		}
 
 		if (!pvmw.pte) {
-			if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
+			if (folio_test_lazyfree(folio)) {
 				if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
 					goto walk_done;
 				/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 44e4fcd6463c..02b522b8c66c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -963,8 +963,7 @@ static void folio_check_dirty_writeback(struct folio *folio,
 	 * They could be mistakenly treated as file lru. So further anon
 	 * test is needed.
 	 */
-	if (!folio_is_file_lru(folio) ||
-	    (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
+	if (!folio_is_file_lru(folio) || folio_test_lazyfree(folio)) {
 		*dirty = false;
 		*writeback = false;
 		return;
@@ -1508,7 +1507,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 			}
 		}
 
-		if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
+		if (folio_test_lazyfree(folio)) {
 			/* follow __remove_mapping for reference */
 			if (!folio_ref_freeze(folio, 1))
 				goto keep_locked;
-- 
2.51.0

From: Vernon Yang

For example, create three tasks: hot1 -> cold -> hot2. After all three
tasks are created, each allocates 128MB of memory.
The hot1/hot2 tasks continuously access their 128MB of memory, while
the cold task only accesses its memory briefly and then calls
madvise(MADV_FREE). However, khugepaged still prioritizes scanning the
cold task and only scans the hot2 task after it has finished scanning
the cold task.

And if we collapse a region that contains lazy-free pages, their
contents will never become none, and the deferred shrinker cannot
reclaim them. So if the user has explicitly informed us via MADV_FREE
that this memory will be freed, it is appropriate for khugepaged to
simply skip it, thereby avoiding unnecessary scan and collapse
operations and reducing wasted CPU time.

Here are the performance test results (bigger is better for
Throughput, smaller is better for the others):

Testing on x86_64 machine:

| task hot2            | without patch | with patch    | delta   |
|----------------------|---------------|---------------|---------|
| total accesses time  | 3.14 sec      | 2.93 sec      | -6.69%  |
| cycles per access    | 4.96          | 2.21          | -55.44% |
| Throughput           | 104.38 M/sec  | 111.89 M/sec  | +7.19%  |
| dTLB-load-misses     | 284814532     | 69597236      | -75.56% |

Testing on qemu-system-x86_64 -enable-kvm:

| task hot2            | without patch | with patch    | delta   |
|----------------------|---------------|---------------|---------|
| total accesses time  | 3.35 sec      | 2.96 sec      | -11.64% |
| cycles per access    | 7.29          | 2.07          | -71.60% |
| Throughput           | 97.67 M/sec   | 110.77 M/sec  | +13.41% |
| dTLB-load-misses     | 241600871     | 3216108       | -98.67% |

Signed-off-by: Vernon Yang
Acked-by: David Hildenbrand (Red Hat)
Reviewed-by: Lance Yang
---
 include/trace/events/huge_memory.h |  1 +
 mm/khugepaged.c                    | 13 +++++++++++++
 2 files changed, 14 insertions(+)

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 384e29f6bef0..bcdc57eea270 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -25,6 +25,7 @@
 	EM( SCAN_PAGE_LRU,		"page_not_in_lru")		\
 	EM( SCAN_PAGE_LOCK,		"page_locked")			\
 	EM( SCAN_PAGE_ANON,		"page_not_anon")		\
+	EM( SCAN_PAGE_LAZYFREE,		"page_lazyfree")		\
 	EM( SCAN_PAGE_COMPOUND,		"page_compound")		\
 	EM( SCAN_ANY_PROCESS,		"no_process_for_page")		\
 	EM( SCAN_VMA_NULL,		"vma_null")			\
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 8b68ae3bc2c5..0d160e612e16 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -46,6 +46,7 @@ enum scan_result {
 	SCAN_PAGE_LRU,
 	SCAN_PAGE_LOCK,
 	SCAN_PAGE_ANON,
+	SCAN_PAGE_LAZYFREE,
 	SCAN_PAGE_COMPOUND,
 	SCAN_ANY_PROCESS,
 	SCAN_VMA_NULL,
@@ -583,6 +584,12 @@ static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		folio = page_folio(page);
 		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
 
+		if (cc->is_khugepaged && !pte_dirty(pteval) &&
+		    folio_test_lazyfree(folio)) {
+			result = SCAN_PAGE_LAZYFREE;
+			goto out;
+		}
+
 		/* See hpage_collapse_scan_pmd(). */
 		if (folio_maybe_mapped_shared(folio)) {
 			++shared;
@@ -1335,6 +1342,12 @@ static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
 		}
 		folio = page_folio(page);
 
+		if (cc->is_khugepaged && !pte_dirty(pteval) &&
+		    folio_test_lazyfree(folio)) {
+			result = SCAN_PAGE_LAZYFREE;
+			goto out_unmap;
+		}
+
 		if (!folio_test_anon(folio)) {
 			result = SCAN_PAGE_ANON;
 			goto out_unmap;
-- 
2.51.0

From: Vernon Yang

When an mm with the MMF_DISABLE_THP_COMPLETELY flag is detected during
scanning, directly set khugepaged_scan.mm_slot to the next mm_slot,
which avoids a redundant operation.

Without this patch, khugepaged_scan.mm_slot is only set to the next
mm_slot the next time khugepaged_scan_mm_slot() is entered. With this
patch, khugepaged_scan.mm_slot is set to the next mm_slot directly.
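To illustrate the difference, here is a minimal stand-alone sketch of
the idea (plain C with a toy list; all names are made up for
illustration and none of this is the kernel's mm_slot code):

#include <stdbool.h>
#include <stddef.h>

struct slot {
	struct slot *next;
	bool thp_disabled;	/* stands in for MMF_DISABLE_THP_COMPLETELY */
};

/* Scan cursor, playing the role of khugepaged_scan.mm_slot in spirit only. */
static struct slot *scan_cursor;

static void release_slot(struct slot *s)
{
	/*
	 * Advance the cursor as soon as the current slot is found to be
	 * unscannable; previously this only happened on the next call of
	 * the scan function, i.e. one scan round later.
	 */
	if (scan_cursor == s)
		scan_cursor = s->next;
	/* unlinking and freeing of "s" are omitted in this sketch */
}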
Signed-off-by: Vernon Yang
Acked-by: David Hildenbrand (Red Hat)
Reviewed-by: Lance Yang
Reviewed-by: Dev Jain
Reviewed-by: Barry Song
---
 mm/khugepaged.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0d160e612e16..b3854d990fd9 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2548,9 +2548,9 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result
 	VM_BUG_ON(khugepaged_scan.mm_slot != slot);
 	/*
 	 * Release the current mm_slot if this mm is about to die, or
-	 * if we scanned all vmas of this mm.
+	 * if we scanned all vmas of this mm, or THP got disabled.
 	 */
-	if (hpage_collapse_test_exit(mm) || !vma) {
+	if (hpage_collapse_test_exit_or_disable(mm) || !vma) {
 		/*
 		 * Make sure that if mm_users is reaching zero while
 		 * khugepaged runs here, khugepaged_exit will find
-- 
2.51.0
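For reference, the MADV_FREE behaviour that the "cold" task relies on
in the hot1/cold/hot2 example above can be reproduced with a small
user-space program (a sketch only; error handling is trimmed, and it
assumes Linux 4.5+ and a libc that exposes MADV_FREE):

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define SZ	(128UL << 20)	/* 128MB, as in the example above */

int main(void)
{
	char *p = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	memset(p, 1, SZ);	/* touch the memory briefly */

	/*
	 * Tell the kernel the contents are no longer needed. The pages
	 * become lazy-free (anonymous and not swap-backed), which is
	 * what folio_test_lazyfree() checks and what khugepaged now
	 * skips (while the PTEs stay clean) instead of collapsing.
	 */
	madvise(p, SZ, MADV_FREE);

	pause();		/* stay alive and idle, like the "cold" task */
	return 0;
}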