When collapse_file encounters dirty or writeback pages in file-backed
mappings, it currently returns SCAN_FAIL which maps to -EINVAL. This is
misleading as EINVAL suggests invalid arguments, whereas dirty/writeback
pages represent transient conditions that may resolve on retry.

Introduce SCAN_PAGE_DIRTY_OR_WRITEBACK to cover both dirty and
writeback states, mapping it to -EAGAIN. For MADV_COLLAPSE, this
provides userspace with a clear signal that retry may succeed after
writeback completes. For khugepaged, this is harmless as it will
naturally revisit the range during periodic scans after async writeback
completes.

Reported-by: Branden Moore
Closes: https://lore.kernel.org/all/4e26fe5e-7374-467c-a333-9dd48f85d7cc@amd.com
Fixes: 34488399fa08 ("mm/madvise: add file and shmem support to MADV_COLLAPSE")
Reviewed-by: Dev Jain
Reviewed-by: Lance Yang
Reviewed-by: Baolin Wang
Reviewed-by: wang lian
Signed-off-by: Shivank Garg
---
 include/trace/events/huge_memory.h | 3 ++-
 mm/khugepaged.c                    | 8 +++++---
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 4cde53b45a85..4e41bff31888 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -37,7 +37,8 @@
 	EM( SCAN_PAGE_HAS_PRIVATE,	"page_has_private")	\
 	EM( SCAN_STORE_FAILED,		"store_failed")		\
 	EM( SCAN_COPY_MC,		"copy_poisoned_page")	\
-	EMe(SCAN_PAGE_FILLED,		"page_filled")
+	EM( SCAN_PAGE_FILLED,		"page_filled")		\
+	EMe(SCAN_PAGE_DIRTY_OR_WRITEBACK, "page_dirty_or_writeback")

 #undef EM
 #undef EMe
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 97d1b2824386..219dfa2e523c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -58,6 +58,7 @@ enum scan_result {
 	SCAN_STORE_FAILED,
 	SCAN_COPY_MC,
 	SCAN_PAGE_FILLED,
+	SCAN_PAGE_DIRTY_OR_WRITEBACK,
 };

 #define CREATE_TRACE_POINTS
@@ -1967,11 +1968,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 			 */
			xas_unlock_irq(&xas);
			filemap_flush(mapping);
-			result = SCAN_FAIL;
+			result = SCAN_PAGE_DIRTY_OR_WRITEBACK;
 			goto xa_unlocked;
 		} else if (folio_test_writeback(folio)) {
 			xas_unlock_irq(&xas);
-			result = SCAN_FAIL;
+			result = SCAN_PAGE_DIRTY_OR_WRITEBACK;
 			goto xa_unlocked;
 		} else if (folio_trylock(folio)) {
 			folio_get(folio);
@@ -2018,7 +2019,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 			 * folio is dirty because it hasn't been flushed
 			 * since first write.
 			 */
-			result = SCAN_FAIL;
+			result = SCAN_PAGE_DIRTY_OR_WRITEBACK;
 			goto out_unlock;
 		}

@@ -2747,6 +2748,7 @@ static int madvise_collapse_errno(enum scan_result r)
 	case SCAN_PAGE_LRU:
 	case SCAN_DEL_PAGE_LRU:
 	case SCAN_PAGE_FILLED:
+	case SCAN_PAGE_DIRTY_OR_WRITEBACK:
 		return -EAGAIN;
 	/*
	 * Other: Trying again likely not to succeed / error intrinsic to
-- 
2.43.0

When MADV_COLLAPSE is called on file-backed mappings (e.g., executable
text sections), the pages may still be dirty from recent writes.
collapse_file() will trigger async writeback and fail with
SCAN_PAGE_DIRTY_OR_WRITEBACK (-EAGAIN).

MADV_COLLAPSE is a synchronous operation where userspace expects
immediate results. If the collapse fails due to dirty pages, perform
synchronous writeback on the specific range and retry once. This avoids
spurious failures for freshly written executables while avoiding
unnecessary synchronous I/O for mappings that are already clean.
Reported-by: Branden Moore
Closes: https://lore.kernel.org/all/4e26fe5e-7374-467c-a333-9dd48f85d7cc@amd.com
Fixes: 34488399fa08 ("mm/madvise: add file and shmem support to MADV_COLLAPSE")
Suggested-by: David Hildenbrand
Tested-by: Lance Yang
Signed-off-by: Shivank Garg
---
 mm/khugepaged.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 219dfa2e523c..6c8c35d3e0c9 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 #include
 #include "internal.h"
[NOTE(review): the angle-bracket header names in the hunk above were
stripped during text extraction ("#include <...>" targets lost) — the
added include must be recovered from the original posting.]
@@ -2787,9 +2788,11 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
 	hend = end & HPAGE_PMD_MASK;

 	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
+		bool retried = false;
 		int result = SCAN_FAIL;

 		if (!mmap_locked) {
+retry:
 			cond_resched();
 			mmap_read_lock(mm);
 			mmap_locked = true;
@@ -2819,6 +2822,43 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
 		if (!mmap_locked)
 			*lock_dropped = true;

+		/*
+		 * If the file-backed VMA has dirty pages, the scan triggers
+		 * async writeback and returns SCAN_PAGE_DIRTY_OR_WRITEBACK.
+		 * Since MADV_COLLAPSE is sync, we force sync writeback and
+		 * retry once.
+		 */
+		if (result == SCAN_PAGE_DIRTY_OR_WRITEBACK && !retried) {
+			/*
+			 * File scan drops the lock. We must re-acquire it to
+			 * safely inspect the VMA and hold the file reference.
+			 */
+			if (!mmap_locked) {
+				cond_resched();
+				mmap_read_lock(mm);
+				mmap_locked = true;
+				result = hugepage_vma_revalidate(mm, addr, false, &vma, cc);
+				if (result != SCAN_SUCCEED)
+					goto handle_result;
+			}
+
+			if (!vma_is_anonymous(vma) && vma->vm_file &&
+			    mapping_can_writeback(vma->vm_file->f_mapping)) {
+				struct file *file = get_file(vma->vm_file);
+				pgoff_t pgoff = linear_page_index(vma, addr);
+				loff_t lstart = (loff_t)pgoff << PAGE_SHIFT;
+				loff_t lend = lstart + HPAGE_PMD_SIZE - 1;
+
+				mmap_read_unlock(mm);
+				mmap_locked = false;
+				*lock_dropped = true;
+				filemap_write_and_wait_range(file->f_mapping, lstart, lend);
+				fput(file);
+				retried = true;
+				goto retry;
+			}
+		}
+
+handle_result:
 		switch (result) {
 		case SCAN_SUCCEED:
-- 
2.43.0