There are cases where, if an attempted collapse fails, all subsequent
(smaller) orders are guaranteed to fail as well. Avoid these collapse
attempts by bailing out early.

Signed-off-by: Nico Pache
---
 mm/khugepaged.c | 31 ++++++++++++++++++++++++++++++-
 1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6a4cf7e4a7cc..7d9b5100bea1 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1389,10 +1389,39 @@ static int collapse_scan_bitmap(struct mm_struct *mm, unsigned long address,
 			ret = collapse_huge_page(mm, address, referenced,
 						 unmapped, cc, mmap_locked,
 						 order, offset * KHUGEPAGED_MIN_MTHP_NR);
-			if (ret == SCAN_SUCCEED) {
+
+			/*
+			 * Analyze failure reason to determine next action:
+			 * - goto next_order: try smaller orders in same region
+			 * - continue: try other regions at same order
+			 * - break: stop all attempts (system-wide failure)
+			 */
+			switch (ret) {
+			/* Cases where we should continue to the next region */
+			case SCAN_SUCCEED:
 				collapsed += (1 << order);
+			case SCAN_PAGE_RO:
+			case SCAN_PTE_MAPPED_HUGEPAGE:
 				continue;
+			/* Cases where lower orders might still succeed */
+			case SCAN_LACK_REFERENCED_PAGE:
+			case SCAN_EXCEED_NONE_PTE:
+			case SCAN_EXCEED_SWAP_PTE:
+			case SCAN_EXCEED_SHARED_PTE:
+			case SCAN_PAGE_LOCK:
+			case SCAN_PAGE_COUNT:
+			case SCAN_PAGE_LRU:
+			case SCAN_PAGE_NULL:
+			case SCAN_DEL_PAGE_LRU:
+			case SCAN_PTE_NON_PRESENT:
+			case SCAN_PTE_UFFD_WP:
+			case SCAN_ALLOC_HUGE_PAGE_FAIL:
+				goto next_order;
+			/* All other cases should stop collapse attempts */
+			default:
+				break;
 			}
+			break;
 		}
 next_order:
-- 
2.50.1
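
For readers unfamiliar with the pattern, here is a standalone sketch (not
part of the patch) of the same three-way decision the switch above makes:
continue to the next region on success, fall back to a smaller order on a
retryable failure, and stop entirely on a fatal one. The enum values and
the try_collapse() helper below are hypothetical stand-ins for the kernel's
SCAN_* codes and collapse_huge_page().

/*
 * Illustrative sketch only: early bailout across regions and orders.
 * Compile with any C compiler; no kernel headers are needed.
 */
#include <stdio.h>

enum scan_result { SCAN_OK, SCAN_RETRY_LOWER_ORDER, SCAN_FATAL };

/* Hypothetical collapse attempt: orders above 4 need a retry at a lower order. */
static enum scan_result try_collapse(int region, int order)
{
	if (order > 4)
		return SCAN_RETRY_LOWER_ORDER;
	if (region == 3)
		return SCAN_FATAL;
	return SCAN_OK;
}

int main(void)
{
	for (int order = 9; order >= 2; order--) {
		for (int region = 0; region < 8; region++) {
			switch (try_collapse(region, order)) {
			case SCAN_OK:
				printf("order %d region %d: collapsed\n", order, region);
				continue;		/* next region, same order */
			case SCAN_RETRY_LOWER_ORDER:
				goto next_order;	/* smaller orders may still work */
			case SCAN_FATAL:
				printf("fatal failure, stopping all attempts\n");
				return 0;		/* nothing else can succeed */
			}
		}
next_order:
		;
	}
	return 0;
}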