During the execution of __split_unmapped_folio(), the folio's anon/!anon
attribute is invariant (not expected to change). Therefore, it is safe and
more efficient to retrieve this attribute once at the start and reuse it
throughout the function.

Signed-off-by: Wei Yang
Cc: Zi Yan
---
 mm/huge_memory.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3c74227cc847..4b2d5a7e5c8e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3527,6 +3527,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 		struct page *split_at, struct xa_state *xas,
 		struct address_space *mapping, bool uniform_split)
 {
+	bool is_anon = folio_test_anon(folio);
 	int order = folio_order(folio);
 	int start_order = uniform_split ? new_order : order - 1;
 	bool stop_split = false;
@@ -3534,7 +3535,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	int split_order;
 	int ret = 0;

-	if (folio_test_anon(folio))
+	if (is_anon)
 		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);

 	folio_clear_has_hwpoisoned(folio);
@@ -3551,7 +3552,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 		struct folio *new_folio;

 		/* order-1 anonymous folio is not supported */
-		if (folio_test_anon(folio) && split_order == 1)
+		if (is_anon && split_order == 1)
 			continue;
 		if (uniform_split && split_order != new_order)
 			continue;
@@ -3603,7 +3604,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 			if (split_order != new_order && !stop_split)
 				continue;
 		}
-		if (folio_test_anon(new_folio))
+		if (is_anon)
 			mod_mthp_stat(folio_order(new_folio),
 				      MTHP_STAT_NR_ANON, 1);
 	}
-- 
2.34.1

When a folio is successfully split, its statistics must be updated. The
current implementation complicates this process:

* It iterates over the resulting new folios.
* It uses a flag (@stop_split) to conditionally skip updating the stat for
  the folio at @split_at during the loop.
* It then attempts to update the skipped stat on a subsequent failure path.

This logic is unnecessarily hard to follow.

This commit refactors the code to update the folio statistics only after a
successful split. This makes the logic much cleaner and sets the stage for
further simplification of the stat-handling code.

Signed-off-by: Wei Yang
Cc: Zi Yan
---
 mm/huge_memory.c | 44 +++++++++++---------------------------------
 1 file changed, 11 insertions(+), 33 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b2d5a7e5c8e..bafbd66769ac 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3530,13 +3530,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	bool is_anon = folio_test_anon(folio);
 	int order = folio_order(folio);
 	int start_order = uniform_split ? new_order : order - 1;
-	bool stop_split = false;
 	struct folio *next;
 	int split_order;
-	int ret = 0;
-
-	if (is_anon)
-		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);

 	folio_clear_has_hwpoisoned(folio);

@@ -3545,7 +3540,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	 * folio is split to new_order directly.
 	 */
 	for (split_order = start_order;
-	     split_order >= new_order && !stop_split;
+	     split_order >= new_order;
 	     split_order--) {
 		struct folio *end_folio = folio_next(folio);
 		int old_order = folio_order(folio);
@@ -3568,49 +3563,32 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 			else {
 				xas_set_order(xas, folio->index, split_order);
 				xas_try_split(xas, folio, old_order);
-				if (xas_error(xas)) {
-					ret = xas_error(xas);
-					stop_split = true;
-				}
+				if (xas_error(xas))
+					return xas_error(xas);
 			}
 		}

-		if (!stop_split) {
-			folio_split_memcg_refs(folio, old_order, split_order);
-			split_page_owner(&folio->page, old_order, split_order);
-			pgalloc_tag_split(folio, old_order, split_order);
-
-			__split_folio_to_order(folio, old_order, split_order);
-		}
+		folio_split_memcg_refs(folio, old_order, split_order);
+		split_page_owner(&folio->page, old_order, split_order);
+		pgalloc_tag_split(folio, old_order, split_order);
+		__split_folio_to_order(folio, old_order, split_order);

+		if (is_anon)
+			mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
 		/*
 		 * Iterate through after-split folios and update folio stats.
-		 * But in buddy allocator like split, the folio
-		 * containing the specified page is skipped until its order
-		 * is new_order, since the folio will be worked on in next
-		 * iteration.
 		 */
 		for (new_folio = folio; new_folio != end_folio; new_folio = next) {
 			next = folio_next(new_folio);
-			/*
-			 * for buddy allocator like split, new_folio containing
-			 * @split_at page could be split again, thus do not
-			 * change stats yet. Wait until new_folio's order is
-			 * @new_order or stop_split is set to true by the above
-			 * xas_split() failure.
-			 */
-			if (new_folio == page_folio(split_at)) {
+			if (new_folio == page_folio(split_at))
 				folio = new_folio;
-				if (split_order != new_order && !stop_split)
-					continue;
-			}
 			if (is_anon)
 				mod_mthp_stat(folio_order(new_folio),
 					      MTHP_STAT_NR_ANON, 1);
 		}
 	}

-	return ret;
+	return 0;
 }

 bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-- 
2.34.1

The loop executed after a successful folio split currently has two
combined responsibilities:

* updating statistics for the new folios
* determining the folio for the next split iteration.

This commit refactors the logic to directly calculate and update folio
statistics, eliminating the need for the iteration step. We can do this
because all necessary information is already available:

* All resulting new folios have the same order, which is @split_order.
* The exact number of new folios can be calculated directly using
  @old_order and @split_order.
* The folio for the subsequent split is simply the one containing
  @split_at.

By leveraging this knowledge, we can achieve the stat update more cleanly
and efficiently without the looping logic.

Signed-off-by: Wei Yang
Cc: Zi Yan
---
 mm/huge_memory.c | 18 ++++--------------
 1 file changed, 4 insertions(+), 14 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bafbd66769ac..482a734b61ac 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3530,7 +3530,6 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	bool is_anon = folio_test_anon(folio);
 	int order = folio_order(folio);
 	int start_order = uniform_split ? new_order : order - 1;
-	struct folio *next;
 	int split_order;

 	folio_clear_has_hwpoisoned(folio);
@@ -3542,9 +3541,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	for (split_order = start_order;
 	     split_order >= new_order;
 	     split_order--) {
-		struct folio *end_folio = folio_next(folio);
 		int old_order = folio_order(folio);
-		struct folio *new_folio;
+		int new_folios = 1UL << (old_order - split_order);

 		/* order-1 anonymous folio is not supported */
 		if (is_anon && split_order == 1)
@@ -3573,19 +3571,11 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 		pgalloc_tag_split(folio, old_order, split_order);
 		__split_folio_to_order(folio, old_order, split_order);

-		if (is_anon)
+		if (is_anon) {
 			mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
-		/*
-		 * Iterate through after-split folios and update folio stats.
-		 */
-		for (new_folio = folio; new_folio != end_folio; new_folio = next) {
-			next = folio_next(new_folio);
-			if (new_folio == page_folio(split_at))
-				folio = new_folio;
-			if (is_anon)
-				mod_mthp_stat(folio_order(new_folio),
-					      MTHP_STAT_NR_ANON, 1);
+			mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, new_folios);
 		}
+		folio = page_folio(split_at);
 	}

 	return 0;
-- 
2.34.1

Folio splitting requires both the folio's original order (@old_order) and
the new target order (@split_order). In the current implementation,
@old_order is repeatedly retrieved using folio_order(). However, for every
iteration after the first, the folio being split is the result of the
previous split, meaning its order is already known to be equal to the
previous iteration's @split_order.

This commit optimizes the logic:

* Instead of calling folio_order(), we now set @old_order directly to the
  value of @split_order from the previous iteration.
* The initial @split_order (which was previously handled by a separate
  @start_order variable) is now directly used, and the redundant
  @start_order variable is removed.

This change avoids unnecessary function calls and simplifies the loop
setup.

Signed-off-by: Wei Yang
Cc: Zi Yan
---
 mm/huge_memory.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 482a734b61ac..63380b185df1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3528,8 +3528,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 		struct address_space *mapping, bool uniform_split)
 {
 	bool is_anon = folio_test_anon(folio);
-	int order = folio_order(folio);
-	int start_order = uniform_split ? new_order : order - 1;
+	int old_order = folio_order(folio);
 	int split_order;

 	folio_clear_has_hwpoisoned(folio);
@@ -3538,10 +3537,9 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	 * split to new_order one order at a time. For uniform split,
 	 * folio is split to new_order directly.
 	 */
-	for (split_order = start_order;
+	for (split_order = uniform_split ? new_order : old_order - 1;
 	     split_order >= new_order;
 	     split_order--) {
-		int old_order = folio_order(folio);
 		int new_folios = 1UL << (old_order - split_order);

 		/* order-1 anonymous folio is not supported */
@@ -3576,6 +3574,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 			mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, new_folios);
 		}
 		folio = page_folio(split_at);
+		old_order = split_order;
 	}

 	return 0;
-- 
2.34.1

The uniform splitting logic is designed so that the @split_order variable
starts at the target @new_order and subsequently decreases with each
iteration.
Since the loop exits as soon as @split_order drops below @new_order, a
uniform split performs exactly one pass with split_order == new_order, so
the condition split_order != new_order can never be true on that path.
Remove the check for this case that cannot occur, simplifying the code.

Signed-off-by: Wei Yang
Cc: Zi Yan
---
 mm/huge_memory.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 63380b185df1..a1f0da9486eb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3545,8 +3545,6 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 		/* order-1 anonymous folio is not supported */
 		if (is_anon && split_order == 1)
 			continue;
-		if (uniform_split && split_order != new_order)
-			continue;

 		if (mapping) {
 			/*
-- 
2.34.1
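As a cross-check of the accounting above, here is a minimal userspace sketch of
the post-series loop. It is not kernel code: model_split(), the nr[] array and
MAX_ORDER_MODEL are made-up names standing in for the real folio and
MTHP_STAT_NR_ANON machinery, and it assumes an anonymous folio so the order-1
step is always skipped.

/* Build with: gcc -Wall -o split_model split_model.c */
#include <stdio.h>

#define MAX_ORDER_MODEL 16

/*
 * Model of the stat handling in __split_unmapped_folio() after this
 * series: each pass splits the folio containing @split_at from
 * old_order down to split_order, producing 1 << (old_order - split_order)
 * folios of split_order, then continues with old_order = split_order.
 */
static void model_split(int order, int new_order, int uniform_split)
{
	long nr[MAX_ORDER_MODEL] = { 0 };	/* stands in for MTHP_STAT_NR_ANON */
	int old_order = order;
	int split_order;
	int i;

	nr[old_order] = 1;	/* one anonymous folio of the original order */

	for (split_order = uniform_split ? new_order : old_order - 1;
	     split_order >= new_order;
	     split_order--) {
		int new_folios = 1 << (old_order - split_order);

		/* order-1 anonymous folio is not supported */
		if (split_order == 1)
			continue;

		nr[old_order] -= 1;		/* mod_mthp_stat(old_order, ..., -1) */
		nr[split_order] += new_folios;	/* mod_mthp_stat(split_order, ..., new_folios) */

		old_order = split_order;	/* next pass splits the folio at @split_at */
	}

	for (i = order; i >= 0; i--)
		if (nr[i])
			printf("order %d: %ld folio(s)\n", i, nr[i]);
}

int main(void)
{
	/* non-uniform split of an order-9 anonymous folio down to order-0 */
	model_split(9, 0, 0);
	return 0;
}

For that non-uniform order-9-to-order-0 case it prints one folio at each of
orders 8 down to 2 plus four order-0 folios (512 pages in total): the skipped
order-1 pass leaves the order-2 folio to be split straight to order-0, which is
exactly what the new per-iteration accounting records.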