When a folio is successfully split, its statistics must be updated. The
current implementation complicates this process:

* It iterates over the resulting new folios.
* It uses a flag (@stop_split) to conditionally skip updating the stat
  for the folio at @split_at during the loop.
* It then attempts to update the skipped stat on a subsequent failure
  path.

This logic is unnecessarily hard to follow.

Refactor the code to update the folio statistics only after a
successful split. This makes the logic much cleaner and sets the stage
for further simplification of the stat-handling code.

Signed-off-by: Wei Yang
Cc: Zi Yan
---
 mm/huge_memory.c | 44 +++++++++++---------------------------------
 1 file changed, 11 insertions(+), 33 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b2d5a7e5c8e..bafbd66769ac 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3530,13 +3530,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	bool is_anon = folio_test_anon(folio);
 	int order = folio_order(folio);
 	int start_order = uniform_split ? new_order : order - 1;
-	bool stop_split = false;
 	struct folio *next;
 	int split_order;
-	int ret = 0;
-
-	if (is_anon)
-		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
 
 	folio_clear_has_hwpoisoned(folio);
 
@@ -3545,7 +3540,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	 * folio is split to new_order directly.
 	 */
 	for (split_order = start_order;
-	     split_order >= new_order && !stop_split;
+	     split_order >= new_order;
 	     split_order--) {
 		struct folio *end_folio = folio_next(folio);
 		int old_order = folio_order(folio);
@@ -3568,49 +3563,32 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 			else {
 				xas_set_order(xas, folio->index, split_order);
 				xas_try_split(xas, folio, old_order);
-				if (xas_error(xas)) {
-					ret = xas_error(xas);
-					stop_split = true;
-				}
+				if (xas_error(xas))
+					return xas_error(xas);
 			}
 		}
 
-		if (!stop_split) {
-			folio_split_memcg_refs(folio, old_order, split_order);
-			split_page_owner(&folio->page, old_order, split_order);
-			pgalloc_tag_split(folio, old_order, split_order);
-
-			__split_folio_to_order(folio, old_order, split_order);
-		}
+		folio_split_memcg_refs(folio, old_order, split_order);
+		split_page_owner(&folio->page, old_order, split_order);
+		pgalloc_tag_split(folio, old_order, split_order);
+		__split_folio_to_order(folio, old_order, split_order);
 
+		if (is_anon)
+			mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
 		/*
 		 * Iterate through after-split folios and update folio stats.
-		 * But in buddy allocator like split, the folio
-		 * containing the specified page is skipped until its order
-		 * is new_order, since the folio will be worked on in next
-		 * iteration.
 		 */
 		for (new_folio = folio; new_folio != end_folio; new_folio = next) {
 			next = folio_next(new_folio);
-			/*
-			 * for buddy allocator like split, new_folio containing
-			 * @split_at page could be split again, thus do not
-			 * change stats yet. Wait until new_folio's order is
-			 * @new_order or stop_split is set to true by the above
-			 * xas_split() failure.
-			 */
-			if (new_folio == page_folio(split_at)) {
+			if (new_folio == page_folio(split_at))
 				folio = new_folio;
-				if (split_order != new_order && !stop_split)
-					continue;
-			}
 			if (is_anon)
 				mod_mthp_stat(folio_order(new_folio),
 					      MTHP_STAT_NR_ANON, 1);
 		}
 	}
 
-	return ret;
+	return 0;
 }
 
 bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-- 
2.34.1
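[Reviewer note, not part of the patch: a standalone sketch of why
updating stats only after a successful split step keeps the counters
consistent on the error path. Every name below (nr_stat,
try_split_step, split_to_order) is a stand-in invented for this note,
not kernel API; the real flow is __split_unmapped_folio() above.]

  #include <stdio.h>

  static int nr_stat;	/* stand-in for the MTHP_STAT_NR_ANON counter */

  /* Stand-in for one xas_try_split() step; pretend order 2 hits -ENOMEM. */
  static int try_split_step(int split_order)
  {
  	return split_order == 2 ? -12 : 0;
  }

  /*
  * Mirrors the patched flow: attempt the split step first and bail out
  * on error; only a successful step touches the counter. (The old flow
  * decremented up front and had to repair the counter on failure.)
  */
  static int split_to_order(int order, int new_order)
  {
  	for (int split_order = order - 1; split_order >= new_order; split_order--) {
  		int err = try_split_step(split_order);

  		if (err)
  			return err;	/* counter untouched for this step */
  		nr_stat -= 1;		/* one folio of the old order disappears... */
  		nr_stat += 2;		/* ...two folios of split_order appear */
  	}
  	return 0;
  }

  int main(void)
  {
  	nr_stat = 1;			/* one large folio accounted initially */
  	int err = split_to_order(4, 0);

  	/* Fails at order 2, yet nr_stat still matches the folios that exist. */
  	printf("err=%d nr_stat=%d\n", err, nr_stat);
  	return 0;
  }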