The folio splitting process involves several related tasks that are executed together: adjusting memcg (memory control group) accounting, updating page owner tracking, splitting the folio to the target size (new_order), and updating the necessary folio statistics. Introduce a new helper function, __split_folio_and_update_stats(), to gather all these tasks. This consolidation improves modularity and is a necessary preparation step for further cleanup and simplification of the surrounding folio splitting logic. Signed-off-by: Wei Yang Cc: Lorenzo Stoakes Cc: Zi Yan --- mm/huge_memory.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c37fe6ad0c96..abde0f1aa8ff 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3567,6 +3567,22 @@ static void __split_folio_to_order(struct folio *folio, int old_order, ClearPageCompound(&folio->page); } +static void __split_folio_and_update_stats(struct folio *folio, int old_order, + int new_order, bool is_anon) +{ + int nr_new_folios = 1UL << (old_order - new_order); + + folio_split_memcg_refs(folio, old_order, new_order); + split_page_owner(&folio->page, old_order, new_order); + pgalloc_tag_split(folio, old_order, new_order); + __split_folio_to_order(folio, old_order, new_order); + + if (is_anon) { + mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1); + mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, nr_new_folios); + } +} + /** * __split_unmapped_folio() - splits an unmapped @folio to lower order folios in * two ways: uniform split or non-uniform split. 
@@ -3623,8 +3639,6 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, for (split_order = start_order; split_order >= new_order; split_order--) { - int nr_new_folios = 1UL << (old_order - split_order); - /* order-1 anonymous folio is not supported */ if (is_anon && split_order == 1) continue; @@ -3645,15 +3659,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, } } - folio_split_memcg_refs(folio, old_order, split_order); - split_page_owner(&folio->page, old_order, split_order); - pgalloc_tag_split(folio, old_order, split_order); - __split_folio_to_order(folio, old_order, split_order); - - if (is_anon) { - mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1); - mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, nr_new_folios); - } + __split_folio_and_update_stats(folio, old_order, split_order, is_anon); /* * If uniform split, the process is complete. * If non-uniform, continue splitting the folio at @split_at -- 2.34.1 Utilize the newly introduced __split_folio_and_update_stats() helper function to clearly separate the logic for uniform and non-uniform folio splitting within __split_unmapped_folio(). This refactoring greatly simplifies the code by creating two distinct execution paths: * Uniform Split: Directly calls __split_folio_and_update_stats() once to achieve the @new_order in a single operation. * Non-Uniform Split: Continues to use a loop to iteratively split the folio by a single order at a time, eventually reaching the @new_order. This separation improves code clarity and maintainability. 
Signed-off-by: Wei Yang Cc: Lorenzo Stoakes Cc: Zi Yan --- mm/huge_memory.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index abde0f1aa8ff..c4fb84cedbe0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3629,14 +3629,20 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, { const bool is_anon = folio_test_anon(folio); int old_order = folio_order(folio); - int start_order = uniform_split ? new_order : old_order - 1; int split_order; + /* For uniform split, folio is split to new_order directly. */ + if (uniform_split) { + if (mapping) + xas_split(xas, folio, old_order); + __split_folio_and_update_stats(folio, old_order, new_order, is_anon); + return 0; + } + /* - * split to new_order one order at a time. For uniform split, - * folio is split to new_order directly. + * For non-uniform, split to new_order one order at a time. */ - for (split_order = start_order; + for (split_order = old_order - 1; split_order >= new_order; split_order--) { /* order-1 anonymous folio is not supported */ @@ -3649,21 +3655,16 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, * irq is disabled to allocate enough memory, whereas * non-uniform split can handle ENOMEM. */ - if (uniform_split) - xas_split(xas, folio, old_order); - else { - xas_set_order(xas, folio->index, split_order); - xas_try_split(xas, folio, old_order); - if (xas_error(xas)) - return xas_error(xas); - } + xas_set_order(xas, folio->index, split_order); + xas_try_split(xas, folio, old_order); + if (xas_error(xas)) + return xas_error(xas); } __split_folio_and_update_stats(folio, old_order, split_order, is_anon); /* - * If uniform split, the process is complete. - * If non-uniform, continue splitting the folio at @split_at - * as long as the next @split_order is >= @new_order. + * Continue splitting the folio at @split_at as long as the + * next @split_order is >= @new_order. 
*/ folio = page_folio(split_at); old_order = split_order; -- 2.34.1