During the execution of __split_unmapped_folio(), the folio's anon/!anon
attribute is invariant (it is not expected to change). Therefore, it is safe
and more efficient to retrieve this attribute once at the start and reuse it
throughout the function.

Signed-off-by: Wei Yang
Cc: Zi Yan
Reviewed-by: Zi Yan
Reviewed-by: wang lian
---
 mm/huge_memory.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3c74227cc847..4b2d5a7e5c8e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3527,6 +3527,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 		struct page *split_at, struct xa_state *xas,
 		struct address_space *mapping, bool uniform_split)
 {
+	bool is_anon = folio_test_anon(folio);
 	int order = folio_order(folio);
 	int start_order = uniform_split ? new_order : order - 1;
 	bool stop_split = false;
@@ -3534,7 +3535,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	int split_order;
 	int ret = 0;
 
-	if (folio_test_anon(folio))
+	if (is_anon)
 		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
 
 	folio_clear_has_hwpoisoned(folio);
@@ -3551,7 +3552,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 		struct folio *new_folio;
 
 		/* order-1 anonymous folio is not supported */
-		if (folio_test_anon(folio) && split_order == 1)
+		if (is_anon && split_order == 1)
 			continue;
 		if (uniform_split && split_order != new_order)
 			continue;
@@ -3603,7 +3604,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 				if (split_order != new_order && !stop_split)
 					continue;
 			}
-			if (folio_test_anon(new_folio))
+			if (is_anon)
 				mod_mthp_stat(folio_order(new_folio),
 						MTHP_STAT_NR_ANON, 1);
 		}
-- 
2.34.1


The existing __split_unmapped_folio() code splits the given folio and updates
stats, but it is complicated to understand. After simplification,
__split_unmapped_folio() directly calculates and updates the folio statistics
upon a successful split:

  * All resulting folios are of order @split_order.
  * The number of new folios is calculated directly from @old_order and
    @split_order.
  * The folio for the next split is identified as the one containing
    @split_at.
  * An xas_try_split() error is returned directly without worrying about
    stats updates.

Signed-off-by: Wei Yang
Cc: Zi Yan
Reviewed-by: Zi Yan
---
v2:
  * merge patch 2-5
  * retain start_order
  * new_folios -> nr_new_folios
  * add a comment at the end of the loop
---
 mm/huge_memory.c | 66 ++++++++++++++----------------------------------
 1 file changed, 19 insertions(+), 47 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b2d5a7e5c8e..68e851f5fcb2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3528,15 +3528,9 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 		struct address_space *mapping, bool uniform_split)
 {
 	bool is_anon = folio_test_anon(folio);
-	int order = folio_order(folio);
-	int start_order = uniform_split ? new_order : order - 1;
-	bool stop_split = false;
-	struct folio *next;
+	int old_order = folio_order(folio);
+	int start_order = uniform_split ? new_order : old_order - 1;
 	int split_order;
-	int ret = 0;
-
-	if (is_anon)
-		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
 
 	folio_clear_has_hwpoisoned(folio);
 
@@ -3545,17 +3539,13 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	 * folio is split to new_order directly.
 	 */
 	for (split_order = start_order;
-	     split_order >= new_order && !stop_split;
+	     split_order >= new_order;
 	     split_order--) {
-		struct folio *end_folio = folio_next(folio);
-		int old_order = folio_order(folio);
-		struct folio *new_folio;
+		int nr_new_folios = 1UL << (old_order - split_order);
 
 		/* order-1 anonymous folio is not supported */
 		if (is_anon && split_order == 1)
 			continue;
-		if (uniform_split && split_order != new_order)
-			continue;
 
 		if (mapping) {
 			/*
@@ -3568,49 +3558,31 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 			else {
 				xas_set_order(xas, folio->index, split_order);
 				xas_try_split(xas, folio, old_order);
-				if (xas_error(xas)) {
-					ret = xas_error(xas);
-					stop_split = true;
-				}
+				if (xas_error(xas))
+					return xas_error(xas);
 			}
 		}
 
-		if (!stop_split) {
-			folio_split_memcg_refs(folio, old_order, split_order);
-			split_page_owner(&folio->page, old_order, split_order);
-			pgalloc_tag_split(folio, old_order, split_order);
+		folio_split_memcg_refs(folio, old_order, split_order);
+		split_page_owner(&folio->page, old_order, split_order);
+		pgalloc_tag_split(folio, old_order, split_order);
+		__split_folio_to_order(folio, old_order, split_order);
 
-			__split_folio_to_order(folio, old_order, split_order);
+		if (is_anon) {
+			mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
+			mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, nr_new_folios);
 		}
 
 		/*
-		 * Iterate through after-split folios and update folio stats.
-		 * But in buddy allocator like split, the folio
-		 * containing the specified page is skipped until its order
-		 * is new_order, since the folio will be worked on in next
-		 * iteration.
+		 * For uniform split, we have finished the job.
+		 * For non-uniform split, continue with the folio containing
+		 * @split_at and update @old_order to @split_order.
 		 */
-		for (new_folio = folio; new_folio != end_folio; new_folio = next) {
-			next = folio_next(new_folio);
-			/*
-			 * for buddy allocator like split, new_folio containing
-			 * @split_at page could be split again, thus do not
-			 * change stats yet. Wait until new_folio's order is
-			 * @new_order or stop_split is set to true by the above
-			 * xas_split() failure.
-			 */
-			if (new_folio == page_folio(split_at)) {
-				folio = new_folio;
-				if (split_order != new_order && !stop_split)
-					continue;
-			}
-			if (is_anon)
-				mod_mthp_stat(folio_order(new_folio),
-						MTHP_STAT_NR_ANON, 1);
-		}
+		folio = page_folio(split_at);
+		old_order = split_order;
 	}
 
-	return ret;
+	return 0;
 }
 
 bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-- 
2.34.1
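As an aside for review, here is a tiny userspace sketch of the simplified loop
(model_split() is a made-up helper with no kernel APIs, not part of the patch):
each iteration splits the current order-@old_order folio into
1 << (old_order - split_order) folios of @split_order, skips order 1 for anon,
and carries the folio containing @split_at into the next pass, which is the
control flow the new code follows.

/*
 * Userspace model of the simplified __split_unmapped_folio() loop.
 * Illustration only: no kernel types, stats updates are just printed.
 */
#include <stdio.h>
#include <stdbool.h>

static void model_split(int old_order, int new_order, bool is_anon,
			bool uniform_split)
{
	int start_order = uniform_split ? new_order : old_order - 1;
	int split_order;

	for (split_order = start_order; split_order >= new_order;
	     split_order--) {
		int nr_new_folios = 1 << (old_order - split_order);

		/* order-1 anonymous folio is not supported */
		if (is_anon && split_order == 1)
			continue;

		printf("order-%d folio -> %d order-%d folios\n",
		       old_order, nr_new_folios, split_order);

		/*
		 * The folio containing @split_at is split again in the
		 * next iteration; model that by carrying the order over.
		 */
		old_order = split_order;
	}
}

int main(void)
{
	printf("uniform split, anon, order 9 -> 4:\n");
	model_split(9, 4, true, true);
	printf("non-uniform split, anon, order 9 -> 0:\n");
	model_split(9, 0, true, false);
	return 0;
}

For example, it shows that a non-uniform anon split from order 9 to order 0
produces two folios per step down to order 2, skips order 1, and ends with
four order-0 folios, matching the stats the patch records via mod_mthp_stat().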