The functions uniform_split_supported() and non_uniform_split_supported()
share almost identical logic. The only functional difference is that
uniform_split_supported() additionally requires a non-zero @new_order
before performing the file-system and swapcache checks.

Unify the two by introducing a new variable, @need_check, which is set
according to whether a uniform split is requested, and merge the functions
into a single helper, folio_split_supported(). This removes the duplicated
code and simplifies the split support check.

Signed-off-by: Wei Yang
Cc: Zi Yan
---
 include/linux/huge_mm.h |  8 +++---
 mm/huge_memory.c        | 55 +++++++++++------------------------------
 2 files changed, 18 insertions(+), 45 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cbb2243f8e56..79343809a7be 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -369,10 +369,8 @@ int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list
 		unsigned int new_order, bool unmapped);
 int min_order_for_split(struct folio *folio);
 int split_folio_to_list(struct folio *folio, struct list_head *list);
-bool uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns);
-bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns);
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+		bool uniform_split, bool warns);
 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
 		struct list_head *list);
 
@@ -403,7 +401,7 @@ static inline int split_huge_page_to_order(struct page *page, unsigned int new_o
 static inline int try_folio_split_to_order(struct folio *folio,
 		struct page *page, unsigned int new_order)
 {
-	if (!non_uniform_split_supported(folio, new_order, /* warns= */ false))
+	if (!folio_split_supported(folio, new_order, /* uniform_split = */ false, /* warns= */ false))
 		return split_huge_page_to_order(&folio->page, new_order);
 	return folio_split(folio, new_order, page, NULL);
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d1fa0d2d9b44..f6d2cb2a5ca0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3673,55 +3673,34 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	return 0;
 }
 
-bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns)
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+		bool uniform_split, bool warns)
 {
-	if (folio_test_anon(folio)) {
-		/* order-1 is not supported for anonymous THP. */
-		VM_WARN_ONCE(warns && new_order == 1,
-				"Cannot split to order-1 folio");
-		return new_order != 1;
-	} else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
-		   !mapping_large_folio_support(folio->mapping)) {
-		/*
-		 * No split if the file system does not support large folio.
-		 * Note that we might still have THPs in such mappings due to
-		 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
-		 * does not actually support large folios properly.
-		 */
-		VM_WARN_ONCE(warns,
-			"Cannot split file folio to non-0 order");
-		return false;
-	}
-
-	/* Only swapping a whole PMD-mapped folio is supported */
-	if (folio_test_swapcache(folio)) {
-		VM_WARN_ONCE(warns,
-			"Cannot split swapcache folio to non-0 order");
-		return false;
-	}
+	bool need_check = uniform_split ? new_order : true;
 
-	return true;
-}
-
-/* See comments in non_uniform_split_supported() */
-bool uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns)
-{
 	if (folio_test_anon(folio)) {
+		/* order-1 is not supported for anonymous THP. */
 		VM_WARN_ONCE(warns && new_order == 1,
 				"Cannot split to order-1 folio");
 		return new_order != 1;
-	} else if (new_order) {
+	} else if (need_check) {
 		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 		    !mapping_large_folio_support(folio->mapping)) {
+			/*
+			 * No split if the file system does not support large
+			 * folio. Note that we might still have THPs in such
+			 * mappings due to CONFIG_READ_ONLY_THP_FOR_FS. But in
+			 * that case, the mapping does not actually support
+			 * large folios properly.
+			 */
 			VM_WARN_ONCE(warns,
 				"Cannot split file folio to non-0 order");
 			return false;
 		}
 	}
 
-	if (new_order && folio_test_swapcache(folio)) {
+	/* Only swapping a whole PMD-mapped folio is supported */
+	if (need_check && folio_test_swapcache(folio)) {
 		VM_WARN_ONCE(warns,
 			"Cannot split swapcache folio to non-0 order");
 		return false;
@@ -3779,11 +3758,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (new_order >= old_order)
 		return -EINVAL;
 
-	if (uniform_split && !uniform_split_supported(folio, new_order, true))
-		return -EINVAL;
-
-	if (!uniform_split &&
-	    !non_uniform_split_supported(folio, new_order, true))
+	if (!folio_split_supported(folio, new_order, uniform_split, /* warns = */ true))
 		return -EINVAL;
 
 	is_hzp = is_huge_zero_folio(folio);
-- 
2.34.1
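
Reviewer aid, not part of the patch: the @need_check initializer compresses
the old uniform/non-uniform difference into a single expression, and its
behavior is easy to model in isolation. Below is a minimal userspace C
sketch; need_check() here is a hypothetical stand-in that mirrors the
one-line initializer in folio_split_supported(), not a kernel function.

/*
 * Illustrative userspace model (not kernel code) of:
 *
 *     bool need_check = uniform_split ? new_order : true;
 *
 * For a uniform split, the file-mapping and swapcache restrictions only
 * matter when the target order is non-zero; a non-uniform split always
 * passes through intermediate non-zero orders, so they always apply.
 */
#include <stdbool.h>
#include <stdio.h>

static bool need_check(bool uniform_split, unsigned int new_order)
{
	return uniform_split ? new_order != 0 : true;
}

int main(void)
{
	/* uniform split to order 0: extra checks are skipped */
	printf("uniform, order 0 -> %d\n", need_check(true, 0));
	/* uniform split to a non-zero order: checks apply */
	printf("uniform, order 2 -> %d\n", need_check(true, 2));
	/* non-uniform split: checks always apply, even for order 0 */
	printf("non-uniform, order 0 -> %d\n", need_check(false, 0));
	return 0;
}

Compiling and running this prints 0, 1, 1: only a uniform split all the
way down to order 0 skips the file-system and swapcache checks, matching
the behavior of the two functions before they were merged.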