The functions uniform_split_supported() and non_uniform_split_supported()
share largely similar logic. The only functional difference is that
uniform_split_supported() performs an additional check on the requested
@new_order. That check exists for two reasons:

  * some file systems and the swap cache only support order-0 folios
  * uniform and non-uniform splits behave differently

The behavioral difference between uniform and non-uniform split:

  * uniform split splits the folio directly to @new_order
  * non-uniform split creates after-split folios with orders from
    folio_order(folio) - 1 down to @new_order

For example, a non-uniform split of an order-9 folio to order 0 produces
folios of orders 8, 7, ..., 1, 0, 0, so every intermediate order must be
supported, not just the target order.

This means that for a non-uniform split, or a uniform split to a non-zero
@new_order, we must check both the file system's large folio support and
whether the folio sits in the swap cache.

Unify the logic and merge the two functions into a single combined helper,
removing redundant code and simplifying the split support check.

Signed-off-by: Wei Yang
Cc: Zi Yan
Cc: "David Hildenbrand (Red Hat)"

---
v3:
  * adjust to use split_type
  * rebase on Zi Yan's fix:
    lkml.kernel.org/r/20251105162910.752266-1-ziy@nvidia.com

v2:
  * remove need_check
  * update comment
  * add more explanation in the change log
---
 include/linux/huge_mm.h |  8 ++---
 mm/huge_memory.c        | 71 +++++++++++++++++------------------------
 2 files changed, 33 insertions(+), 46 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9e96dbe2f246..6f9e711b0954 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -374,10 +374,8 @@ int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list
 		unsigned int new_order, bool unmapped);
 int min_order_for_split(struct folio *folio);
 int split_folio_to_list(struct folio *folio, struct list_head *list);
-bool uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns);
-bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns);
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+		enum split_type split_type, bool warns);
 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
 		struct list_head *list);
 
@@ -408,7 +406,7 @@ static inline int split_huge_page_to_order(struct page *page, unsigned int new_o
 static inline int try_folio_split_to_order(struct folio *folio,
 		struct page *page, unsigned int new_order)
 {
-	if (!non_uniform_split_supported(folio, new_order, /* warns= */ false))
+	if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
 		return split_huge_page_to_order(&folio->page, new_order);
 	return folio_split(folio, new_order, page, NULL);
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 659532199233..c676f2ab0611 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3686,8 +3686,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	return 0;
 }
 
-bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns)
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+		enum split_type split_type, bool warns)
 {
 	if (folio_test_anon(folio)) {
 		/* order-1 is not supported for anonymous THP. */
@@ -3695,48 +3695,41 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
 			       "Cannot split to order-1 folio");
 		if (new_order == 1)
 			return false;
-	} else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
-		   !mapping_large_folio_support(folio->mapping)) {
-		/*
-		 * No split if the file system does not support large folio.
-		 * Note that we might still have THPs in such mappings due to
-		 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
-		 * does not actually support large folios properly.
-		 */
-		VM_WARN_ONCE(warns,
-			"Cannot split file folio to non-0 order");
-		return false;
-	}
-
-	/* Only swapping a whole PMD-mapped folio is supported */
-	if (folio_test_swapcache(folio)) {
-		VM_WARN_ONCE(warns,
-			"Cannot split swapcache folio to non-0 order");
-		return false;
-	}
-
-	return true;
-}
-
-/* See comments in non_uniform_split_supported() */
-bool uniform_split_supported(struct folio *folio, unsigned int new_order,
-		bool warns)
-{
-	if (folio_test_anon(folio)) {
-		VM_WARN_ONCE(warns && new_order == 1,
-			       "Cannot split to order-1 folio");
-		if (new_order == 1)
-			return false;
-	} else if (new_order) {
+	} else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
 		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 		    !mapping_large_folio_support(folio->mapping)) {
+			/*
+			 * We can always split a folio down to a single page
+			 * (new_order == 0) uniformly.
+			 *
+			 * For any other scenario
+			 *   a) uniform split targeting a large folio
+			 *      (new_order > 0)
+			 *   b) any non-uniform split
+			 * we must confirm that the file system supports large
+			 * folios.
+			 *
+			 * Note that we might still have THPs in such
+			 * mappings, which are created by khugepaged when
+			 * CONFIG_READ_ONLY_THP_FOR_FS is enabled. But in that
+			 * case, the mapping does not actually support large
+			 * folios properly.
+			 */
 			VM_WARN_ONCE(warns,
 				"Cannot split file folio to non-0 order");
 			return false;
 		}
 	}
 
-	if (new_order && folio_test_swapcache(folio)) {
+	/*
+	 * A swapcache folio can only be split to order 0.
+	 *
+	 * A non-uniform split creates after-split folios with orders from
+	 * folio_order(folio) - 1 to new_order, making it unsuitable for any
+	 * swapcache folio split. Only a uniform split to order 0 can be used
+	 * here.
+	 */
+	if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
 		VM_WARN_ONCE(warns,
 			"Cannot split swapcache folio to non-0 order");
 		return false;
@@ -3794,11 +3787,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (new_order >= old_order)
 		return -EINVAL;
 
-	if (split_type == SPLIT_TYPE_UNIFORM && !uniform_split_supported(folio, new_order, true))
-		return -EINVAL;
-
-	if (split_type == SPLIT_TYPE_NON_UNIFORM &&
-	    !non_uniform_split_supported(folio, new_order, true))
+	if (!folio_split_supported(folio, new_order, split_type, /* warns= */ true))
 		return -EINVAL;
 
 	is_hzp = is_huge_zero_folio(folio);
-- 
2.34.1
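
For reviewers, a minimal stand-alone userspace sketch of the combined
decision logic may be easier to reason about than the merged diff. It is
not kernel code: the enum values, the folio_model struct and the
split_supported() helper below are made up for the model, and the
IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) gate on the file-backed check is
omitted.

#include <stdbool.h>
#include <stdio.h>

enum split_type { SPLIT_TYPE_UNIFORM, SPLIT_TYPE_NON_UNIFORM };

struct folio_model {
	bool anon;		/* stands in for folio_test_anon()             */
	bool swapcache;		/* stands in for folio_test_swapcache()        */
	bool mapping_large;	/* stands in for mapping_large_folio_support() */
};

/* Mirrors the checks in folio_split_supported() after this patch. */
static bool split_supported(const struct folio_model *f,
			    unsigned int new_order, enum split_type type)
{
	if (f->anon) {
		/* order-1 anon folios are not supported */
		if (new_order == 1)
			return false;
	} else if (type == SPLIT_TYPE_NON_UNIFORM || new_order) {
		/* file-backed: every after-split order must be supported */
		if (!f->mapping_large)
			return false;
	}

	/* swapcache folios only allow a uniform split to order 0 */
	if ((type == SPLIT_TYPE_NON_UNIFORM || new_order) && f->swapcache)
		return false;

	return true;
}

int main(void)
{
	struct folio_model file_no_large = { .mapping_large = false };

	/* uniform split to order 0 is still allowed ... */
	printf("uniform   -> 0: %d\n",
	       split_supported(&file_no_large, 0, SPLIT_TYPE_UNIFORM));
	/* ... but any non-uniform split of the same folio is rejected */
	printf("non-unif. -> 0: %d\n",
	       split_supported(&file_no_large, 0, SPLIT_TYPE_NON_UNIFORM));
	return 0;
}

Running the model prints 1 for the uniform order-0 case and 0 for the
non-uniform one, matching the behavior described in the change log.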