There is no need to replace free hugepage folios if there are no free hugetlb folios. Since we do not replace gigantic folios, use isolate_or_dissolve_huge_folio(). Also skip some pfn iterations for compound pages, such as THP and non-compound high-order buddy pages, to save time. A simple test on a machine with 116G free memory, allocating 120 * 1G HugeTLB folios (107 successfully returned): time echo 120 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages Before: 0m0.602s After: 0m0.429s Signed-off-by: Kefeng Wang --- mm/hugetlb.c | 49 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1806685ea326..bc88b659a88b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2890,26 +2890,51 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list) */ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) { - struct folio *folio; - int ret = 0; + unsigned long nr = 0; + struct page *page; + struct hstate *h; + LIST_HEAD(list); + + /* Avoid pfn iterations if no free non-gigantic huge pages */ + for_each_hstate(h) { + if (!hstate_is_gigantic(h)) + nr += h->free_huge_pages; + } - LIST_HEAD(isolate_list); + if (!nr) + return 0; while (start_pfn < end_pfn) { - folio = pfn_folio(start_pfn); + page = pfn_to_page(start_pfn); + nr = 1; - /* Not to disrupt normal path by vainly holding hugetlb_lock */ - if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) { - ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list); - if (ret) - break; + if (PageHuge(page) || PageCompound(page)) { + struct folio *folio = page_folio(page); + + nr = 1UL << compound_order(page); - putback_movable_pages(&isolate_list); + if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) { + if (isolate_or_dissolve_huge_folio(folio, &list)) + return -ENOMEM; + + putback_movable_pages(&list); + } + } else if (PageBuddy(page)) { + /* + * Buddy order check without zone lock is unsafe and + * the 
order may be invalid, but the race window should be + * small, and the worst case is skipping a free hugetlb folio. + */ + const unsigned int order = buddy_order_unsafe(page); + + if (order <= MAX_PAGE_ORDER) + nr = 1UL << order; } - start_pfn++; + + start_pfn += nr; } - return ret; + return 0; } void wait_for_freed_hugetlb_folios(void) -- 2.27.0