Move the NUMA_NO_NODE check out of the buddy and gigantic folio
allocation paths to clean up the code a bit; this also avoids
NUMA_NO_NODE being passed as 'nid' to node_isset() in
alloc_buddy_hugetlb_folio().

Reviewed-by: Sidhartha Kumar
Reviewed-by: Jane Chu
Signed-off-by: Kefeng Wang
---
 mm/hugetlb.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ab748964d219..4e8709d7deee 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1479,8 +1479,6 @@ static struct folio *alloc_gigantic_folio(int order, gfp_t gfp_mask,
 	struct folio *folio;
 	bool retried = false;
 
-	if (nid == NUMA_NO_NODE)
-		nid = numa_mem_id();
 retry:
 	folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
 	if (!folio) {
@@ -1942,8 +1940,6 @@ static struct folio *alloc_buddy_hugetlb_folio(int order, gfp_t gfp_mask,
 		alloc_try_hard = false;
 	if (alloc_try_hard)
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
-	if (nid == NUMA_NO_NODE)
-		nid = numa_mem_id();
 
 	folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
 
@@ -1979,6 +1975,9 @@ static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
 	struct folio *folio;
 	int order = huge_page_order(h);
 
+	if (nid == NUMA_NO_NODE)
+		nid = numa_mem_id();
+
 	if (order > MAX_PAGE_ORDER)
 		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
 	else
-- 
2.27.0
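
For readers without the full file at hand, below is a sketch of how
only_alloc_fresh_hugetlb_folio() reads with this patch applied, pieced
together from the hunks above. The full parameter list and everything
after the order check are assumptions inferred from the visible context
lines, not verbatim kernel source:

static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct folio *folio;
	int order = huge_page_order(h);

	/*
	 * Resolve NUMA_NO_NODE once in the common caller, so neither
	 * alloc_gigantic_folio() nor alloc_buddy_hugetlb_folio() has to;
	 * in particular, the node_isset(nid, ...) check on
	 * node_alloc_noretry in the buddy path now always sees a real
	 * node id.
	 */
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	if (order > MAX_PAGE_ORDER)
		folio = alloc_gigantic_folio(order, gfp_mask, nid, nmask);
	else
		folio = alloc_buddy_hugetlb_folio(order, gfp_mask, nid, nmask,
						  node_alloc_noretry);

	/* ... remainder of the function is unchanged by this patch ... */
	return folio;
}

Since only_alloc_fresh_hugetlb_folio() is the common entry point for both
allocation paths, hoisting the check there keeps the NUMA_NO_NODE fallback
in one place instead of duplicating it in each helper.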