To avoid a wrong nid being passed into the accounting, move the folio_nid() call into account_new_hugetlb_folio() (renamed from __prep_account_new_huge_page()) and pass the folio directly.

Reviewed-by: Sidhartha Kumar
Signed-off-by: Kefeng Wang
---
 mm/hugetlb.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 93aa32c51265..4131467fc1cd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1890,11 +1890,11 @@ void free_huge_folio(struct folio *folio)
 /*
  * Must be called with the hugetlb lock held
  */
-static void __prep_account_new_huge_page(struct hstate *h, int nid)
+static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
 	lockdep_assert_held(&hugetlb_lock);
 	h->nr_huge_pages++;
-	h->nr_huge_pages_node[nid]++;
+	h->nr_huge_pages_node[folio_nid(folio)]++;
 }
 
 static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
@@ -2020,7 +2020,7 @@ static void prep_and_add_allocated_folios(struct hstate *h,
 	/* Add all new pool pages to free lists in one lock cycle */
 	spin_lock_irqsave(&hugetlb_lock, flags);
 	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
-		__prep_account_new_huge_page(h, folio_nid(folio));
+		account_new_hugetlb_folio(h, folio);
 		enqueue_hugetlb_folio(h, folio);
 	}
 	spin_unlock_irqrestore(&hugetlb_lock, flags);
@@ -2232,7 +2232,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 	 * as surplus_pages, otherwise it might confuse
 	 * persistent_huge_pages() momentarily.
 	 */
-	__prep_account_new_huge_page(h, folio_nid(folio));
+	account_new_hugetlb_folio(h, folio);
 
 	/*
 	 * We could have raced with the pool size change.
@@ -2270,7 +2270,7 @@ static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mas
 		return NULL;
 
 	spin_lock_irq(&hugetlb_lock);
-	__prep_account_new_huge_page(h, folio_nid(folio));
+	account_new_hugetlb_folio(h, folio);
 	spin_unlock_irq(&hugetlb_lock);
 
 	/* fresh huge pages are frozen */
@@ -2829,7 +2829,7 @@ static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
 		/*
 		 * Ok, old_folio is still a genuine free hugepage. Remove it from
 		 * the freelist and decrease the counters. These will be
-		 * incremented again when calling __prep_account_new_huge_page()
+		 * incremented again when calling account_new_hugetlb_folio()
 		 * and enqueue_hugetlb_folio() for new_folio. The counters will
 		 * remain stable since this happens under the lock.
 		 */
@@ -2839,7 +2839,7 @@ static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
 		 * Ref count on new_folio is already zero as it was dropped
 		 * earlier. It can be directly added to the pool free list.
 		 */
-		__prep_account_new_huge_page(h, nid);
+		account_new_hugetlb_folio(h, new_folio);
 		enqueue_hugetlb_folio(h, new_folio);
 
 		/*
@@ -3309,7 +3309,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
 		hugetlb_bootmem_init_migratetype(folio, h);
 		/* Subdivide locks to achieve better parallel performance */
 		spin_lock_irqsave(&hugetlb_lock, flags);
-		__prep_account_new_huge_page(h, folio_nid(folio));
+		account_new_hugetlb_folio(h, folio);
 		enqueue_hugetlb_folio(h, folio);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 	}
-- 
2.27.0
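
Note (illustrative only, not part of the patch): below is a minimal user-space sketch of the idea behind the change above. fake_hstate, fake_folio, account_by_nid() and account_by_folio() are made-up stand-ins for the kernel's hstate/folio types and helpers, shown only to illustrate why deriving the node id inside the accounting helper removes the chance of a caller passing a mismatched nid.

/*
 * Minimal user-space sketch (NOT kernel code): the struct and helper
 * names below are hypothetical stand-ins for hstate/folio.
 */
#include <stdio.h>

#define MAX_NODES 4

struct fake_folio {
	int nid;			/* node the folio actually sits on */
};

struct fake_hstate {
	long nr_huge_pages;
	long nr_huge_pages_node[MAX_NODES];
};

/* Old shape: the caller supplies the nid and can get it wrong. */
static void account_by_nid(struct fake_hstate *h, int nid)
{
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
}

/* New shape: the helper derives the nid from the folio itself. */
static void account_by_folio(struct fake_hstate *h, struct fake_folio *folio)
{
	h->nr_huge_pages++;
	h->nr_huge_pages_node[folio->nid]++;
}

int main(void)
{
	struct fake_hstate h = { 0 };
	struct fake_folio folio = { .nid = 2 };

	account_by_nid(&h, 0);		/* caller passed a stale nid: node 0 gets charged */
	account_by_folio(&h, &folio);	/* nid comes from the folio: node 2 gets charged */

	printf("node0=%ld node2=%ld total=%ld\n",
	       h.nr_huge_pages_node[0], h.nr_huge_pages_node[2],
	       h.nr_huge_pages);
	return 0;
}

With the folio-based form the per-node counter can only be charged to the node the folio actually lives on, which is what the hugetlb change enforces by calling folio_nid() inside account_new_hugetlb_folio().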