Check hugetlb_cma_size first, which helps to avoid an unnecessary gfp
check or nodemask traversal when no hugetlb CMA area is reserved. Since
hugetlb_cma_size is now read at allocation time rather than only during
init, change its annotation from __initdata to __ro_after_init.

Signed-off-by: Kefeng Wang
---
 mm/hugetlb_cma.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
index 0ddf9755c090..d8fa93825992 100644
--- a/mm/hugetlb_cma.c
+++ b/mm/hugetlb_cma.c
@@ -16,7 +16,7 @@
 static struct cma *hugetlb_cma[MAX_NUMNODES];
 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
 static bool hugetlb_cma_only;
-static unsigned long hugetlb_cma_size __initdata;
+static unsigned long hugetlb_cma_size __ro_after_init;
 
 void hugetlb_cma_free_frozen_folio(struct folio *folio)
 {
@@ -31,6 +31,9 @@ struct folio *hugetlb_cma_alloc_frozen_folio(int order, gfp_t gfp_mask,
 	struct folio *folio;
 	struct page *page = NULL;
 
+	if (!hugetlb_cma_size)
+		return NULL;
+
 	if (hugetlb_cma[nid])
 		page = cma_alloc_frozen_compound(hugetlb_cma[nid], order);
 
-- 
2.27.0
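
For reference, the "gfp check or nodemask traversal" mentioned in the
changelog is the fallback path later in hugetlb_cma_alloc_frozen_folio().
A rough sketch of that shape (assumed here for illustration, not copied
from mm/hugetlb_cma.c) looks like:

	/*
	 * Sketch (assumed shape): after the preferred-node attempt fails,
	 * the function checks gfp_mask and walks the nodemask looking for
	 * another node with a hugetlb CMA area.  With hugetlb_cma_size == 0
	 * none of this can ever succeed, so the new early return skips it.
	 */
	if (!page && !(gfp_mask & __GFP_THISNODE)) {
		int node;

		for_each_node_mask(node, *nodemask) {
			if (node == nid || !hugetlb_cma[node])
				continue;
			page = cma_alloc_frozen_compound(hugetlb_cma[node],
							 order);
			if (page)
				break;
		}
	}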