To support frozen page allocation in subsequent changes, add an alloc-flags
argument to __cma_alloc() and pass it through to alloc_contig_range().
No functional change: all existing callers pass ACR_FLAGS_CMA.

Signed-off-by: Kefeng Wang
---
 mm/cma.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index e56ec64d0567..3f3c96be67f7 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -778,7 +778,8 @@ static void cma_debug_show_areas(struct cma *cma)
 
 static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
 			unsigned long count, unsigned int align,
-			struct page **pagep, gfp_t gfp)
+			struct page **pagep, gfp_t gfp,
+			acr_flags_t alloc_flags)
 {
 	unsigned long mask, offset;
 	unsigned long pfn = -1;
@@ -823,7 +824,7 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
 
 		pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma->alloc_mutex);
-		ret = alloc_contig_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp);
+		ret = alloc_contig_range(pfn, pfn + count, alloc_flags, gfp);
 		mutex_unlock(&cma->alloc_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
@@ -848,7 +849,7 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
 }
 
 static struct page *__cma_alloc(struct cma *cma, unsigned long count,
-			unsigned int align, gfp_t gfp)
+			unsigned int align, gfp_t gfp, acr_flags_t alloc_flags)
 {
 	struct page *page = NULL;
 	int ret = -ENOMEM, r;
@@ -870,7 +871,7 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
 
 		page = NULL;
 		ret = cma_range_alloc(cma, &cma->ranges[r], count, align,
-				      &page, gfp);
+				      &page, gfp, alloc_flags);
 		if (ret != -EBUSY || page)
 			break;
 	}
@@ -918,7 +919,9 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
 struct page *cma_alloc(struct cma *cma, unsigned long count,
 		       unsigned int align, bool no_warn)
 {
-	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+	return __cma_alloc(cma, count, align,
+			   GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0),
+			   ACR_FLAGS_CMA);
 }
 
 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
@@ -928,7 +931,7 @@ struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
 	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
 		return NULL;
 
-	page = __cma_alloc(cma, 1 << order, order, gfp);
+	page = __cma_alloc(cma, 1 << order, order, gfp, ACR_FLAGS_CMA);
 
 	return page ? page_folio(page) : NULL;
 }
-- 
2.27.0