Kill cma_pages_valid(), which is only used in cma_release(), and clean
up the code duplication between the CMA pages validity check and the
CMA memrange lookup. Add a __cma_release() helper to prepare for the
upcoming frozen page release.

Reviewed-by: Jane Chu
Signed-off-by: Kefeng Wang
---
 include/linux/cma.h |  1 -
 mm/cma.c            | 57 ++++++++++++---------------------------------
 2 files changed, 15 insertions(+), 43 deletions(-)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..e5745d2aec55 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -49,7 +49,6 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
 			      bool no_warn);
-extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
 
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
diff --git a/mm/cma.c b/mm/cma.c
index 813e6dc7b095..2af8c5bc58dd 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -942,34 +942,36 @@ struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
 	return page ? page_folio(page) : NULL;
 }
 
-bool cma_pages_valid(struct cma *cma, const struct page *pages,
-		     unsigned long count)
+static bool __cma_release(struct cma *cma, const struct page *pages,
+			  unsigned long count)
 {
 	unsigned long pfn, end;
 	int r;
 	struct cma_memrange *cmr;
-	bool ret;
+
+	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
 
 	if (!cma || !pages || count > cma->count)
 		return false;
 
 	pfn = page_to_pfn(pages);
-	ret = false;
 
 	for (r = 0; r < cma->nranges; r++) {
 		cmr = &cma->ranges[r];
 		end = cmr->base_pfn + cmr->count;
-		if (pfn >= cmr->base_pfn && pfn < end) {
-			ret = pfn + count <= end;
+		if (pfn >= cmr->base_pfn && pfn < end && pfn + count <= end)
 			break;
-		}
 	}
 
-	if (!ret)
-		pr_debug("%s(page %p, count %lu)\n",
-				__func__, (void *)pages, count);
+	if (r == cma->nranges)
+		return false;
 
-	return ret;
+	free_contig_range(pfn, count);
+	cma_clear_bitmap(cma, cmr, pfn, count);
+	cma_sysfs_account_release_pages(cma, count);
+	trace_cma_release(cma->name, pfn, pages, count);
+
+	return true;
 }
 
 /**
@@ -985,36 +987,7 @@ bool cma_pages_valid(struct cma *cma, const struct page *pages,
 bool cma_release(struct cma *cma, const struct page *pages,
 		 unsigned long count)
 {
-	struct cma_memrange *cmr;
-	unsigned long pfn, end_pfn;
-	int r;
-
-	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
-
-	if (!cma_pages_valid(cma, pages, count))
-		return false;
-
-	pfn = page_to_pfn(pages);
-	end_pfn = pfn + count;
-
-	for (r = 0; r < cma->nranges; r++) {
-		cmr = &cma->ranges[r];
-		if (pfn >= cmr->base_pfn &&
-		    pfn < (cmr->base_pfn + cmr->count)) {
-			VM_BUG_ON(end_pfn > cmr->base_pfn + cmr->count);
-			break;
-		}
-	}
-
-	if (r == cma->nranges)
-		return false;
-
-	free_contig_range(pfn, count);
-	cma_clear_bitmap(cma, cmr, pfn, count);
-	cma_sysfs_account_release_pages(cma, count);
-	trace_cma_release(cma->name, pfn, pages, count);
-
-	return true;
+	return __cma_release(cma, pages, count);
 }
 
 bool cma_free_folio(struct cma *cma, const struct folio *folio)
@@ -1022,7 +995,7 @@ bool cma_free_folio(struct cma *cma, const struct folio *folio)
 	if (WARN_ON(!folio_test_large(folio)))
 		return false;
 
-	return cma_release(cma, &folio->page, folio_nr_pages(folio));
+	return __cma_release(cma, &folio->page, folio_nr_pages(folio));
 }
 
 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
-- 
2.43.0
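
Usage note (not part of the patch; the caller below is a hypothetical sketch):
with cma_pages_valid() no longer exported, a CMA user simply pairs cma_alloc()
with cma_release() and checks the boolean result, since the range validation
now happens inside __cma_release():

#include <linux/cma.h>
#include <linux/printk.h>

/*
 * Hypothetical example; assumes @cma was set up earlier, e.g. via
 * cma_init_reserved_mem().
 */
static void cma_usage_example(struct cma *cma)
{
	unsigned long count = 4;	/* arbitrary number of pages */
	struct page *page;

	/* order-0 alignment; no_warn == false, so allocation failures may warn */
	page = cma_alloc(cma, count, 0, false);
	if (!page)
		return;

	/* ... use the physically contiguous pages ... */

	/* cma_release() returns false if the pages are not owned by this CMA area */
	if (!cma_release(cma, page, count))
		pr_warn("cma example: pages were not released\n");
}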