Atomic bit operations aren't needed for the cma flags field, so switch
their manipulation over to normal AND/OR operations. Also export the
bit values in linux/cma.h, as we will be adding publicly used values
later.

No functional change.

Signed-off-by: Frank van der Linden
---
 include/linux/cma.h | 12 ++++++++++++
 mm/cma.c            | 16 ++++++++--------
 mm/cma.h            |  7 -------
 3 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..5c3fdc5da908 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -20,6 +20,18 @@
 #define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
 #define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
 
+enum cma_flags {
+	__CMA_RESERVE_PAGES_ON_ERROR,
+	__CMA_ZONES_VALID,
+	__CMA_ZONES_INVALID,
+	__CMA_ACTIVATED,
+};
+
+#define CMA_RESERVE_PAGES_ON_ERROR	BIT(__CMA_RESERVE_PAGES_ON_ERROR)
+#define CMA_ZONES_VALID			BIT(__CMA_ZONES_VALID)
+#define CMA_ZONES_INVALID		BIT(__CMA_ZONES_INVALID)
+#define CMA_ACTIVATED			BIT(__CMA_ACTIVATED)
+
 struct cma;
 
 extern unsigned long totalcma_pages;
diff --git a/mm/cma.c b/mm/cma.c
index 2ffa4befb99a..549d85b2e3a3 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -111,8 +111,8 @@ bool cma_validate_zones(struct cma *cma)
	 * check has already been done. If neither is set, the
	 * check has not been performed yet.
	 */
-	valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
-	if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
+	valid_bit_set = (cma->flags & CMA_ZONES_VALID);
+	if (valid_bit_set || (cma->flags & CMA_ZONES_INVALID))
 		return valid_bit_set;
 
 	for (r = 0; r < cma->nranges; r++) {
@@ -126,12 +126,12 @@ bool cma_validate_zones(struct cma *cma)
		 */
 		WARN_ON_ONCE(!pfn_valid(base_pfn));
 		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
-			set_bit(CMA_ZONES_INVALID, &cma->flags);
+			cma->flags |= CMA_ZONES_INVALID;
 			return false;
 		}
 	}
 
-	set_bit(CMA_ZONES_VALID, &cma->flags);
+	cma->flags |= CMA_ZONES_VALID;
 
 	return true;
 }
@@ -176,7 +176,7 @@ static void __init cma_activate_area(struct cma *cma)
 	INIT_HLIST_HEAD(&cma->mem_head);
 	spin_lock_init(&cma->mem_head_lock);
 #endif
-	set_bit(CMA_ACTIVATED, &cma->flags);
+	cma->flags |= CMA_ACTIVATED;
 
 	return;
 
@@ -185,7 +185,7 @@ static void __init cma_activate_area(struct cma *cma)
 		bitmap_free(cma->ranges[r].bitmap);
 
 	/* Expose all pages to the buddy, they are useless for CMA. */
-	if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
+	if (!(cma->flags & CMA_RESERVE_PAGES_ON_ERROR)) {
 		for (r = 0; r < allocrange; r++) {
 			cmr = &cma->ranges[r];
 			end_pfn = cmr->base_pfn + cmr->count;
@@ -211,7 +211,7 @@ core_initcall(cma_init_reserved_areas);
 
 void __init cma_reserve_pages_on_error(struct cma *cma)
 {
-	set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
+	cma->flags |= CMA_RESERVE_PAGES_ON_ERROR;
 }
 
 static int __init cma_new_area(const char *name, phys_addr_t size,
@@ -1085,7 +1085,7 @@ void __init *cma_reserve_early(struct cma *cma, unsigned long size)
	/*
	 * Can only be called early in init.
	 */
-	if (test_bit(CMA_ACTIVATED, &cma->flags))
+	if (cma->flags & CMA_ACTIVATED)
 		return NULL;
 
 	if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES))
diff --git a/mm/cma.h b/mm/cma.h
index c70180c36559..25b696774c6a 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -64,13 +64,6 @@ struct cma {
 	int nid;
 };
 
-enum cma_flags {
-	CMA_RESERVE_PAGES_ON_ERROR,
-	CMA_ZONES_VALID,
-	CMA_ZONES_INVALID,
-	CMA_ACTIVATED,
-};
-
 extern struct cma cma_areas[MAX_CMA_AREAS];
 extern unsigned int cma_area_count;
 
-- 
2.51.0.384.g4c02a37b29-goog
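
P.S. For readers less familiar with the two idioms: set_bit()/test_bit()
take a bit *number* and are atomic against concurrent modifiers, while
the plain AND/OR form takes a precomputed bit *mask* and is only correct
because nothing updates cma->flags concurrently (the flags are written
during early init and activation). The sketch below is a minimal
userspace illustration of the before/after access patterns, not kernel
code; BIT() and a stripped-down struct cma are redefined locally so it
compiles standalone:

	#include <assert.h>

	#define BIT(nr) (1UL << (nr))

	/* Mirrors the flag layout from linux/cma.h after this patch. */
	enum cma_flags {
		__CMA_RESERVE_PAGES_ON_ERROR,
		__CMA_ZONES_VALID,
		__CMA_ZONES_INVALID,
		__CMA_ACTIVATED,
	};

	#define CMA_ZONES_VALID		BIT(__CMA_ZONES_VALID)
	#define CMA_ZONES_INVALID	BIT(__CMA_ZONES_INVALID)

	struct cma {
		unsigned long flags;	/* plain field, no atomics needed */
	};

	int main(void)
	{
		struct cma cma = { .flags = 0 };

		/* Before: set_bit(CMA_ZONES_VALID, &cma->flags), which
		 * took the bit number. After: a plain OR with the mask. */
		cma.flags |= CMA_ZONES_VALID;

		/* Before: test_bit(CMA_ZONES_INVALID, &cma->flags).
		 * After: a plain AND against the mask. */
		assert(cma.flags & CMA_ZONES_VALID);
		assert(!(cma.flags & CMA_ZONES_INVALID));

		return 0;
	}

Note that the patch renames the enum members to __CMA_* precisely because
the exported CMA_* names are now masks (BIT(__CMA_*)) rather than bit
numbers, so the two can't be confused at a call site.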