From: Pratyush Brahma

Replace the manual bitwise conversion of bytes to MB with the SZ_1M
macro, a standard macro used within the mm subsystem, to improve
readability.

Signed-off-by: Pratyush Brahma
---
Changes in v2:
- Replaced the implementations in numa_emulation.c and memblock.c as
  well, per Mike's recommendation.
- Link to v1: https://lore.kernel.org/r/20250819-numa-memblks-refac-v1-1-936b4fd35000@oss.qualcomm.com
---
 mm/memblock.c       | 4 ++--
 mm/numa_emulation.c | 4 ++--
 mm/numa_memblks.c   | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/memblock.c b/mm/memblock.c
index 154f1d73b61f2234efe61c5cce5105be160d0041..8a0ed3074af4b4dacb87e45f3fecaeb6b3222fcf 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -780,9 +780,9 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_byt
 	}
 
 	if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
-		mem_size_mb = memblock_phys_mem_size() >> 20;
+		mem_size_mb = memblock_phys_mem_size() / SZ_1M;
 		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
-		       (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
+		       (nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
 		return false;
 	}
 
diff --git a/mm/numa_emulation.c b/mm/numa_emulation.c
index 9d55679d99ceea9807d41840cf097eb449afaf8e..703c8fa05048019317bc2a011d7b928884ddc934 100644
--- a/mm/numa_emulation.c
+++ b/mm/numa_emulation.c
@@ -73,7 +73,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
 	}
 
 	printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
-	       nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
+	       nid, eb->start, eb->end - 1, (eb->end - eb->start) / SZ_1M);
 	return 0;
 }
 
@@ -264,7 +264,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
 	min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
 	if (size < min_size) {
 		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
-		       size >> 20, min_size >> 20);
+		       size / SZ_1M, min_size / SZ_1M);
 		size = min_size;
 	}
 	size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);
diff --git a/mm/numa_memblks.c b/mm/numa_memblks.c
index 541a99c4071a67e5b0ef66f4136dee268a880003..a47aa262a33366337c38ccc7c7064da818523dd2 100644
--- a/mm/numa_memblks.c
+++ b/mm/numa_memblks.c
@@ -427,9 +427,9 @@ static int __init numa_register_meminfo(struct numa_meminfo *mi)
 		unsigned long pfn_align = node_map_pfn_alignment();
 
 		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
-			unsigned long node_align_mb = PFN_PHYS(pfn_align) >> 20;
+			unsigned long node_align_mb = PFN_PHYS(pfn_align) / SZ_1M;
 
-			unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) >> 20;
+			unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) / SZ_1M;
 
 			pr_warn("Node alignment %luMB < min %luMB, rejecting NUMA config\n",
 				node_align_mb, sect_align_mb);

---
base-commit: c17b750b3ad9f45f2b6f7e6f7f4679844244f0b9
change-id: 20250819-numa-memblks-refac-7b4b5017b598

Best regards,
--
Pratyush Brahma