
mm: numa,memblock: Use SZ_1M macro to denote bytes to MB conversion

Replace the manual bitwise conversion of bytes to MB (a right shift
by 20) with the SZ_1M macro, a standard macro already used within the
mm subsystem, to improve readability.

Signed-off-by: Pratyush Brahma <pratyush.brahma@oss.qualcomm.com>
Link: https://lore.kernel.org/r/20250820-numa-memblks-refac-v2-1-43bf1af02acd@oss.qualcomm.com
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>

commit 4647c4dead
parent d045c31540
Author:     Pratyush Brahma
AuthorDate: 2025-08-20 06:29:34 +05:30
Commit:     Mike Rapoport (Microsoft)

3 changed files with 6 additions and 6 deletions
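
For readers unfamiliar with the macro: SZ_1M is defined in include/linux/sizes.h as 0x00100000 (1 MiB), so dividing by it yields the same result as the old right shift by 20. A minimal userspace sketch (not part of this commit; SZ_1M is redefined locally here as a stand-in for the kernel header) illustrating the equivalence:

#include <stdio.h>

/* Local stand-in for the kernel's SZ_1M from include/linux/sizes.h. */
#define SZ_1M 0x00100000

int main(void)
{
	unsigned long bytes = 3UL * 1024 * 1024 * 1024;	/* 3 GiB worth of bytes */
	unsigned long mb_shift = bytes >> 20;		/* old style: manual shift */
	unsigned long mb_div = bytes / SZ_1M;		/* new style: divide by SZ_1M */

	printf("%lu MB == %lu MB\n", mb_shift, mb_div);	/* prints "3072 MB == 3072 MB" */
	return 0;
}

Since the divisor is a power-of-two constant, the compiler lowers the division back to the same shift, so the change affects readability only, not generated code.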

@@ -780,9 +780,9 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
 	}
 
 	if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
-		mem_size_mb = memblock_phys_mem_size() >> 20;
+		mem_size_mb = memblock_phys_mem_size() / SZ_1M;
 		pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
-			(nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
+			(nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
 		return false;
 	}

@@ -73,7 +73,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
 	}
 
 	printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
-	       nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
+	       nid, eb->start, eb->end - 1, (eb->end - eb->start) / SZ_1M);
 	return 0;
 }
@@ -264,7 +264,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
 	min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
 	if (size < min_size) {
 		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
-			size >> 20, min_size >> 20);
+			size / SZ_1M, min_size / SZ_1M);
 		size = min_size;
 	}
 	size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);

@@ -427,9 +427,9 @@ static int __init numa_register_meminfo(struct numa_meminfo *mi)
 		unsigned long pfn_align = node_map_pfn_alignment();
 
 		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
-			unsigned long node_align_mb = PFN_PHYS(pfn_align) >> 20;
+			unsigned long node_align_mb = PFN_PHYS(pfn_align) / SZ_1M;
 
-			unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) >> 20;
+			unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) / SZ_1M;
 
 			pr_warn("Node alignment %luMB < min %luMB, rejecting NUMA config\n",
 				node_align_mb, sect_align_mb);