From be05f571464404432a0f8fe1c81a86a0862da283 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" <rppt@kernel.org> Date: Sun, 28 Dec 2025 20:39:42 +0200 Subject: [PATCH 1/3] memblock test: include <linux/sizes.h> from tools mm.h stub MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit memblock test compilation fails: memblock.c: In function ‘memblock_validate_numa_coverage’: memblock.c:784:58: error: ‘SZ_1M’ undeclared (first use in this function) 784 | mem_size_mb = memblock_phys_mem_size() / SZ_1M; | ^~~~~ The SZ_1M is defined in sizes.h, but it is not included by stub version of mm.h in tools/include/linux. Add include of sizes.h to tools/include/linux/mm.h to fix the compilation of memblock tests. Link: https://patch.msgid.link/20251228183942.3628918-1-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com> --- tools/include/linux/mm.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h index 677c37e4a18c..028f3faf46e7 100644 --- a/tools/include/linux/mm.h +++ b/tools/include/linux/mm.h @@ -4,6 +4,7 @@ #include <linux/mmzone.h> #include <uapi/linux/mman.h> +#include <linux/sizes.h> #define PAGE_SHIFT 12 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) From 58e3e5265484a1bf39569903630a45a924621aaa Mon Sep 17 00:00:00 2001 From: Shengming Hu Date: Mon, 29 Dec 2025 21:52:27 +0800 Subject: [PATCH 2/3] memblock: drop redundant 'struct page *' argument from memblock_free_pages() memblock_free_pages() currently takes both a struct page * and the corresponding PFN. The page pointer is always derived from the PFN at call sites (pfn_to_page(pfn)), making the parameter redundant and also allowing accidental mismatches between the two arguments. Simplify the interface by removing the struct page * argument and deriving the page locally from the PFN, after the deferred struct page initialization check. This keeps the behavior unchanged while making the helper harder to misuse. 
Signed-off-by: Shengming Hu Reviewed-by: David Hildenbrand (Red Hat) <david@redhat.com> Link: https://patch.msgid.link/tencent_F741CE6ECC49EE099736685E60C0DBD4A209@qq.com Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> --- mm/internal.h | 3 +-- mm/memblock.c | 4 ++-- mm/mm_init.c | 5 +++-- tools/testing/memblock/internal.h | 3 +-- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index e430da900430..5f93ee1459d9 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -742,8 +742,7 @@ static inline void clear_zone_contiguous(struct zone *zone) extern int __isolate_free_page(struct page *page, unsigned int order); extern void __putback_isolated_page(struct page *page, unsigned int order, int mt); -extern void memblock_free_pages(struct page *page, unsigned long pfn, - unsigned int order); +extern void memblock_free_pages(unsigned long pfn, unsigned int order); extern void __free_pages_core(struct page *page, unsigned int order, enum meminit_context context); diff --git a/mm/memblock.c b/mm/memblock.c index 905d06b16348..6e11f81c4870 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1771,7 +1771,7 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size) end = PFN_DOWN(base + size); for (; cursor < end; cursor++) { - memblock_free_pages(pfn_to_page(cursor), cursor, 0); + memblock_free_pages(cursor, 0); totalram_pages_inc(); } } @@ -2216,7 +2216,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end) while (start + (1UL << order) > end) order--; - memblock_free_pages(pfn_to_page(start), start, order); + memblock_free_pages(start, order); start += (1UL << order); } diff --git a/mm/mm_init.c b/mm/mm_init.c index fc2a6f1e518f..d5b91602ff2a 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2480,9 +2480,10 @@ void *__init alloc_large_system_hash(const char *tablename, return table; } -void __init memblock_free_pages(struct page *page, unsigned long pfn, - unsigned int order) +void __init memblock_free_pages(unsigned long 
pfn, unsigned int order) { + struct page *page = pfn_to_page(pfn); + if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) { int nid = early_pfn_to_nid(pfn); diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h index 0ab4b53bb4f3..009b97bbdd22 100644 --- a/tools/testing/memblock/internal.h +++ b/tools/testing/memblock/internal.h @@ -15,8 +15,7 @@ bool mirrored_kernelcore = false; struct page {}; -void memblock_free_pages(struct page *page, unsigned long pfn, - unsigned int order) +void memblock_free_pages(unsigned long pfn, unsigned int order) { } From f56ccc32468ee7885d3a9175e7d2cb608d301521 Mon Sep 17 00:00:00 2001 From: Kevin Lourenco <klourencodev@gmail.com> Date: Mon, 29 Dec 2025 17:13:21 +0100 Subject: [PATCH 3/3] mm/memtest: add underflow detection for size calculation The computation: end = start + (size - (start_phys_aligned - start_phys)) / incr could theoretically underflow if size < offset, leading to a massive iteration. Add VM_WARN_ON_ONCE() to detect cases where the region size is smaller than the alignment offset. While this should never happen in practice due to memblock guarantees, the warning helps catch potential bugs in early memory initialization code. Suggested-by: Mike Rapoport <rppt@kernel.org> Signed-off-by: Kevin Lourenco <klourencodev@gmail.com> Link: https://patch.msgid.link/20251229161321.9079-1-klourencodev@gmail.com Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> --- mm/memtest.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/memtest.c b/mm/memtest.c index c2c609c39119..520d41534cfa 100644 --- a/mm/memtest.c +++ b/mm/memtest.c @@ -50,6 +50,8 @@ static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size start_bad = 0; last_bad = 0; + VM_WARN_ON_ONCE(size < start_phys_aligned - start_phys); + for (p = start; p < end; p++) WRITE_ONCE(*p, pattern);