Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-04 20:19:47 +08:00)
mm/cma: simplify zone intersection check
cma_activate_area walks all pages in the area, checking their zone individually to see if the area resides in more than one zone.

Make this a little more efficient by using the recently introduced pfn_range_intersects_zones() function. Store the NUMA node id (if any) in the cma structure to facilitate this.

Link: https://lkml.kernel.org/r/20250228182928.2645936-23-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
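As a side note for readers outside the mm/ tree, here is a minimal userspace sketch of the equivalence the change relies on: walking every pfn and comparing zones gives the same answer as a single range query against the zone table. The toy_zone table, toy_page_zone() and toy_pfn_range_intersects_zones() below are illustrative stand-ins invented for this sketch, not the kernel implementations; the actual change is the mm/cma.c diff that follows.

/*
 * Toy model of the check being simplified: compare a per-pfn zone walk
 * against a single "does this range straddle a zone boundary?" query.
 * The zone table and helpers are stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct toy_zone {
	int nid;
	unsigned long start_pfn;
	unsigned long end_pfn;	/* exclusive */
};

/* Pretend layout: one node, two zones with a boundary at pfn 0x1000. */
static const struct toy_zone zones[] = {
	{ .nid = 0, .start_pfn = 0,      .end_pfn = 0x1000 },
	{ .nid = 0, .start_pfn = 0x1000, .end_pfn = 0x4000 },
};

static const struct toy_zone *toy_page_zone(unsigned long pfn)
{
	for (size_t i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
		if (pfn >= zones[i].start_pfn && pfn < zones[i].end_pfn)
			return &zones[i];
	return NULL;
}

/* Old style: O(nr_pages) walk, one zone lookup per pfn. */
static bool spans_zones_by_walking(unsigned long base_pfn, unsigned long count)
{
	const struct toy_zone *zone = toy_page_zone(base_pfn);

	for (unsigned long pfn = base_pfn + 1; pfn < base_pfn + count; pfn++)
		if (toy_page_zone(pfn) != zone)
			return true;
	return false;
}

/* New style: one range query, restricted to @nid unless it is NUMA_NO_NODE. */
static bool toy_pfn_range_intersects_zones(int nid, unsigned long start_pfn,
					   unsigned long nr_pages)
{
	int hits = 0;

	for (size_t i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		if (nid != NUMA_NO_NODE && zones[i].nid != nid)
			continue;
		if (start_pfn < zones[i].end_pfn &&
		    start_pfn + nr_pages > zones[i].start_pfn)
			hits++;
	}
	return hits > 1;	/* range overlaps more than one zone */
}

int main(void)
{
	unsigned long base = 0xf00, count = 0x400;	/* crosses 0x1000 */

	printf("walk:  %d\n", spans_zones_by_walking(base, count));
	printf("range: %d\n", toy_pfn_range_intersects_zones(0, base, count));
	return 0;
}

Both checks report "1" for a range that crosses the zone boundary; the range query just avoids touching every page.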
commit b51d3db91d
parent 08efe29350
 mm/cma.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -103,7 +103,6 @@ static void __init cma_activate_area(struct cma *cma)
 {
 	unsigned long pfn, base_pfn;
 	int allocrange, r;
-	struct zone *zone;
 	struct cma_memrange *cmr;
 
 	for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
@@ -124,12 +123,8 @@ static void __init cma_activate_area(struct cma *cma)
 		 * CMA resv range to be in the same zone.
 		 */
 		WARN_ON_ONCE(!pfn_valid(base_pfn));
-		zone = page_zone(pfn_to_page(base_pfn));
-		for (pfn = base_pfn + 1; pfn < base_pfn + cmr->count; pfn++) {
-			WARN_ON_ONCE(!pfn_valid(pfn));
-			if (page_zone(pfn_to_page(pfn)) != zone)
-				goto cleanup;
-		}
+		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count))
+			goto cleanup;
 
 		for (pfn = base_pfn; pfn < base_pfn + cmr->count;
 		     pfn += pageblock_nr_pages)
@@ -261,6 +256,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	cma->ranges[0].base_pfn = PFN_DOWN(base);
 	cma->ranges[0].count = cma->count;
 	cma->nranges = 1;
+	cma->nid = NUMA_NO_NODE;
 
 	*res_cma = cma;
 
@@ -497,6 +493,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size,
 	}
 
 	cma->nranges = nr;
+	cma->nid = nid;
 	*res_cma = cma;
 
 out:
@@ -684,6 +681,8 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t base,
 	if (ret)
 		memblock_phys_free(base, size);
 
+	(*res_cma)->nid = nid;
+
 	return ret;
 }
 