Merge tag 'dma-mapping-6.19-2026-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux

Pull dma-mapping fixes from Marek Szyprowski:

 - minor fixes for the corner cases of the SWIOTLB pool management
   (Robin Murphy)

* tag 'dma-mapping-6.19-2026-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
  dma/pool: Avoid allocating redundant pools
  mm_zone: Generalise has_managed_dma()
  dma/pool: Improve pool lookup
This commit is contained in:
Linus Torvalds
2026-01-20 10:16:18 -08:00
3 changed files with 25 additions and 19 deletions

View File

@@ -1648,14 +1648,15 @@ static inline int is_highmem(const struct zone *zone)
 	return is_highmem_idx(zone_idx(zone));
 }
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void);
-#else
+bool has_managed_zone(enum zone_type zone);
 static inline bool has_managed_dma(void)
 {
+#ifdef CONFIG_ZONE_DMA
+	return has_managed_zone(ZONE_DMA);
+#else
 	return false;
-}
 #endif
+}
 #ifndef CONFIG_NUMA

View File

@@ -184,6 +184,12 @@ static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
 	return pool;
 }
+#ifdef CONFIG_ZONE_DMA32
+#define has_managed_dma32 has_managed_zone(ZONE_DMA32)
+#else
+#define has_managed_dma32 false
+#endif
 static int __init dma_atomic_pool_init(void)
 {
 	int ret = 0;
@@ -199,17 +205,20 @@ static int __init dma_atomic_pool_init(void)
 	}
 	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
-	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
-						    GFP_KERNEL);
-	if (!atomic_pool_kernel)
-		ret = -ENOMEM;
+	/* All memory might be in the DMA zone(s) to begin with */
+	if (has_managed_zone(ZONE_NORMAL)) {
+		atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
+							    GFP_KERNEL);
+		if (!atomic_pool_kernel)
+			ret = -ENOMEM;
+	}
 	if (has_managed_dma()) {
 		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
 						GFP_KERNEL | GFP_DMA);
 		if (!atomic_pool_dma)
 			ret = -ENOMEM;
 	}
-	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+	if (has_managed_dma32) {
 		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
 					GFP_KERNEL | GFP_DMA32);
 		if (!atomic_pool_dma32)
@@ -224,11 +233,11 @@ postcore_initcall(dma_atomic_pool_init);
 static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 {
 	if (prev == NULL) {
-		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
-			return atomic_pool_dma32;
-		if (atomic_pool_dma && (gfp & GFP_DMA))
-			return atomic_pool_dma;
-		return atomic_pool_kernel;
+		if (gfp & GFP_DMA)
+			return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
+		if (gfp & GFP_DMA32)
+			return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
+		return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
 	}
 	if (prev == atomic_pool_kernel)
 		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;

View File

@@ -7457,20 +7457,16 @@ bool put_page_back_buddy(struct page *page)
 }
 #endif
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void)
+bool has_managed_zone(enum zone_type zone)
 {
 	struct pglist_data *pgdat;
 	for_each_online_pgdat(pgdat) {
-		struct zone *zone = &pgdat->node_zones[ZONE_DMA];
-		if (managed_zone(zone))
+		if (managed_zone(&pgdat->node_zones[zone]))
 			return true;
 	}
 	return false;
 }
-#endif /* CONFIG_ZONE_DMA */
 #ifdef CONFIG_UNACCEPTED_MEMORY