Merge tag 'mm-hotfixes-stable-2026-03-09-16-36' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "15 hotfixes. 6 are cc:stable. 14 are for MM.

  Singletons, with one doubleton - please see the changelogs for details"

* tag 'mm-hotfixes-stable-2026-03-09-16-36' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  MAINTAINERS, mailmap: update email address for Lorenzo Stoakes
  mm/mmu_notifier: clean up mmu_notifier.h kernel-doc
  uaccess: correct kernel-doc parameter format
  mm/huge_memory: fix a folio_split() race condition with folio_try_get()
  MAINTAINERS: add co-maintainer and reviewer for SLAB ALLOCATOR
  MAINTAINERS: add RELAY entry
  memcg: fix slab accounting in refill_obj_stock() trylock path
  mm/hugetlb.c: use __pa() instead of virt_to_phys() in early bootmem alloc code
  zram: rename writeback_compressed device attr
  tools/testing: fix testing/vma and testing/radix-tree build
  Revert "ptdesc: remove references to folios from __pagetable_ctor() and pagetable_dtor()"
  mm/cma: move put_page_testzero() out of VM_WARN_ON in cma_release()
  mm/damon/core: clear walk_control on inactive context in damos_walk()
  mm: memfd_luo: always dirty all folios
  mm: memfd_luo: always make all folios uptodate
This commit is contained in:
Linus Torvalds
2026-03-10 12:47:56 -07:00
18 changed files with 163 additions and 73 deletions

View File

@@ -1013,6 +1013,7 @@ bool cma_release(struct cma *cma, const struct page *pages,
unsigned long count)
{
struct cma_memrange *cmr;
unsigned long ret = 0;
unsigned long i, pfn;
cmr = find_cma_memrange(cma, pages, count);
@@ -1021,7 +1022,9 @@ bool cma_release(struct cma *cma, const struct page *pages,
pfn = page_to_pfn(pages);
for (i = 0; i < count; i++, pfn++)
-		VM_WARN_ON(!put_page_testzero(pfn_to_page(pfn)));
+		ret += !put_page_testzero(pfn_to_page(pfn));
+	WARN(ret, "%lu pages are still in use!\n", ret);
 	__cma_release_frozen(cma, cmr, pages, count);
__cma_release_frozen(cma, cmr, pages, count);

View File

@@ -1562,8 +1562,13 @@ int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
}
ctx->walk_control = control;
mutex_unlock(&ctx->walk_control_lock);
-	if (!damon_is_running(ctx))
+	if (!damon_is_running(ctx)) {
+		mutex_lock(&ctx->walk_control_lock);
+		if (ctx->walk_control == control)
+			ctx->walk_control = NULL;
+		mutex_unlock(&ctx->walk_control_lock);
 		return -EINVAL;
+	}
wait_for_completion(&control->completion);
if (control->canceled)
return -ECANCELED;

View File

@@ -3631,6 +3631,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
const bool is_anon = folio_test_anon(folio);
int old_order = folio_order(folio);
int start_order = split_type == SPLIT_TYPE_UNIFORM ? new_order : old_order - 1;
struct folio *old_folio = folio;
int split_order;
/*
@@ -3651,12 +3652,16 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
* uniform split has xas_split_alloc() called before
* irq is disabled to allocate enough memory, whereas
* non-uniform split can handle ENOMEM.
* Use the to-be-split folio, so that a parallel
* folio_try_get() waits on it until xarray is updated
* with after-split folios and the original one is
* unfrozen.
*/
-		if (split_type == SPLIT_TYPE_UNIFORM)
-			xas_split(xas, folio, old_order);
-		else {
+		if (split_type == SPLIT_TYPE_UNIFORM) {
+			xas_split(xas, old_folio, old_order);
+		} else {
 			xas_set_order(xas, folio->index, split_order);
-			xas_try_split(xas, folio, old_order);
+			xas_try_split(xas, old_folio, old_order);
if (xas_error(xas))
return xas_error(xas);
}

View File

@@ -3101,7 +3101,7 @@ static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
* extract the actual node first.
*/
if (m)
-		listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
+		listnode = early_pfn_to_nid(PHYS_PFN(__pa(m)));
}
if (m) {
@@ -3160,7 +3160,7 @@ found:
* The head struct page is used to get folio information by the HugeTLB
* subsystem like zone id and node id.
*/
-	memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
+	memblock_reserved_mark_noinit(__pa((void *)m + PAGE_SIZE),
huge_page_size(h) - PAGE_SIZE);
return 1;

View File

@@ -3086,7 +3086,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
if (!local_trylock(&obj_stock.lock)) {
if (pgdat)
-			mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes);
+			mod_objcg_mlstate(objcg, pgdat, idx, nr_acct);
nr_pages = nr_bytes >> PAGE_SHIFT;
nr_bytes = nr_bytes & (PAGE_SIZE - 1);
atomic_add(nr_bytes, &objcg->nr_charged_bytes);

View File

@@ -146,19 +146,56 @@ static int memfd_luo_preserve_folios(struct file *file,
for (i = 0; i < nr_folios; i++) {
struct memfd_luo_folio_ser *pfolio = &folios_ser[i];
struct folio *folio = folios[i];
-		unsigned int flags = 0;
 		err = kho_preserve_folio(folio);
 		if (err)
 			goto err_unpreserve;
-		if (folio_test_dirty(folio))
-			flags |= MEMFD_LUO_FOLIO_DIRTY;
-		if (folio_test_uptodate(folio))
-			flags |= MEMFD_LUO_FOLIO_UPTODATE;
+		folio_lock(folio);
/*
* A dirty folio is one which has been written to. A clean folio
* is its opposite. Since a clean folio does not carry user
* data, it can be freed by page reclaim under memory pressure.
*
* Saving the dirty flag at prepare() time doesn't work since it
* can change later. Saving it at freeze() also won't work
* because the dirty bit is normally synced at unmap and there
* might still be a mapping of the file at freeze().
*
* To see why this is a problem, say a folio is clean at
* preserve, but gets dirtied later. The pfolio flags will mark
* it as clean. After retrieve, the next kernel might try to
* reclaim this folio under memory pressure, losing user data.
*
* Unconditionally mark it dirty to avoid this problem. This
* comes at the cost of making clean folios un-reclaimable after
* live update.
*/
folio_mark_dirty(folio);
/*
* If the folio is not uptodate, it was fallocated but never
* used. Saving this flag at prepare() doesn't work since it
* might change later when someone uses the folio.
*
* Since we have taken the performance penalty of allocating,
* zeroing, and pinning all the folios in the holes, take a bit
* more and zero all non-uptodate folios too.
*
* NOTE: For someone looking to improve preserve performance,
* this is a good place to look.
*/
if (!folio_test_uptodate(folio)) {
folio_zero_range(folio, 0, folio_size(folio));
flush_dcache_folio(folio);
folio_mark_uptodate(folio);
}
folio_unlock(folio);
 		pfolio->pfn = folio_pfn(folio);
-		pfolio->flags = flags;
+		pfolio->flags = MEMFD_LUO_FOLIO_DIRTY | MEMFD_LUO_FOLIO_UPTODATE;
pfolio->index = folio->index;
}