Merge tag 'mm-hotfixes-stable-2026-03-16-12-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "6 hotfixes.  4 are cc:stable.  3 are for MM.

  All are singletons - please see the changelogs for details"

* tag 'mm-hotfixes-stable-2026-03-16-12-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  MAINTAINERS: update email address for Ignat Korchagin
  mm/huge_memory: fix early failure try_to_migrate() when split huge pmd for shared THP
  mm/rmap: fix incorrect pte restoration for lazyfree folios
  mm/huge_memory: fix use of NULL folio in move_pages_huge_pmd()
  build_bug.h: correct function parameters names in kernel-doc
  crash_dump: don't log dm-crypt key bytes in read_key_from_user_keying
Linus Torvalds
2026-03-16 12:21:00 -07:00
6 changed files with 29 additions and 12 deletions

.mailmap

@@ -327,6 +327,7 @@ Henrik Rydberg <rydberg@bitmath.org>
Herbert Xu <herbert@gondor.apana.org.au>
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
+Ignat Korchagin <ignat@linux.win> <ignat@cloudflare.com>
Ike Panhc <ikepanhc@gmail.com> <ike.pan@canonical.com>
J. Bruce Fields <bfields@fieldses.org> <bfields@redhat.com>
J. Bruce Fields <bfields@fieldses.org> <bfields@citi.umich.edu>

MAINTAINERS

@@ -4022,7 +4022,7 @@ F: drivers/hwmon/asus_wmi_sensors.c
ASYMMETRIC KEYS
M: David Howells <dhowells@redhat.com>
M: Lukas Wunner <lukas@wunner.de>
-M: Ignat Korchagin <ignat@cloudflare.com>
+M: Ignat Korchagin <ignat@linux.win>
L: keyrings@vger.kernel.org
L: linux-crypto@vger.kernel.org
S: Maintained
@@ -4035,7 +4035,7 @@ F: include/linux/verification.h
ASYMMETRIC KEYS - ECDSA
M: Lukas Wunner <lukas@wunner.de>
-M: Ignat Korchagin <ignat@cloudflare.com>
+M: Ignat Korchagin <ignat@linux.win>
R: Stefan Berger <stefanb@linux.ibm.com>
L: linux-crypto@vger.kernel.org
S: Maintained
@@ -4045,14 +4045,14 @@ F: include/crypto/ecc*
ASYMMETRIC KEYS - GOST
M: Lukas Wunner <lukas@wunner.de>
-M: Ignat Korchagin <ignat@cloudflare.com>
+M: Ignat Korchagin <ignat@linux.win>
L: linux-crypto@vger.kernel.org
S: Odd fixes
F: crypto/ecrdsa*
ASYMMETRIC KEYS - RSA
M: Lukas Wunner <lukas@wunner.de>
-M: Ignat Korchagin <ignat@cloudflare.com>
+M: Ignat Korchagin <ignat@linux.win>
L: linux-crypto@vger.kernel.org
S: Maintained
F: crypto/rsa*

include/linux/build_bug.h

@@ -32,7 +32,8 @@
/**
* BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied
* error message.
-* @condition: the condition which the compiler should know is false.
+* @cond: the condition which the compiler should know is false.
+* @msg: build-time error message
*
* See BUILD_BUG_ON for description.
*/
@@ -60,6 +61,7 @@
/**
* static_assert - check integer constant expression at build time
+* @expr: expression to be checked
*
* static_assert() is a wrapper for the C11 _Static_assert, with a
* little macro magic to make the message optional (defaulting to the

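The kernel-doc corrected above also covers static_assert(), whose comment describes the macro magic that makes the message optional by defaulting it to the stringified expression. Below is a rough userspace-only sketch of that same trick; the my_static_assert and pkt_hdr names are invented for the illustration (they are not the kernel's definitions), and gcc/clang's ##__VA_ARGS__ extension is assumed.

#include <stddef.h>

/* Optional-message assert: with one argument the stringified expression
 * becomes the _Static_assert message; with two, the explicit message wins. */
#define my_static_assert(expr, ...) \
	__my_static_assert(expr, ##__VA_ARGS__, #expr)
#define __my_static_assert(expr, msg, ...) _Static_assert(expr, msg)

struct pkt_hdr {
	unsigned char  type;
	unsigned char  flags;
	unsigned short len;
};

/* Message defaults to "sizeof(struct pkt_hdr) == 4" (4 on common ABIs). */
my_static_assert(sizeof(struct pkt_hdr) == 4);
/* Explicit message, in the spirit of BUILD_BUG_ON_MSG(). */
my_static_assert(offsetof(struct pkt_hdr, len) == 2,
		 "len must sit at byte offset 2");

int main(void)
{
	return 0;
}

If either condition were false, the compile would stop with the corresponding message, which is the behaviour the corrected kernel-doc describes.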
kernel/crash_dump_dm_crypt.c

@@ -168,8 +168,8 @@ static int read_key_from_user_keying(struct dm_crypt_key *dm_key)
memcpy(dm_key->data, ukp->data, ukp->datalen);
dm_key->key_size = ukp->datalen;
-kexec_dprintk("Get dm crypt key (size=%u) %s: %8ph\n", dm_key->key_size,
-dm_key->key_desc, dm_key->data);
+kexec_dprintk("Get dm crypt key (size=%u) %s\n", dm_key->key_size,
+dm_key->key_desc);
out:
up_read(&key->sem);

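For context on the removed debug line: printk's %ph extension formats a buffer as space-separated hex bytes, so "%8ph" on dm_key->data wrote the first 8 bytes of the dm-crypt key into the kernel log. A small userspace illustration of roughly what that output looked like; the key bytes, size and key description below are made up for the demo:

#include <stdio.h>

int main(void)
{
	/* Made-up stand-ins for dm_key->data, dm_key->key_size, dm_key->key_desc. */
	const unsigned char key[8] = { 0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03, 0x04 };
	unsigned int key_size = 64;
	const char *key_desc = "example-dm-crypt-key";

	/* Roughly what the old "%8ph" kexec_dprintk() call would have emitted. */
	printf("Get dm crypt key (size=%u) %s: ", key_size, key_desc);
	for (int i = 0; i < 8; i++)
		printf("%02x%s", key[i], i < 7 ? " " : "\n");
	return 0;
}

The fix keeps the key size and description in the debug output and drops the key material itself.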
mm/huge_memory.c

@@ -2797,7 +2797,8 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
} else {
src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
-_dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
+_dst_pmd = move_soft_dirty_pmd(src_pmdval);
+_dst_pmd = clear_uffd_wp_pmd(_dst_pmd);
}
set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);

mm/rmap.c

@@ -1955,7 +1955,14 @@ static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
if (userfaultfd_wp(vma))
return 1;
-return folio_pte_batch(folio, pvmw->pte, pte, max_nr);
+/*
+* If unmap fails, we need to restore the ptes. To avoid accidentally
+* upgrading write permissions for ptes that were not originally
+* writable, and to avoid losing the soft-dirty bit, use the
+* appropriate FPB flags.
+*/
+return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr,
+FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY);
}
/*
@@ -2443,11 +2450,17 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
__maybe_unused pmd_t pmdval;
if (flags & TTU_SPLIT_HUGE_PMD) {
+/*
+* split_huge_pmd_locked() might leave the
+* folio mapped through PTEs. Retry the walk
+* so we can detect this scenario and properly
+* abort the walk.
+*/
split_huge_pmd_locked(vma, pvmw.address,
pvmw.pmd, true);
-ret = false;
-page_vma_mapped_walk_done(&pvmw);
-break;
+flags &= ~TTU_SPLIT_HUGE_PMD;
+page_vma_mapped_walk_restart(&pvmw);
+continue;
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
pmdval = pmdp_get(pvmw.pmd);
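The control-flow change above is the interesting part: instead of failing immediately after splitting the PMD, the code now clears TTU_SPLIT_HUGE_PMD and restarts the walk, so that any PTE mappings left behind by split_huge_pmd_locked() are seen by the normal walk, which can then abort properly. A generic userspace sketch of that clear-the-one-shot-flag-and-rescan pattern; all names below are invented for the illustration and are not the kernel code:

#include <stdbool.h>
#include <stdio.h>

#define SCAN_SPLIT 0x1	/* one-shot request: expand the compound entry first */

static int entries[8];
static int nr_entries;

static void split_compound(void)
{
	/* Replace the single compound entry with four small ones. */
	nr_entries = 4;
	for (int i = 0; i < nr_entries; i++)
		entries[i] = 1;
}

static bool scan(unsigned int flags)
{
	for (int i = 0; i < nr_entries; i++) {
		if (flags & SCAN_SPLIT) {
			/*
			 * Old pattern: give up here ("ret = false; break;").
			 * New pattern: split once, clear the flag, restart the
			 * walk, and let the ordinary path handle what remains.
			 */
			split_compound();
			flags &= ~SCAN_SPLIT;
			i = -1;		/* restart the walk from the top */
			continue;
		}
		printf("processing entry of size %d\n", entries[i]);
	}
	return true;
}

int main(void)
{
	nr_entries = 1;
	entries[0] = 4;		/* one "compound" entry */
	return scan(SCAN_SPLIT) ? 0 : 1;
}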