

Merge tag 'mm-hotfixes-stable-2025-07-24-18-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "11 hotfixes. 9 are cc:stable and the remainder address post-6.15
  issues or aren't considered necessary for -stable kernels.

  7 are for MM"

* tag 'mm-hotfixes-stable-2025-07-24-18-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  sprintf.h requires stdarg.h
  resource: fix false warning in __request_region()
  mm/damon/core: commit damos_quota_goal->nid
  kasan: use vmalloc_dump_obj() for vmalloc error reports
  mm/ksm: fix -Wsometimes-uninitialized from clang-21 in advisor_mode_show()
  mm: update MAINTAINERS entry for HMM
  nilfs2: reject invalid file types when reading inodes
  selftests/mm: fix split_huge_page_test for folio_split() tests
  mailmap: add entry for Senozhatsky
  mm/zsmalloc: do not pass __GFP_MOVABLE if CONFIG_COMPACTION=n
  mm/vmscan: fix hwpoisoned large folio handling in shrink_folio_list
Commit 2942242dde by Linus Torvalds, 2025-07-24 19:13:30 -07:00
13 changed files with 60 additions and 9 deletions

diff --git a/.mailmap b/.mailmap

@@ -694,6 +694,10 @@ Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
 Senthilkumar N L <quic_snlakshm@quicinc.com> <snlakshm@codeaurora.org>
 Serge Hallyn <sergeh@kernel.org> <serge.hallyn@canonical.com>
 Serge Hallyn <sergeh@kernel.org> <serue@us.ibm.com>
+Sergey Senozhatsky <senozhatsky@chromium.org> <sergey.senozhatsky.work@gmail.com>
+Sergey Senozhatsky <senozhatsky@chromium.org> <sergey.senozhatsky@gmail.com>
+Sergey Senozhatsky <senozhatsky@chromium.org> <sergey.senozhatsky@mail.by>
+Sergey Senozhatsky <senozhatsky@chromium.org> <senozhatsky@google.com>
 Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
 Shakeel Butt <shakeel.butt@linux.dev> <shakeelb@google.com>
 Shannon Nelson <sln@onemain.com> <shannon.nelson@amd.com>

diff --git a/CREDITS b/CREDITS

@@ -1397,6 +1397,10 @@ N: Thomas Gleixner
 E: tglx@linutronix.de
 D: NAND flash hardware support, JFFS2 on NAND flash
 
+N: Jérôme Glisse
+E: jglisse@redhat.com
+D: HMM - Heterogeneous Memory Management
+
 N: Richard E. Gooch
 E: rgooch@atnf.csiro.au
 D: parent process death signal to children

diff --git a/MAINTAINERS b/MAINTAINERS

@@ -11009,7 +11009,8 @@ F: Documentation/ABI/testing/debugfs-hisi-zip
 F:	drivers/crypto/hisilicon/zip/
 
 HMM - Heterogeneous Memory Management
-M:	Jérôme Glisse <jglisse@redhat.com>
+M:	Jason Gunthorpe <jgg@nvidia.com>
+M:	Leon Romanovsky <leonro@nvidia.com>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	Documentation/mm/hmm.rst

diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c

@@ -472,11 +472,18 @@ static int __nilfs_read_inode(struct super_block *sb,
 		inode->i_op = &nilfs_symlink_inode_operations;
 		inode_nohighmem(inode);
 		inode->i_mapping->a_ops = &nilfs_aops;
-	} else {
+	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+		   S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
 		inode->i_op = &nilfs_special_inode_operations;
 		init_special_inode(
 			inode, inode->i_mode,
 			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
+	} else {
+		nilfs_error(sb,
+			    "invalid file type bits in mode 0%o for inode %lu",
+			    inode->i_mode, ino);
+		err = -EIO;
+		goto failed_unmap;
 	}
 	nilfs_ifile_unmap_inode(raw_inode);
 	brelse(bh);
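The new rejecting branch matters because the S_IFMT field is four bits wide but only seven of its sixteen patterns name a real file type, so a corrupted on-disk mode can match none of the S_IS*() tests. A minimal userspace sketch (not nilfs code) of the same classification:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* walk all 16 possible S_IFMT bit patterns */
	for (unsigned int type = 0; type <= 0170000; type += 010000) {
		mode_t mode = type | 0644;

		if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode) ||
		    S_ISCHR(mode) || S_ISBLK(mode) ||
		    S_ISFIFO(mode) || S_ISSOCK(mode))
			printf("0%06o: valid file type\n", type);
		else
			printf("0%06o: invalid, must be rejected\n", type);
	}
	return 0;
}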

diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h

@@ -4,6 +4,7 @@
 #include <linux/compiler_attributes.h>
 #include <linux/types.h>
+#include <linux/stdarg.h>
 
 int num_to_str(char *buf, int size, unsigned long long num, unsigned int width);
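The rule behind the one-liner is that a header must be self-contained: sprintf.h declares prototypes that take a va_list (vsprintf(), vsnprintf(), and friends), so it has to pull in the header defining va_list itself rather than rely on whatever its includers happened to include first. A hedged userspace analogue of the fixed header:

/* mini_sprintf.h -- hypothetical header, not the kernel one */
#ifndef MINI_SPRINTF_H
#define MINI_SPRINTF_H

#include <stdarg.h>	/* without this, va_list below is an unknown type */

int mini_vsnprintf(char *buf, unsigned long size, const char *fmt, va_list args);

#endif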

diff --git a/kernel/resource.c b/kernel/resource.c

@@ -1279,8 +1279,9 @@ static int __request_region_locked(struct resource *res, struct resource *parent
 		 * become unavailable to other users.  Conflicts are
 		 * not expected.  Warn to aid debugging if encountered.
 		 */
-		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
-			pr_warn("Unaddressable device %s %pR conflicts with %pR",
+		if (parent == &iomem_resource &&
+		    conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
+			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
 				conflict->name, conflict, res);
 		}
 		if (conflict != parent) {
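The added condition narrows the warning to the iomem tree, the only tree where device-private memory lives; __request_region() also walks other resource trees, where a matching desc value does not indicate the situation this warning is about. A userspace sketch with stand-in types (illustrative values, not kernel internals):

#include <stdio.h>

#define IORES_DESC_DEVICE_PRIVATE_MEMORY 7	/* illustrative value */

struct resource { const char *name; int desc; };

static struct resource iomem_resource = { "iomem", 0 };
static struct resource ioport_resource = { "ioport", 0 };

static void warn_on_conflict(struct resource *parent, struct resource *conflict)
{
	/* before the fix, the desc test alone could fire in any tree */
	if (parent == &iomem_resource &&
	    conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
		printf("warn: unaddressable device %s conflicts\n", conflict->name);
	else
		printf("no warning in tree %s\n", parent->name);
}

int main(void)
{
	struct resource dev = { "devmem", IORES_DESC_DEVICE_PRIVATE_MEMORY };

	warn_on_conflict(&ioport_resource, &dev);	/* false warning before the fix */
	warn_on_conflict(&iomem_resource, &dev);	/* still warns where intended */
	return 0;
}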

diff --git a/mm/damon/core.c b/mm/damon/core.c

@@ -754,6 +754,19 @@ static struct damos_quota_goal *damos_nth_quota_goal(
 	return NULL;
 }
 
+static void damos_commit_quota_goal_union(
+		struct damos_quota_goal *dst, struct damos_quota_goal *src)
+{
+	switch (dst->metric) {
+	case DAMOS_QUOTA_NODE_MEM_USED_BP:
+	case DAMOS_QUOTA_NODE_MEM_FREE_BP:
+		dst->nid = src->nid;
+		break;
+	default:
+		break;
+	}
+}
+
 static void damos_commit_quota_goal(
 		struct damos_quota_goal *dst, struct damos_quota_goal *src)
 {
@@ -762,6 +775,7 @@ static void damos_commit_quota_goal(
 	if (dst->metric == DAMOS_QUOTA_USER_INPUT)
 		dst->current_value = src->current_value;
 	/* keep last_psi_total as is, since it will be updated in next cycle */
+	damos_commit_quota_goal_union(dst, src);
 }
 
 /**
@@ -795,6 +809,7 @@ int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
 				src_goal->metric, src_goal->target_value);
 		if (!new_goal)
 			return -ENOMEM;
+		damos_commit_quota_goal_union(new_goal, src_goal);
 		damos_add_quota_goal(dst, new_goal);
 	}
 	return 0;
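damos_commit_quota_goal_union() exists because nid is only meaningful for the node-memory metrics; committing new goals over live ones previously left the old nid in place. A userspace sketch of the pattern, with hypothetical stand-in names:

#include <stdio.h>

enum metric { QUOTA_USER_INPUT, QUOTA_NODE_MEM_USED_BP, QUOTA_NODE_MEM_FREE_BP };

struct quota_goal {
	enum metric metric;
	int nid;	/* only used by the NODE_MEM_* metrics */
};

/* copy the union-like fields that only some metric types use */
static void commit_goal_union(struct quota_goal *dst, const struct quota_goal *src)
{
	switch (dst->metric) {
	case QUOTA_NODE_MEM_USED_BP:
	case QUOTA_NODE_MEM_FREE_BP:
		dst->nid = src->nid;	/* the assignment the fix adds */
		break;
	default:
		break;
	}
}

int main(void)
{
	struct quota_goal live = { QUOTA_NODE_MEM_USED_BP, 0 };
	struct quota_goal update = { QUOTA_NODE_MEM_USED_BP, 1 };

	commit_goal_union(&live, &update);
	printf("live goal now targets node %d\n", live.nid);	/* prints 1 */
	return 0;
}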

diff --git a/mm/kasan/report.c b/mm/kasan/report.c

@@ -399,7 +399,9 @@ static void print_address_description(void *addr, u8 tag,
 	}
 
 	if (is_vmalloc_addr(addr)) {
-		pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
+		pr_err("The buggy address belongs to a");
+		if (!vmalloc_dump_obj(addr))
+			pr_cont(" vmalloc virtual mapping\n");
 		page = vmalloc_to_page(addr);
 	}
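The rewrite turns the fixed message into a prefix plus a conditional continuation: when vmalloc_dump_obj() prints allocation details, the generic suffix is skipped. A userspace sketch of that print-then-maybe-continue pattern (dump_obj() is a stand-in, not the kasan helper):

#include <stdio.h>

/* stand-in for vmalloc_dump_obj(): returns nonzero if it printed details */
static int dump_obj(const void *addr)
{
	(void)addr;
	return 0;	/* pretend no allocation record was found */
}

int main(void)
{
	const void *addr = (const void *)0x1234;

	printf("The buggy address belongs to a");
	if (!dump_obj(addr))
		printf(" vmalloc virtual mapping\n");	/* generic fallback */
	return 0;
}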

diff --git a/mm/ksm.c b/mm/ksm.c

@@ -3669,10 +3669,10 @@ static ssize_t advisor_mode_show(struct kobject *kobj,
 {
 	const char *output;
 
-	if (ksm_advisor == KSM_ADVISOR_NONE)
-		output = "[none] scan-time";
-	else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
 		output = "none [scan-time]";
+	else
+		output = "[none] scan-time";
 
 	return sysfs_emit(buf, "%s\n", output);
 }
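The warning is the classic if/else-if chain with no final else: clang-21 cannot prove output is assigned on every path, even though the two branches cover every advisor mode in practice. A minimal reproducer; build with clang -Wsometimes-uninitialized:

#include <stdio.h>

enum advisor { ADVISOR_NONE, ADVISOR_SCAN_TIME };

static const char *show(enum advisor a)
{
	const char *output;

	if (a == ADVISOR_NONE)
		output = "[none] scan-time";
	else if (a == ADVISOR_SCAN_TIME)	/* warns: output may be uninitialized */
		output = "none [scan-time]";

	return output;
}

int main(void)
{
	puts(show(ADVISOR_SCAN_TIME));
	return 0;
}

The fix rewrites the chain as a plain if/else, so one of the two assignments always executes.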

diff --git a/mm/memory-failure.c b/mm/memory-failure.c

@@ -1561,6 +1561,10 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
 	return ret;
 }
 
+/*
+ * The caller must guarantee the folio isn't large folio, except hugetlb.
+ * try_to_unmap() can't handle it.
+ */
 int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
 {
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;

diff --git a/mm/vmscan.c b/mm/vmscan.c

@@ -1138,6 +1138,14 @@ retry:
 			goto keep;
 
 		if (folio_contain_hwpoisoned_page(folio)) {
+			/*
+			 * unmap_poisoned_folio() can't handle large
+			 * folio, just skip it. memory_failure() will
+			 * handle it if the UCE is triggered again.
+			 */
+			if (folio_test_large(folio))
+				goto keep_locked;
+
 			unmap_poisoned_folio(folio, folio_pfn(folio), false);
 			folio_unlock(folio);
 			folio_put(folio);

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c

@@ -1043,6 +1043,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	if (!zspage)
 		return NULL;
 
+	if (!IS_ENABLED(CONFIG_COMPACTION))
+		gfp &= ~__GFP_MOVABLE;
+
 	zspage->magic = ZSPAGE_MAGIC;
 	zspage->pool = pool;
 	zspage->class = class->index;
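IS_ENABLED() folds to a compile-time constant, so the new branch vanishes when compaction is built in; without compaction, zspages cannot be migrated, so advertising __GFP_MOVABLE was wrong. A userspace sketch with made-up flag values (the real kernel macros work differently):

#include <stdio.h>

#define CONFIG_COMPACTION 0		/* pretend compaction is compiled out */
#define IS_ENABLED(option) (option)	/* crude stand-in for the kernel macro */
#define __GFP_MOVABLE 0x08u		/* illustrative bit, not the real value */

int main(void)
{
	unsigned int gfp = 0x400u | __GFP_MOVABLE;	/* caller-supplied flags */

	if (!IS_ENABLED(CONFIG_COMPACTION))
		gfp &= ~__GFP_MOVABLE;			/* the fix: strip the bit */

	printf("gfp = %#x, movable bit %s\n", gfp,
	       (gfp & __GFP_MOVABLE) ? "set" : "cleared");
	return 0;
}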

diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c

@@ -31,6 +31,7 @@ uint64_t pmd_pagesize;
 #define INPUT_MAX 80
 
 #define PID_FMT "%d,0x%lx,0x%lx,%d"
+#define PID_FMT_OFFSET "%d,0x%lx,0x%lx,%d,%d"
 #define PATH_FMT "%s,0x%lx,0x%lx,%d"
 
 #define PFN_MASK     ((1UL<<55)-1)
@@ -483,7 +483,7 @@ void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc,
 		write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
 			      (uint64_t)addr + fd_size, order);
 	else
-		write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
+		write_debugfs(PID_FMT_OFFSET, getpid(), (uint64_t)addr,
 			      (uint64_t)addr + fd_size, order, offset);
 
 	for (i = 0; i < fd_size; i++)
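The bug class here is a printf-style call with more arguments than conversions: extra arguments are evaluated and silently discarded, so the trailing offset never reached the debugfs interface. A small demonstration with made-up values:

#include <stdio.h>

#define PID_FMT        "%d,0x%lx,0x%lx,%d"
#define PID_FMT_OFFSET "%d,0x%lx,0x%lx,%d,%d"

int main(void)
{
	char buf[80];

	/* old call: the fifth argument (offset = 9) is silently dropped */
	snprintf(buf, sizeof(buf), PID_FMT, 1234, 0x7f00UL, 0x7f80UL, 4, 9);
	printf("old: %s\n", buf);

	/* fixed call: the extra %d actually emits the offset */
	snprintf(buf, sizeof(buf), PID_FMT_OFFSET, 1234, 0x7f00UL, 0x7f80UL, 4, 9);
	printf("new: %s\n", buf);
	return 0;
}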