
mm: pass page directly instead of using folio_page

In commit_anon_folio_batch(), we iterate over all pages pointed to by
the PTE batch, so we need to know the first page of the batch.
Currently we derive that via folio_page(folio, 0), but that takes us to
the first (head) page of the folio instead; our PTE batch may lie in
the middle of the folio, leading to incorrectness.
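
To make the breakage concrete, here is a minimal sketch (illustrative
only; it uses the real vm_normal_page()/page_folio()/folio_page()
helpers, but the surrounding setup and variable names are hypothetical)
of how the two starting points diverge once a batch begins mid-folio:

	/* A PTE batch that starts part-way into a large folio, e.g. an
	 * mprotect() range whose start address lands in the middle of
	 * the folio's mapping. */
	struct page *page = vm_normal_page(vma, addr, oldpte);
	struct folio *folio = page_folio(page);

	/* 'page' is what the first PTE of the batch actually maps;
	 * folio_page(folio, 0) is the folio's head page. The two differ
	 * whenever the batch does not start at the folio's first page,
	 * so iterating pages from the head walks the wrong ones. */
	struct page *wrong_start = folio_page(folio, 0);	/* head page */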

Bite the bullet and throw away the micro-optimization of reusing the
folio in favour of code simplicity.  Derive the page and the folio in
change_pte_range(), and pass the page too to commit_anon_folio_batch()
to fix the aforementioned issue.
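
Condensed from the diff below (abridged, with the NUMA-skip batch
handling and error paths elided), the reworked flow in
change_pte_range() is roughly:

	struct folio *folio = NULL;
	struct page *page;

	/* Derive both up front: 'page' is the first page of the PTE
	 * batch, and the folio is taken from it. */
	page = vm_normal_page(vma, addr, oldpte);
	if (page)
		folio = page_folio(page);

	if (prot_numa) {
		/* prot_numa_skip() now consumes the folio instead of
		 * looking it up and passing it back out. */
		int ret = prot_numa_skip(vma, addr, oldpte, pte,
					 target_node, folio);
		/* ... */
	}

	/* The batch's real first page travels alongside the folio, so
	 * commit_anon_folio_batch() no longer guesses it via
	 * folio_page(folio, 0). */
	set_write_prot_commit_flush_ptes(vma, folio, page, addr, pte,
					 oldpte, ptent, nr_ptes, tlb);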

Link: https://lkml.kernel.org/r/20250806145611.3962-1-dev.jain@arm.com
Fixes: cac1db8c3a ("mm: optimize mprotect() by PTE batching")
Reported-by: syzbot+57bcc752f0df8bb1365c@syzkaller.appspotmail.com
Signed-off-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Debugged-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Yicong Yang <yangyicong@hisilicon.com>
Cc: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: Dev Jain <dev.jain@arm.com>
Date: 2025-08-06 20:26:11 +05:30
Committed-by: Andrew Morton
parent ab5ac789ef
commit cf1b80dc31

mm/mprotect.c

@@ -120,9 +120,8 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
 
 static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
 			   pte_t oldpte, pte_t *pte, int target_node,
-			   struct folio **foliop)
+			   struct folio *folio)
 {
-	struct folio *folio = NULL;
 	bool ret = true;
 	bool toptier;
 	int nid;
@@ -131,7 +130,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
 	if (pte_protnone(oldpte))
 		goto skip;
 
-	folio = vm_normal_folio(vma, addr, oldpte);
 	if (!folio)
 		goto skip;
 
@@ -173,7 +171,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
 		folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
 
 skip:
-	*foliop = folio;
 	return ret;
 }
 
@@ -231,10 +228,9 @@ static int page_anon_exclusive_sub_batch(int start_idx, int max_len,
  * retrieve sub-batches.
  */
 static void commit_anon_folio_batch(struct vm_area_struct *vma,
-		struct folio *folio, unsigned long addr, pte_t *ptep,
+		struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep,
 		pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
 {
-	struct page *first_page = folio_page(folio, 0);
 	bool expected_anon_exclusive;
 	int sub_batch_idx = 0;
 	int len;
@@ -251,7 +247,7 @@ static void commit_anon_folio_batch(struct vm_area_struct *vma,
 }
 
 static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
-		struct folio *folio, unsigned long addr, pte_t *ptep,
+		struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep,
 		pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
 {
 	bool set_write;
@@ -270,7 +266,7 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
 			       /* idx = */ 0, set_write, tlb);
 		return;
 	}
-	commit_anon_folio_batch(vma, folio, addr, ptep, oldpte, ptent, nr_ptes, tlb);
+	commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
 }
 
 static long change_pte_range(struct mmu_gather *tlb,
@@ -305,15 +301,19 @@ static long change_pte_range(struct mmu_gather *tlb,
 			const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE;
 			int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
 			struct folio *folio = NULL;
+			struct page *page;
 			pte_t ptent;
 
+			page = vm_normal_page(vma, addr, oldpte);
+			if (page)
+				folio = page_folio(page);
 			/*
 			 * Avoid trapping faults against the zero or KSM
 			 * pages. See similar comment in change_huge_pmd.
 			 */
 			if (prot_numa) {
 				int ret = prot_numa_skip(vma, addr, oldpte, pte,
-							 target_node, &folio);
+							 target_node, folio);
 				if (ret) {
 
 					/* determine batch to skip */
@@ -323,9 +323,6 @@ static long change_pte_range(struct mmu_gather *tlb,
 				}
 			}
 
-			if (!folio)
-				folio = vm_normal_folio(vma, addr, oldpte);
-
 			nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
 
 			oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
@@ -351,7 +348,7 @@ static long change_pte_range(struct mmu_gather *tlb,
 			 */
 			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
 			    !pte_write(ptent))
-				set_write_prot_commit_flush_ptes(vma, folio,
+				set_write_prot_commit_flush_ptes(vma, folio, page,
 					addr, pte, oldpte, ptent, nr_ptes, tlb);
 			else
 				prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,