mm: use unmap_desc struct for freeing page tables

Pass the unmap_desc through to free_pgtables(), since it already holds
almost everything the function needs and is already on the stack.

Update the testing code as necessary.

No functional changes intended.
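
For reference, the descriptor that free_pgtables() now consumes carries
roughly the fields sketched below. This is reconstructed from the
accesses visible in this patch (mas, first, vma_start, vma_end,
pg_start, pg_end, tree_end, tree_reset, mm_wr_locked); the field order
and any members the patch does not touch are assumptions, not the
verbatim header definition:

    /* Sketch only: fields inferred from this patch's accesses. */
    struct unmap_desc {
            struct ma_state *mas;           /* maple state for the vma walk */
            struct vm_area_struct *first;   /* the first vma */
            unsigned long vma_start;        /* lowest vma address */
            unsigned long vma_end;          /* highest vma address (exclusive) */
            unsigned long pg_start;         /* lowest page table address (floor) */
            unsigned long pg_end;           /* highest page table address (ceiling) */
            unsigned long tree_end;         /* highest vma tree search address */
            unsigned long tree_reset;       /* address to restart the tree walk from */
            bool mm_wr_locked;              /* if the mmap write lock is held */
    };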

[Liam.Howlett@oracle.com: fix up unmap desc use on exit_mmap()]
  Link: https://lkml.kernel.org/r/20260210214214.364856-1-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20260121164946.2093480-12-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: SeongJae Park <sj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 6 files changed, 45 insertions(+), 35 deletions(-)

@@ -512,10 +512,7 @@ bool __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-                struct vm_area_struct *vma, unsigned long pg_start,
-                unsigned long pg_end, unsigned long vma_end,
-                bool mm_wr_locked);
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc);
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);


@@ -373,12 +373,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 /**
  * free_pgtables() - Free a range of page tables
  * @tlb: The mmu gather
- * @mas: The maple state
- * @vma: The first vma
- * @pg_start: The lowest page table address (floor)
- * @pg_end: The highest page table address (ceiling)
- * @vma_end: The highest vma tree search address
- * @mm_wr_locked: boolean indicating if the mm is write locked
+ * @unmap: The unmap_desc
  *
  * Note: pg_start and pg_end are provided to indicate the absolute range of the
  * page tables that should be removed. This can differ from the vma mappings on
@@ -388,21 +383,19 @@ void free_pgd_range(struct mmu_gather *tlb,
  * The vma_end differs from the pg_end when a dup_mmap() failed and the tree has
  * unrelated data to the mm_struct being torn down.
  */
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-                struct vm_area_struct *vma, unsigned long pg_start,
-                unsigned long pg_end, unsigned long vma_end,
-                bool mm_wr_locked)
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
 {
         struct unlink_vma_file_batch vb;
+        struct ma_state *mas = unmap->mas;
+        struct vm_area_struct *vma = unmap->first;

         /*
          * Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
          * may be 0. Underflow is expected in this case. Otherwise the
-         * pagetable end is exclusive.
-         * vma_end is exclusive.
-         * The last vma address should never be larger than the pagetable end.
+         * pagetable end is exclusive. vma_end is exclusive. The last vma
+         * address should never be larger than the pagetable end.
          */
-        WARN_ON_ONCE(vma_end - 1 > pg_end - 1);
+        WARN_ON_ONCE(unmap->vma_end - 1 > unmap->pg_end - 1);

         tlb_free_vmas(tlb);
@@ -410,13 +403,13 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
                 unsigned long addr = vma->vm_start;
                 struct vm_area_struct *next;

-                next = mas_find(mas, vma_end - 1);
+                next = mas_find(mas, unmap->tree_end - 1);

                 /*
                  * Hide vma from rmap and truncate_pagecache before freeing
                  * pgtables
                  */
-                if (mm_wr_locked)
+                if (unmap->mm_wr_locked)
                         vma_start_write(vma);
                 unlink_anon_vmas(vma);
@@ -428,16 +421,16 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
                  */
                 while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
                         vma = next;
-                        next = mas_find(mas, vma_end - 1);
-                        if (mm_wr_locked)
+                        next = mas_find(mas, unmap->tree_end - 1);
+                        if (unmap->mm_wr_locked)
                                 vma_start_write(vma);
                         unlink_anon_vmas(vma);
                         unlink_file_vma_batch_add(&vb, vma);
                 }
                 unlink_file_vma_batch_final(&vb);
-                free_pgd_range(tlb, addr, vma->vm_end,
-                        pg_start, next ? next->vm_start : pg_end);
+                free_pgd_range(tlb, addr, vma->vm_end, unmap->pg_start,
+                        next ? next->vm_start : unmap->pg_end);
                 vma = next;
         } while (vma);
 }
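
A note on the WARN_ON_ONCE() rewritten above: it relies on unsigned
wraparound, as the in-code comment says. When the ceiling is
USER_PGTABLES_CEILING == 0, unmap->pg_end - 1 wraps to ULONG_MAX, so no
vma_end can trip the check. A minimal userspace sketch of that
arithmetic (the addresses are invented for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned long vma_end = 0x7ffff7fff000UL; /* exclusive end of last vma */
            unsigned long pg_end = 0UL;               /* ceiling of 0: all of userspace */

            /* 0UL - 1 wraps to ULONG_MAX, so vma_end - 1 can never exceed it */
            printf("warn? %s\n", (vma_end - 1) > (pg_end - 1) ? "yes" : "no");
            return 0;
    }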


@@ -1307,10 +1307,10 @@ void exit_mmap(struct mm_struct *mm)
          */
         mm_flags_set(MMF_OOM_SKIP, mm);
         mmap_write_lock(mm);
+        unmap.mm_wr_locked = true;
         mt_clear_in_rcu(&mm->mm_mt);
-        vma_iter_set(&vmi, vma->vm_end);
-        free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
-                        USER_PGTABLES_CEILING, USER_PGTABLES_CEILING, true);
+        unmap_pgtable_init(&unmap, &vmi);
+        free_pgtables(&tlb, &unmap);
         tlb_finish_mmu(&tlb);

         /*


@@ -475,15 +475,13 @@ void remove_vma(struct vm_area_struct *vma)
 void unmap_region(struct unmap_desc *unmap)
 {
         struct mm_struct *mm = unmap->first->vm_mm;
-        struct ma_state *mas = unmap->mas;
         struct mmu_gather tlb;

         tlb_gather_mmu(&tlb, mm);
         update_hiwater_rss(mm);
         unmap_vmas(&tlb, unmap);
-        mas_set(mas, unmap->tree_reset);
-        free_pgtables(&tlb, mas, unmap->first, unmap->pg_start, unmap->pg_end,
-                unmap->tree_end, unmap->mm_wr_locked);
+        mas_set(unmap->mas, unmap->tree_reset);
+        free_pgtables(&tlb, unmap);
         tlb_finish_mmu(&tlb);
 }
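
One design point worth spelling out: unmap_vmas() and free_pgtables()
now share the maple state embedded in the descriptor, so the mas_set()
call above rewinds that single iterator to tree_reset between the
page-unmapping pass and the page-table-freeing pass. A toy userspace
analogy of the same two-pass pattern (not kernel code; all names
invented):

    #include <stdio.h>

    struct iter { int pos; };                 /* stand-in for struct ma_state */

    int main(void)
    {
            const char *vmas[] = { "A", "B", "C" };
            struct iter it = { 0 };

            while (it.pos < 3)                /* pass 1: like unmap_vmas() */
                    printf("unmap vma %s\n", vmas[it.pos++]);

            it.pos = 0;                       /* like mas_set(mas, tree_reset) */

            while (it.pos < 3)                /* pass 2: like free_pgtables() */
                    printf("free tables under vma %s\n", vmas[it.pos++]);
            return 0;
    }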


@@ -167,6 +167,10 @@ struct unmap_desc {
         bool mm_wr_locked;      /* If the mmap write lock is held */
 };

+/*
+ * unmap_all_init() - Initialize unmap_desc to remove all vmas, point the
+ * pg_start and pg_end to a safe location.
+ */
 static inline void unmap_all_init(struct unmap_desc *unmap,
                 struct vma_iterator *vmi, struct vm_area_struct *vma)
 {
@@ -181,6 +185,25 @@ static inline void unmap_all_init(struct unmap_desc *unmap,
         unmap->mm_wr_locked = false;
 }

+/*
+ * unmap_pgtable_init() - Initialize unmap_desc to remove all page tables within
+ * the user range.
+ *
+ * ARM can have mappings outside of vmas.
+ * See: e2cdef8c847b4 ("[PATCH] freepgt: free_pgtables from FIRST_USER_ADDRESS")
+ *
+ * ARM LPAE uses page table mappings beyond the USER_PGTABLES_CEILING.
+ * See: CONFIG_ARM_LPAE in arch/arm/include/asm/pgtable.h
+ */
+static inline void unmap_pgtable_init(struct unmap_desc *unmap,
+                struct vma_iterator *vmi)
+{
+        vma_iter_set(vmi, unmap->tree_reset);
+        unmap->vma_start = FIRST_USER_ADDRESS;
+        unmap->vma_end = USER_PGTABLES_CEILING;
+        unmap->tree_end = USER_PGTABLES_CEILING;
+}
+
 #define UNMAP_STATE(name, _vmi, _vma, _vma_start, _vma_end, _prev, _next) \
         struct unmap_desc name = { \
                 .mas = &(_vmi)->mas, \
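
For completeness, UNMAP_STATE (only partially shown above) appears to
declare a fully-initialized descriptor for the common unmap path. A
hypothetical call site, with the surrounding locals (vmi, vma, prev,
next) assumed rather than taken from this patch:

    /* Hypothetical: tear down [vma->vm_start, vma->vm_end) with one descriptor. */
    UNMAP_STATE(unmap, &vmi, vma, vma->vm_start, vma->vm_end, prev, next);
    unmap_region(&unmap);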


@@ -1137,11 +1137,10 @@ static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
 {
 }

-static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-                struct vm_area_struct *vma, unsigned long floor,
-                unsigned long ceiling, unsigned long tree_max,
-                bool mm_wr_locked)
+static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc)
 {
         (void)tlb;
+        (void)desc;
 }

 static inline void mapping_unmap_writable(struct address_space *mapping)