mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
mm: introduce generic lazy_mmu helpers
The implementation of the lazy MMU mode is currently entirely
arch-specific; core code directly calls arch helpers:
arch_{enter,leave}_lazy_mmu_mode().
We are about to introduce support for nested lazy MMU sections. As things
stand we'd have to duplicate that logic in every arch implementing
lazy_mmu - adding to a fair amount of logic already duplicated across
lazy_mmu implementations.
This patch therefore introduces a new generic layer that calls the
existing arch_* helpers. Two pairs of calls are introduced:
* lazy_mmu_mode_enable() ... lazy_mmu_mode_disable()
This is the standard case where the mode is enabled for a given
block of code by surrounding it with enable() and disable()
calls.
* lazy_mmu_mode_pause() ... lazy_mmu_mode_resume()
This is for situations where the mode is temporarily disabled
by first calling pause() and then resume() (e.g. to prevent any
batching from occurring in a critical section).
The documentation in <linux/pgtable.h> will be updated in a subsequent
patch.
No functional change should be introduced at this stage. The
implementation of enable()/resume() and disable()/pause() is currently
identical, but nesting support will change that.
Most of the call sites have been updated using the following Coccinelle
script:
@@
@@
{
...
- arch_enter_lazy_mmu_mode();
+ lazy_mmu_mode_enable();
...
- arch_leave_lazy_mmu_mode();
+ lazy_mmu_mode_disable();
...
}
@@
@@
{
...
- arch_leave_lazy_mmu_mode();
+ lazy_mmu_mode_pause();
...
- arch_enter_lazy_mmu_mode();
+ lazy_mmu_mode_resume();
...
}
A couple of notes regarding x86:
* Xen is currently the only case where explicit handling is required
for lazy MMU when context-switching. This is purely an
implementation detail and using the generic lazy_mmu_mode_*
functions would cause trouble when nesting support is introduced,
because the generic functions must be called from the current task.
For that reason we still use arch_leave() and arch_enter() there.
* x86 calls arch_flush_lazy_mmu_mode() unconditionally in a few
places, but only defines it if PARAVIRT_XXL is selected, and we
are removing the fallback in <linux/pgtable.h>. Add a new fallback
definition to <asm/pgtable.h> to keep things building.
Link: https://lkml.kernel.org/r/20251215150323.2218608-8-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Yeoreum Yun <yeoreum.yun@arm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand (Red Hat) <david@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
7303ecbfe4
commit
0a096ab7a3
@@ -800,7 +800,7 @@ int split_kernel_leaf_mapping(unsigned long start, unsigned long end)
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
mutex_lock(&pgtable_split_lock);
|
mutex_lock(&pgtable_split_lock);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The split_kernel_leaf_mapping_locked() may sleep, it is not a
|
* The split_kernel_leaf_mapping_locked() may sleep, it is not a
|
||||||
@@ -822,7 +822,7 @@ int split_kernel_leaf_mapping(unsigned long start, unsigned long end)
|
|||||||
ret = split_kernel_leaf_mapping_locked(end);
|
ret = split_kernel_leaf_mapping_locked(end);
|
||||||
}
|
}
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
mutex_unlock(&pgtable_split_lock);
|
mutex_unlock(&pgtable_split_lock);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
@@ -883,10 +883,10 @@ static int range_split_to_ptes(unsigned long start, unsigned long end, gfp_t gfp
|
|||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
ret = walk_kernel_page_table_range_lockless(start, end,
|
ret = walk_kernel_page_table_range_lockless(start, end,
|
||||||
&split_to_ptes_ops, NULL, &gfp);
|
&split_to_ptes_ops, NULL, &gfp);
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -110,7 +110,7 @@ static int update_range_prot(unsigned long start, unsigned long size,
|
|||||||
if (WARN_ON_ONCE(ret))
|
if (WARN_ON_ONCE(ret))
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The caller must ensure that the range we are operating on does not
|
* The caller must ensure that the range we are operating on does not
|
||||||
@@ -119,7 +119,7 @@ static int update_range_prot(unsigned long start, unsigned long size,
|
|||||||
*/
|
*/
|
||||||
ret = walk_kernel_page_table_range_lockless(start, start + size,
|
ret = walk_kernel_page_table_range_lockless(start, start + size,
|
||||||
&pageattr_ops, NULL, &data);
|
&pageattr_ops, NULL, &data);
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -205,7 +205,7 @@ void __flush_hash_table_range(unsigned long start, unsigned long end)
|
|||||||
* way to do things but is fine for our needs here.
|
* way to do things but is fine for our needs here.
|
||||||
*/
|
*/
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
for (; start < end; start += PAGE_SIZE) {
|
for (; start < end; start += PAGE_SIZE) {
|
||||||
pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
|
pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
|
||||||
unsigned long pte;
|
unsigned long pte;
|
||||||
@@ -217,7 +217,7 @@ void __flush_hash_table_range(unsigned long start, unsigned long end)
|
|||||||
continue;
|
continue;
|
||||||
hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
|
hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
|
||||||
}
|
}
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -237,7 +237,7 @@ void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long
|
|||||||
* way to do things but is fine for our needs here.
|
* way to do things but is fine for our needs here.
|
||||||
*/
|
*/
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
start_pte = pte_offset_map(pmd, addr);
|
start_pte = pte_offset_map(pmd, addr);
|
||||||
if (!start_pte)
|
if (!start_pte)
|
||||||
goto out;
|
goto out;
|
||||||
@@ -249,6 +249,6 @@ void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long
|
|||||||
}
|
}
|
||||||
pte_unmap(start_pte);
|
pte_unmap(start_pte);
|
||||||
out:
|
out:
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -73,13 +73,13 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
|
|||||||
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
|
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
|
||||||
if (!pte)
|
if (!pte)
|
||||||
return;
|
return;
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
for (; npages > 0; --npages) {
|
for (; npages > 0; --npages) {
|
||||||
pte_update(mm, addr, pte, 0, 0, 0);
|
pte_update(mm, addr, pte, 0, 0, 0);
|
||||||
addr += PAGE_SIZE;
|
addr += PAGE_SIZE;
|
||||||
++pte;
|
++pte;
|
||||||
}
|
}
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(pte - 1, ptl);
|
pte_unmap_unlock(pte - 1, ptl);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -118,6 +118,7 @@ extern pmdval_t early_pmd_flags;
|
|||||||
#define __pte(x) native_make_pte(x)
|
#define __pte(x) native_make_pte(x)
|
||||||
|
|
||||||
#define arch_end_context_switch(prev) do {} while(0)
|
#define arch_end_context_switch(prev) do {} while(0)
|
||||||
|
static inline void arch_flush_lazy_mmu_mode(void) {}
|
||||||
#endif /* CONFIG_PARAVIRT_XXL */
|
#endif /* CONFIG_PARAVIRT_XXL */
|
||||||
|
|
||||||
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
|
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
|
||||||
|
|||||||
@@ -2739,7 +2739,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
|
if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
|
||||||
/* Fast path for performing exclusive WP */
|
/* Fast path for performing exclusive WP */
|
||||||
@@ -2809,7 +2809,7 @@ flush_and_return:
|
|||||||
if (flush_end)
|
if (flush_end)
|
||||||
flush_tlb_range(vma, start, addr);
|
flush_tlb_range(vma, start, addr);
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(start_pte, ptl);
|
pte_unmap_unlock(start_pte, ptl);
|
||||||
|
|
||||||
cond_resched();
|
cond_resched();
|
||||||
|
|||||||
@@ -235,10 +235,31 @@ static inline int pmd_dirty(pmd_t pmd)
|
|||||||
*
|
*
|
||||||
* Nesting is not permitted and the mode cannot be used in interrupt context.
|
* Nesting is not permitted and the mode cannot be used in interrupt context.
|
||||||
*/
|
*/
|
||||||
#ifndef CONFIG_ARCH_HAS_LAZY_MMU_MODE
|
#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
|
||||||
static inline void arch_enter_lazy_mmu_mode(void) {}
|
static inline void lazy_mmu_mode_enable(void)
|
||||||
static inline void arch_leave_lazy_mmu_mode(void) {}
|
{
|
||||||
static inline void arch_flush_lazy_mmu_mode(void) {}
|
arch_enter_lazy_mmu_mode();
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void lazy_mmu_mode_disable(void)
|
||||||
|
{
|
||||||
|
arch_leave_lazy_mmu_mode();
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void lazy_mmu_mode_pause(void)
|
||||||
|
{
|
||||||
|
arch_leave_lazy_mmu_mode();
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void lazy_mmu_mode_resume(void)
|
||||||
|
{
|
||||||
|
arch_enter_lazy_mmu_mode();
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static inline void lazy_mmu_mode_enable(void) {}
|
||||||
|
static inline void lazy_mmu_mode_disable(void) {}
|
||||||
|
static inline void lazy_mmu_mode_pause(void) {}
|
||||||
|
static inline void lazy_mmu_mode_resume(void) {}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifndef pte_batch_hint
|
#ifndef pte_batch_hint
|
||||||
|
|||||||
@@ -305,7 +305,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
|
|||||||
pte_t pte;
|
pte_t pte;
|
||||||
int index;
|
int index;
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_pause();
|
||||||
|
|
||||||
index = PFN_DOWN(addr - data->start);
|
index = PFN_DOWN(addr - data->start);
|
||||||
page = data->pages[index];
|
page = data->pages[index];
|
||||||
@@ -319,7 +319,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
|
|||||||
}
|
}
|
||||||
spin_unlock(&init_mm.page_table_lock);
|
spin_unlock(&init_mm.page_table_lock);
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_resume();
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -471,7 +471,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
|
|||||||
pte_t pte;
|
pte_t pte;
|
||||||
int none;
|
int none;
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_pause();
|
||||||
|
|
||||||
spin_lock(&init_mm.page_table_lock);
|
spin_lock(&init_mm.page_table_lock);
|
||||||
pte = ptep_get(ptep);
|
pte = ptep_get(ptep);
|
||||||
@@ -483,7 +483,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
|
|||||||
if (likely(!none))
|
if (likely(!none))
|
||||||
__free_page(pfn_to_page(pte_pfn(pte)));
|
__free_page(pfn_to_page(pte_pfn(pte)));
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_resume();
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|||||||
18
mm/madvise.c
18
mm/madvise.c
@@ -453,7 +453,7 @@ restart:
|
|||||||
if (!start_pte)
|
if (!start_pte)
|
||||||
return 0;
|
return 0;
|
||||||
flush_tlb_batched_pending(mm);
|
flush_tlb_batched_pending(mm);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) {
|
for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) {
|
||||||
nr = 1;
|
nr = 1;
|
||||||
ptent = ptep_get(pte);
|
ptent = ptep_get(pte);
|
||||||
@@ -461,7 +461,7 @@ restart:
|
|||||||
if (++batch_count == SWAP_CLUSTER_MAX) {
|
if (++batch_count == SWAP_CLUSTER_MAX) {
|
||||||
batch_count = 0;
|
batch_count = 0;
|
||||||
if (need_resched()) {
|
if (need_resched()) {
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(start_pte, ptl);
|
pte_unmap_unlock(start_pte, ptl);
|
||||||
cond_resched();
|
cond_resched();
|
||||||
goto restart;
|
goto restart;
|
||||||
@@ -497,7 +497,7 @@ restart:
|
|||||||
if (!folio_trylock(folio))
|
if (!folio_trylock(folio))
|
||||||
continue;
|
continue;
|
||||||
folio_get(folio);
|
folio_get(folio);
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(start_pte, ptl);
|
pte_unmap_unlock(start_pte, ptl);
|
||||||
start_pte = NULL;
|
start_pte = NULL;
|
||||||
err = split_folio(folio);
|
err = split_folio(folio);
|
||||||
@@ -508,7 +508,7 @@ restart:
|
|||||||
if (!start_pte)
|
if (!start_pte)
|
||||||
break;
|
break;
|
||||||
flush_tlb_batched_pending(mm);
|
flush_tlb_batched_pending(mm);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
if (!err)
|
if (!err)
|
||||||
nr = 0;
|
nr = 0;
|
||||||
continue;
|
continue;
|
||||||
@@ -556,7 +556,7 @@ restart:
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (start_pte) {
|
if (start_pte) {
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(start_pte, ptl);
|
pte_unmap_unlock(start_pte, ptl);
|
||||||
}
|
}
|
||||||
if (pageout)
|
if (pageout)
|
||||||
@@ -675,7 +675,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
|
|||||||
if (!start_pte)
|
if (!start_pte)
|
||||||
return 0;
|
return 0;
|
||||||
flush_tlb_batched_pending(mm);
|
flush_tlb_batched_pending(mm);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) {
|
for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) {
|
||||||
nr = 1;
|
nr = 1;
|
||||||
ptent = ptep_get(pte);
|
ptent = ptep_get(pte);
|
||||||
@@ -724,7 +724,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
|
|||||||
if (!folio_trylock(folio))
|
if (!folio_trylock(folio))
|
||||||
continue;
|
continue;
|
||||||
folio_get(folio);
|
folio_get(folio);
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(start_pte, ptl);
|
pte_unmap_unlock(start_pte, ptl);
|
||||||
start_pte = NULL;
|
start_pte = NULL;
|
||||||
err = split_folio(folio);
|
err = split_folio(folio);
|
||||||
@@ -735,7 +735,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
|
|||||||
if (!start_pte)
|
if (!start_pte)
|
||||||
break;
|
break;
|
||||||
flush_tlb_batched_pending(mm);
|
flush_tlb_batched_pending(mm);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
if (!err)
|
if (!err)
|
||||||
nr = 0;
|
nr = 0;
|
||||||
continue;
|
continue;
|
||||||
@@ -775,7 +775,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
|
|||||||
if (nr_swap)
|
if (nr_swap)
|
||||||
add_mm_counter(mm, MM_SWAPENTS, nr_swap);
|
add_mm_counter(mm, MM_SWAPENTS, nr_swap);
|
||||||
if (start_pte) {
|
if (start_pte) {
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(start_pte, ptl);
|
pte_unmap_unlock(start_pte, ptl);
|
||||||
}
|
}
|
||||||
cond_resched();
|
cond_resched();
|
||||||
|
|||||||
16
mm/memory.c
16
mm/memory.c
@@ -1256,7 +1256,7 @@ again:
|
|||||||
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
|
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
|
||||||
orig_src_pte = src_pte;
|
orig_src_pte = src_pte;
|
||||||
orig_dst_pte = dst_pte;
|
orig_dst_pte = dst_pte;
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
do {
|
do {
|
||||||
nr = 1;
|
nr = 1;
|
||||||
@@ -1325,7 +1325,7 @@ again:
|
|||||||
} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
|
} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
|
||||||
addr != end);
|
addr != end);
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(orig_src_pte, src_ptl);
|
pte_unmap_unlock(orig_src_pte, src_ptl);
|
||||||
add_mm_rss_vec(dst_mm, rss);
|
add_mm_rss_vec(dst_mm, rss);
|
||||||
pte_unmap_unlock(orig_dst_pte, dst_ptl);
|
pte_unmap_unlock(orig_dst_pte, dst_ptl);
|
||||||
@@ -1846,7 +1846,7 @@ retry:
|
|||||||
return addr;
|
return addr;
|
||||||
|
|
||||||
flush_tlb_batched_pending(mm);
|
flush_tlb_batched_pending(mm);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
do {
|
do {
|
||||||
bool any_skipped = false;
|
bool any_skipped = false;
|
||||||
|
|
||||||
@@ -1878,7 +1878,7 @@ retry:
|
|||||||
direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
|
direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
|
||||||
|
|
||||||
add_mm_rss_vec(mm, rss);
|
add_mm_rss_vec(mm, rss);
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
|
|
||||||
/* Do the actual TLB flush before dropping ptl */
|
/* Do the actual TLB flush before dropping ptl */
|
||||||
if (force_flush) {
|
if (force_flush) {
|
||||||
@@ -2816,7 +2816,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
|
|||||||
mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
|
mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
|
||||||
if (!pte)
|
if (!pte)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
do {
|
do {
|
||||||
BUG_ON(!pte_none(ptep_get(pte)));
|
BUG_ON(!pte_none(ptep_get(pte)));
|
||||||
if (!pfn_modify_allowed(pfn, prot)) {
|
if (!pfn_modify_allowed(pfn, prot)) {
|
||||||
@@ -2826,7 +2826,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
|
|||||||
set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
|
set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
|
||||||
pfn++;
|
pfn++;
|
||||||
} while (pte++, addr += PAGE_SIZE, addr != end);
|
} while (pte++, addr += PAGE_SIZE, addr != end);
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(mapped_pte, ptl);
|
pte_unmap_unlock(mapped_pte, ptl);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
@@ -3177,7 +3177,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
if (fn) {
|
if (fn) {
|
||||||
do {
|
do {
|
||||||
@@ -3190,7 +3190,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
|
|||||||
}
|
}
|
||||||
*mask |= PGTBL_PTE_MODIFIED;
|
*mask |= PGTBL_PTE_MODIFIED;
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
|
|
||||||
if (mm != &init_mm)
|
if (mm != &init_mm)
|
||||||
pte_unmap_unlock(mapped_pte, ptl);
|
pte_unmap_unlock(mapped_pte, ptl);
|
||||||
|
|||||||
@@ -271,7 +271,7 @@ again:
|
|||||||
ptep = pte_offset_map_lock(mm, pmdp, start, &ptl);
|
ptep = pte_offset_map_lock(mm, pmdp, start, &ptl);
|
||||||
if (!ptep)
|
if (!ptep)
|
||||||
goto again;
|
goto again;
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
ptep += (addr - start) / PAGE_SIZE;
|
ptep += (addr - start) / PAGE_SIZE;
|
||||||
|
|
||||||
for (; addr < end; addr += PAGE_SIZE, ptep++) {
|
for (; addr < end; addr += PAGE_SIZE, ptep++) {
|
||||||
@@ -313,7 +313,7 @@ again:
|
|||||||
if (folio_test_large(folio)) {
|
if (folio_test_large(folio)) {
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(ptep, ptl);
|
pte_unmap_unlock(ptep, ptl);
|
||||||
ret = migrate_vma_split_folio(folio,
|
ret = migrate_vma_split_folio(folio,
|
||||||
migrate->fault_page);
|
migrate->fault_page);
|
||||||
@@ -356,7 +356,7 @@ again:
|
|||||||
if (folio && folio_test_large(folio)) {
|
if (folio && folio_test_large(folio)) {
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(ptep, ptl);
|
pte_unmap_unlock(ptep, ptl);
|
||||||
ret = migrate_vma_split_folio(folio,
|
ret = migrate_vma_split_folio(folio,
|
||||||
migrate->fault_page);
|
migrate->fault_page);
|
||||||
@@ -485,7 +485,7 @@ next:
|
|||||||
if (unmapped)
|
if (unmapped)
|
||||||
flush_tlb_range(walk->vma, start, end);
|
flush_tlb_range(walk->vma, start, end);
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(ptep - 1, ptl);
|
pte_unmap_unlock(ptep - 1, ptl);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|||||||
@@ -233,7 +233,7 @@ static long change_pte_range(struct mmu_gather *tlb,
|
|||||||
is_private_single_threaded = vma_is_single_threaded_private(vma);
|
is_private_single_threaded = vma_is_single_threaded_private(vma);
|
||||||
|
|
||||||
flush_tlb_batched_pending(vma->vm_mm);
|
flush_tlb_batched_pending(vma->vm_mm);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
do {
|
do {
|
||||||
nr_ptes = 1;
|
nr_ptes = 1;
|
||||||
oldpte = ptep_get(pte);
|
oldpte = ptep_get(pte);
|
||||||
@@ -379,7 +379,7 @@ static long change_pte_range(struct mmu_gather *tlb,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
} while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
|
} while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(pte - 1, ptl);
|
pte_unmap_unlock(pte - 1, ptl);
|
||||||
|
|
||||||
return pages;
|
return pages;
|
||||||
|
|||||||
@@ -260,7 +260,7 @@ static int move_ptes(struct pagetable_move_control *pmc,
|
|||||||
if (new_ptl != old_ptl)
|
if (new_ptl != old_ptl)
|
||||||
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
|
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
|
||||||
flush_tlb_batched_pending(vma->vm_mm);
|
flush_tlb_batched_pending(vma->vm_mm);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
|
for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
|
||||||
new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
|
new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
|
||||||
@@ -305,7 +305,7 @@ static int move_ptes(struct pagetable_move_control *pmc,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
if (force_flush)
|
if (force_flush)
|
||||||
flush_tlb_range(vma, old_end - len, old_end);
|
flush_tlb_range(vma, old_end - len, old_end);
|
||||||
if (new_ptl != old_ptl)
|
if (new_ptl != old_ptl)
|
||||||
|
|||||||
@@ -1103,7 +1103,7 @@ static long move_present_ptes(struct mm_struct *mm,
|
|||||||
/* It's safe to drop the reference now as the page-table is holding one. */
|
/* It's safe to drop the reference now as the page-table is holding one. */
|
||||||
folio_put(*first_src_folio);
|
folio_put(*first_src_folio);
|
||||||
*first_src_folio = NULL;
|
*first_src_folio = NULL;
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
|
orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
|
||||||
@@ -1140,7 +1140,7 @@ static long move_present_ptes(struct mm_struct *mm,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
if (src_addr > src_start)
|
if (src_addr > src_start)
|
||||||
flush_tlb_range(src_vma, src_start, src_addr);
|
flush_tlb_range(src_vma, src_start, src_addr);
|
||||||
|
|
||||||
|
|||||||
12
mm/vmalloc.c
12
mm/vmalloc.c
@@ -108,7 +108,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
|
|||||||
if (!pte)
|
if (!pte)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (unlikely(!pte_none(ptep_get(pte)))) {
|
if (unlikely(!pte_none(ptep_get(pte)))) {
|
||||||
@@ -134,7 +134,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
|
|||||||
pfn++;
|
pfn++;
|
||||||
} while (pte += PFN_DOWN(size), addr += size, addr != end);
|
} while (pte += PFN_DOWN(size), addr += size, addr != end);
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
*mask |= PGTBL_PTE_MODIFIED;
|
*mask |= PGTBL_PTE_MODIFIED;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -371,7 +371,7 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
|
|||||||
unsigned long size = PAGE_SIZE;
|
unsigned long size = PAGE_SIZE;
|
||||||
|
|
||||||
pte = pte_offset_kernel(pmd, addr);
|
pte = pte_offset_kernel(pmd, addr);
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
do {
|
do {
|
||||||
#ifdef CONFIG_HUGETLB_PAGE
|
#ifdef CONFIG_HUGETLB_PAGE
|
||||||
@@ -390,7 +390,7 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
|
|||||||
WARN_ON(!pte_none(ptent) && !pte_present(ptent));
|
WARN_ON(!pte_none(ptent) && !pte_present(ptent));
|
||||||
} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
|
} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
*mask |= PGTBL_PTE_MODIFIED;
|
*mask |= PGTBL_PTE_MODIFIED;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -538,7 +538,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
|
|||||||
if (!pte)
|
if (!pte)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
do {
|
do {
|
||||||
struct page *page = pages[*nr];
|
struct page *page = pages[*nr];
|
||||||
@@ -560,7 +560,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
|
|||||||
(*nr)++;
|
(*nr)++;
|
||||||
} while (pte++, addr += PAGE_SIZE, addr != end);
|
} while (pte++, addr += PAGE_SIZE, addr != end);
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
*mask |= PGTBL_PTE_MODIFIED;
|
*mask |= PGTBL_PTE_MODIFIED;
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
|
|||||||
12
mm/vmscan.c
12
mm/vmscan.c
@@ -3516,7 +3516,7 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
restart:
|
restart:
|
||||||
for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
|
for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
|
||||||
unsigned long pfn;
|
unsigned long pfn;
|
||||||
@@ -3557,7 +3557,7 @@ restart:
|
|||||||
if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
|
if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
|
||||||
goto restart;
|
goto restart;
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
pte_unmap_unlock(pte, ptl);
|
pte_unmap_unlock(pte, ptl);
|
||||||
|
|
||||||
return suitable_to_scan(total, young);
|
return suitable_to_scan(total, young);
|
||||||
@@ -3598,7 +3598,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
|
|||||||
if (!spin_trylock(ptl))
|
if (!spin_trylock(ptl))
|
||||||
goto done;
|
goto done;
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
do {
|
do {
|
||||||
unsigned long pfn;
|
unsigned long pfn;
|
||||||
@@ -3645,7 +3645,7 @@ next:
|
|||||||
|
|
||||||
walk_update_folio(walk, last, gen, dirty);
|
walk_update_folio(walk, last, gen, dirty);
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
spin_unlock(ptl);
|
spin_unlock(ptl);
|
||||||
done:
|
done:
|
||||||
*first = -1;
|
*first = -1;
|
||||||
@@ -4244,7 +4244,7 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
arch_enter_lazy_mmu_mode();
|
lazy_mmu_mode_enable();
|
||||||
|
|
||||||
pte -= (addr - start) / PAGE_SIZE;
|
pte -= (addr - start) / PAGE_SIZE;
|
||||||
|
|
||||||
@@ -4278,7 +4278,7 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
|
|||||||
|
|
||||||
walk_update_folio(walk, last, gen, dirty);
|
walk_update_folio(walk, last, gen, dirty);
|
||||||
|
|
||||||
arch_leave_lazy_mmu_mode();
|
lazy_mmu_mode_disable();
|
||||||
|
|
||||||
/* feedback from rmap walkers to page table walkers */
|
/* feedback from rmap walkers to page table walkers */
|
||||||
if (mm_state && suitable_to_scan(i, young))
|
if (mm_state && suitable_to_scan(i, young))
|
||||||
|
|||||||
Reference in New Issue
Block a user