Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm: remove arch_flush_tlb_batched_pending() arch helper
Since commit 4b63491838 ("arm64/mm: Close theoretical race where stale TLB entry remains valid"), all arches that use tlbbatch for reclaim (arm64, riscv, x86) implement arch_flush_tlb_batched_pending() with a flush_tlb_mm(). So let's simplify by removing the unnecessary abstraction and doing the flush_tlb_mm() directly in flush_tlb_batched_pending().

This effectively reverts commit db6c1f6f23 ("mm/tlbbatch: introduce arch_flush_tlb_batched_pending()").

Link: https://lkml.kernel.org/r/20250609103132.447370-1-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Suggested-by: Will Deacon <will@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Will Deacon <will@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 441413d2a9
commit a9e056de66
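For orientation before the diff: after this change, flush_tlb_batched_pending() in mm/rmap.c calls flush_tlb_mm() directly instead of bouncing through a per-arch helper. A rough sketch of the resulting function follows; the pending/flushed check, the flush call and the comment are taken from the hunk at the end of this diff, while the surrounding atomic bookkeeping (atomic_read()/atomic_cmpxchg() on mm->tlb_flush_batched and the TLB_FLUSH_BATCH_PENDING_MASK name) is filled in from memory of mm/rmap.c and may not match the tree exactly.

void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;	/* mask name assumed */
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		/* After this commit: flush the whole mm directly. */
		flush_tlb_mm(mm);
		/*
		 * If the new TLB flushing is pending during flushing, leave
		 * mm->tlb_flush_batched as is, to avoid losing flushing.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}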
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -322,17 +322,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 	return true;
 }
 
-/*
- * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure
- * all the previously issued TLBIs targeting mm have completed. But since we
- * can be executing on a remote CPU, a DSB cannot guarantee this like it can
- * for arch_tlbbatch_flush(). Our only option is to flush the entire mm.
- */
-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
-	flush_tlb_mm(mm);
-}
-
 /*
  * To support TLB batched flush for multiple pages unmapping, we only send
  * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -63,7 +63,6 @@ void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start,
 bool arch_tlbbatch_should_defer(struct mm_struct *mm);
 void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 		struct mm_struct *mm, unsigned long start, unsigned long end);
-void arch_flush_tlb_batched_pending(struct mm_struct *mm);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 extern unsigned long tlb_flush_all_threshold;
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -234,11 +234,6 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
 
-void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
-	flush_tlb_mm(mm);
-}
-
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
 	__flush_tlb_range(NULL, &batch->cpumask,
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -356,11 +356,6 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
 
-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
-	flush_tlb_mm(mm);
-}
-
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 static inline bool pte_flags_need_flush(unsigned long oldflags,
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -746,7 +746,7 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
 	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
 
 	if (pending != flushed) {
-		arch_flush_tlb_batched_pending(mm);
+		flush_tlb_mm(mm);
 		/*
 		 * If the new TLB flushing is pending during flushing, leave
 		 * mm->tlb_flush_batched as is, to avoid losing flushing.