
mm: split folio_pte_batch() into folio_pte_batch() and folio_pte_batch_flags()

Many users (including upcoming ones) don't really need the flags or the optional
outputs, and can live with the possible overhead of a function call.

So let's provide a basic, non-inlined folio_pte_batch(), to avoid code
bloat while still providing a variant that optimizes out all flag checks
at runtime.  folio_pte_batch_flags() will get inlined into
folio_pte_batch(), optimizing out any conditionals that depend on input
flags.

folio_pte_batch() will behave like folio_pte_batch_flags() when no flags
are specified.  It's okay to add new users of folio_pte_batch_flags(), but
using folio_pte_batch() if applicable is preferred.
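
For illustration, the two call forms after this patch (argument names taken from
the hunks below):

    /* Simple callers: no flags, no auxiliary outputs. */
    nr = folio_pte_batch(folio, ptep, pte, max_nr);

    /* Callers that pass flags or want the any_* bits reported back. */
    nr = folio_pte_batch_flags(folio, ptep, pte, max_nr, flags,
                               &any_writable, &any_young, &any_dirty);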

So, before this change, folio_pte_batch() was inlined into each C file that used
it and optimized by propagating constants within the resulting object file.

With this change, we now also have a folio_pte_batch() that is optimized
by propagating all constants.  But instead of having one instance per
object file, we have a single shared one.

In zap_present_ptes(), where we care about performance, the compiler already
seems to generate a call to a common inlined folio_pte_batch() variant, shared
with the fork() code.  So calling the new non-inlined variant should not make a
difference.
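
To illustrate the pattern (a minimal, generic sketch with made-up names, not the
kernel code): the flag-parameterized helper stays inline, so every call site --
including the single shared out-of-line wrapper -- gets the flag checks folded
away by constant propagation:

    #include <stdbool.h>

    /* Inlined into every caller; when "flags" is a compile-time constant,
     * the flag-dependent branch below is optimized out entirely. */
    static inline unsigned int scan_batch_flags(const unsigned char *tbl,
                    unsigned int max_nr, unsigned int flags, bool *any_dirty)
    {
            unsigned int nr;

            for (nr = 0; nr < max_nr && tbl[nr]; nr++) {
                    if ((flags & 0x1) && any_dirty)
                            *any_dirty |= tbl[nr] & 0x2;
            }
            return nr;
    }

    /* One shared, non-inlined instance: the constant 0 and NULL propagate
     * into scan_batch_flags(), so this copy contains no flag checks at all. */
    unsigned int scan_batch(const unsigned char *tbl, unsigned int max_nr)
    {
            return scan_batch_flags(tbl, max_nr, 0, NULL);
    }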

While at it, drop the "addr" parameter that is unused.

Link: https://lkml.kernel.org/r/20250702104926.212243-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/linux-mm/20250503182858.5a02729fcffd6d4723afcfc2@linux-foundation.org/
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: David Hildenbrand, 2025-07-02 12:49:25 +02:00; committed by Andrew Morton
commit dd80cfd487 (parent 233e28e2a7)
8 changed files with 46 additions and 18 deletions

@@ -218,9 +218,8 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
 }
 
 /**
- * folio_pte_batch - detect a PTE batch for a large folio
+ * folio_pte_batch_flags - detect a PTE batch for a large folio
  * @folio: The large folio to detect a PTE batch for.
- * @addr: The user virtual address the first page is mapped at.
  * @ptep: Page table pointer for the first entry.
  * @pte: Page table entry for the first page.
  * @max_nr: The maximum number of table entries to consider.
@@ -243,9 +242,12 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
  * must be limited by the caller so scanning cannot exceed a single VMA and
  * a single page table.
  *
+ * This function will be inlined to optimize based on the input parameters;
+ * consider using folio_pte_batch() instead if applicable.
+ *
  * Return: the number of table entries in the batch.
  */
-static inline unsigned int folio_pte_batch(struct folio *folio, unsigned long addr,
+static inline unsigned int folio_pte_batch_flags(struct folio *folio,
 		pte_t *ptep, pte_t pte, unsigned int max_nr, fpb_t flags,
 		bool *any_writable, bool *any_young, bool *any_dirty)
 {
@@ -293,6 +295,9 @@ static inline unsigned int folio_pte_batch(struct folio *folio, unsigned long ad
 	return min(nr, max_nr);
 }
 
+unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
+		unsigned int max_nr);
+
 /**
  * pte_move_swp_offset - Move the swap entry offset field of a swap pte
  * forward or backward by delta

@@ -348,8 +348,8 @@ static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
 {
 	int max_nr = (end - addr) / PAGE_SIZE;
 
-	return folio_pte_batch(folio, addr, ptep, pte, max_nr, 0, NULL,
-			       any_young, any_dirty);
+	return folio_pte_batch_flags(folio, ptep, pte, max_nr, 0, NULL,
+				     any_young, any_dirty);
 }
 
 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,

@@ -995,8 +995,8 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	if (vma_soft_dirty_enabled(src_vma))
 		flags |= FPB_RESPECT_SOFT_DIRTY;
 
-	nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
-			     &any_writable, NULL, NULL);
+	nr = folio_pte_batch_flags(folio, src_pte, pte, max_nr, flags,
+				   &any_writable, NULL, NULL);
 	folio_ref_add(folio, nr);
 	if (folio_test_anon(folio)) {
 		if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
@@ -1564,9 +1564,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 	 * by keeping the batching logic separate.
 	 */
 	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
-		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, 0,
-				     NULL, NULL, NULL);
-
+		nr = folio_pte_batch(folio, pte, ptent, max_nr);
 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
 				       addr, details, rss, force_flush,
 				       force_break, any_skipped);

@@ -711,8 +711,7 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 		if (!folio || folio_is_zone_device(folio))
 			continue;
 		if (folio_test_large(folio) && max_nr != 1)
-			nr = folio_pte_batch(folio, addr, pte, ptent,
-					     max_nr, 0, NULL, NULL, NULL);
+			nr = folio_pte_batch(folio, pte, ptent, max_nr);
 		/*
 		 * vm_normal_folio() filters out zero pages, but there might
 		 * still be reserved folios to skip, perhaps in a VDSO.

@@ -313,8 +313,7 @@ static inline unsigned int folio_mlock_step(struct folio *folio,
 	if (!folio_test_large(folio))
 		return 1;
 
-	return folio_pte_batch(folio, addr, pte, ptent, count, 0, NULL,
-			       NULL, NULL);
+	return folio_pte_batch(folio, pte, ptent, count);
 }
 
 static inline bool allow_mlock_munlock(struct folio *folio,

@@ -182,8 +182,7 @@ static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr
 	if (!folio || !folio_test_large(folio))
 		return 1;
 
-	return folio_pte_batch(folio, addr, ptep, pte, max_nr, 0, NULL,
-			       NULL, NULL);
+	return folio_pte_batch(folio, ptep, pte, max_nr);
 }
 
 static int move_ptes(struct pagetable_move_control *pmc,

@@ -1868,8 +1868,7 @@ static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
 	if (pte_unused(pte))
 		return 1;
 
-	return folio_pte_batch(folio, addr, pvmw->pte, pte, max_nr, 0,
-			       NULL, NULL, NULL);
+	return folio_pte_batch(folio, pvmw->pte, pte, max_nr);
 }
 
 /*

@@ -1171,3 +1171,32 @@ int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 EXPORT_SYMBOL(compat_vma_mmap_prepare);
+
+#ifdef CONFIG_MMU
+/**
+ * folio_pte_batch - detect a PTE batch for a large folio
+ * @folio: The large folio to detect a PTE batch for.
+ * @ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @max_nr: The maximum number of table entries to consider.
+ *
+ * This is a simplified variant of folio_pte_batch_flags().
+ *
+ * Detect a PTE batch: consecutive (present) PTEs that map consecutive
+ * pages of the same large folio in a single VMA and a single page table.
+ *
+ * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
+ * the accessed bit, writable bit, dirty bit and soft-dirty bit.
+ *
+ * ptep must map any page of the folio. max_nr must be at least one and
+ * must be limited by the caller so scanning cannot exceed a single VMA and
+ * a single page table.
+ *
+ * Return: the number of table entries in the batch.
+ */
+unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
+		unsigned int max_nr)
+{
+	return folio_pte_batch_flags(folio, ptep, pte, max_nr, 0, NULL, NULL, NULL);
+}
+#endif /* CONFIG_MMU */