
We already have a generic implementation of alloc/free up to the P4D level, as
well as pgd_free(). Let's finish the work and add a generic PGD-level alloc
helper as well. Unlike at lower levels, almost all architectures need some
specific magic at PGD level (typically initialising PGD entries), so
introducing a generic pgd_alloc() isn't worth it. Instead we introduce two new
helpers, __pgd_alloc() and __pgd_free(), and make use of them in the
arch-specific pgd_alloc() and pgd_free() wherever possible. To accommodate as
many architectures as possible, __pgd_alloc() takes a page allocation order.

Because pagetable_alloc() allocates zeroed pages, explicit zeroing in
pgd_alloc() becomes redundant and we can get rid of it. Some trivial
implementations of pgd_free() also become unnecessary once __pgd_alloc() is
used; remove them.

Another small improvement is consistent accounting of PGD pages by using
GFP_PGTABLE_{USER,KERNEL} as appropriate.

Not all PGD allocations can be handled by the generic helpers. In particular,
multiple architectures allocate PGDs from a kmem_cache, and those PGDs may not
be page-sized.

Link: https://lkml.kernel.org/r/20250103184415.2744423-6-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Linus Walleij <linus.walleij@linaro.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
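The shape of the new generic helpers can be sketched as follows. This is a
minimal sketch, assuming the ptdesc-based pagetable_alloc()/pagetable_free()
API already used by the lower-level generic helpers; the actual implementation
in include/asm-generic/pgalloc.h may differ in its details:

    static inline pgd_t *__pgd_alloc(struct mm_struct *mm, unsigned int order)
    {
            gfp_t gfp = GFP_PGTABLE_USER;
            struct ptdesc *ptdesc;

            /* Kernel page tables are accounted differently from user ones */
            if (mm == &init_mm)
                    gfp = GFP_PGTABLE_KERNEL;

            /* pagetable_alloc() hands back zeroed pages, so no memset() */
            ptdesc = pagetable_alloc(gfp, order);
            if (!ptdesc)
                    return NULL;
            return ptdesc_address(ptdesc);
    }

    static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd)
    {
            pagetable_free(virt_to_ptdesc(pgd));
    }

The riscv pgd_alloc() in the header below is one such conversion: it calls
__pgd_alloc(mm, 0) for an order-0 PGD page and keeps only the arch-specific
step of copying the kernel mappings.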
159 lines · 3.8 KiB · C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/sbi.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>

/*
 * Platforms where riscv_use_sbi_for_rfence() is false use an IPI to
 * perform TLB shootdown, which implicitly keeps software pagetable
 * walkers safe. Platforms where it is true use SBI instead, so in that
 * case we switch to RCU-based table free (MMU_GATHER_RCU_TABLE_FREE).
 * See the comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in
 * include/asm-generic/tlb.h for more details.
 */
static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
        if (riscv_use_sbi_for_rfence()) {
                /* RCU-deferred free: walkers may still hold a reference */
                tlb_remove_ptdesc(tlb, pt);
        } else {
                /* IPI-based shootdown already synchronised against walkers */
                pagetable_dtor(pt);
                tlb_remove_page_ptdesc(tlb, pt);
        }
}

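/*
 * All populate helpers below follow the same pattern: take the PFN of
 * the next-level table and install it with _PAGE_TABLE permissions.
 */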
static inline void pmd_populate_kernel(struct mm_struct *mm,
        pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = virt_to_pfn(pte);

        set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm,
        pmd_t *pmd, pgtable_t pte)
{
        unsigned long pfn = virt_to_pfn(page_address(pte));

        set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

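/*
 * The upper levels are folded at runtime: the pud/p4d/pgd helpers only
 * take effect when pgtable_l4_enabled/pgtable_l5_enabled report that the
 * level actually exists (sv48/sv57 paging modes).
 */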
#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        unsigned long pfn = virt_to_pfn(pmd);

        set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
        if (pgtable_l4_enabled) {
                unsigned long pfn = virt_to_pfn(pud);

                set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
        }
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
        pud_t *pud)
{
        if (pgtable_l4_enabled) {
                unsigned long pfn = virt_to_pfn(pud);

                set_p4d_safe(p4d,
                             __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
        }
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
        if (pgtable_l5_enabled) {
                unsigned long pfn = virt_to_pfn(p4d);

                set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
        }
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
        p4d_t *p4d)
{
        if (pgtable_l5_enabled) {
                unsigned long pfn = virt_to_pfn(p4d);

                set_pgd_safe(pgd,
                             __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
        }
}

#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        if (pgtable_l4_enabled)
                __pud_free(mm, pud);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
        unsigned long addr)
{
        if (pgtable_l4_enabled)
                riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
        unsigned long addr)
{
        if (pgtable_l5_enabled)
                riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* __PAGETABLE_PMD_FOLDED */

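/*
 * Copy the kernel half of init_mm's PGD into a new PGD so that kernel
 * mappings are visible through it.
 */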
static inline void sync_kernel_mappings(pgd_t *pgd)
{
        memcpy(pgd + USER_PTRS_PER_PGD,
               init_mm.pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

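/* The riscv PGD occupies a single page, hence order 0 for __pgd_alloc(). */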
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;

        pgd = __pgd_alloc(mm, 0);
        if (likely(pgd != NULL)) {
                /* Copy kernel mappings */
                sync_kernel_mappings(pgd);
        }
        return pgd;
}

#ifndef __PAGETABLE_PMD_FOLDED

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
        unsigned long addr)
{
        riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}

#endif /* __PAGETABLE_PMD_FOLDED */

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        unsigned long addr)
{
        riscv_tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */