mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Patch series "Add and use memdesc_flags_t". At some point struct page will be separated from struct slab and struct folio. This is a step towards that by introducing a type for the 'flags' word of all three structures. This gives us a certain amount of type safety by establishing that some of these unsigned longs are different from other unsigned longs in that they contain things like node ID, section number and zone number in the upper bits. That lets us have functions that can be easily called by anyone who has a slab, folio or page (but not easily by anyone else) to get the node or zone. There's going to be some unusual merge problems with this as some odd bits of the kernel decide they want to print out the flags value or something similar by writing page->flags and now they'll need to write page->flags.f instead. That's most of the churn here. Maybe we should be removing these things from the debug output? This patch (of 11): Wrap the unsigned long flags in a typedef. In upcoming patches, this will provide a strong hint that you can't just pass a random unsigned long to functions which take this as an argument. [willy@infradead.org: s/flags/flags.f/ in several architectures] Link: https://lkml.kernel.org/r/aKMgPRLD-WnkPxYm@casper.infradead.org [nicola.vetrini@gmail.com: mips: fix compilation error] Link: https://lore.kernel.org/lkml/CA+G9fYvkpmqGr6wjBNHY=dRp71PLCoi2341JxOudi60yqaeUdg@mail.gmail.com/ Link: https://lkml.kernel.org/r/20250825214245.1838158-1-nicola.vetrini@gmail.com Link: https://lkml.kernel.org/r/20250805172307.1302730-1-willy@infradead.org Link: https://lkml.kernel.org/r/20250805172307.1302730-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Acked-by: Zi Yan <ziy@nvidia.com> Cc: Shakeel Butt <shakeel.butt@linux.dev> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
290 lines
8.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/prctl.h>
#include <asm/acpi.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}

void flush_icache_all(void)
{
	local_flush_icache_all();

	if (num_online_cpus() < 2)
		return;

	/*
	 * Make sure all previous writes to the D$ are ordered before making
	 * the IPI. The RISC-V spec states that a hart must execute a data fence
	 * before triggering a remote fence.i in order to make the modification
	 * visible for remote harts.
	 *
	 * IPIs on RISC-V are triggered by MMIO writes to either CLINT or
	 * S-IMSIC, so the fence ensures previous data writes "happen before"
	 * the MMIO.
	 */
	RISCV_FENCE(w, o);

	if (riscv_use_sbi_for_rfence())
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context. RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart. Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (riscv_use_sbi_for_rfence()) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
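
/*
 * For reference: the deferred half of this scheme runs when a hart next
 * switches to this mm, and its barrier pairs with the ordering above.
 * A rough sketch (the actual helper is flush_icache_deferred() in
 * arch/riscv/mm/context.c; the details there may differ):
 *
 *	if (cpumask_test_and_clear_cpu(cpu, &mm->context.icache_stale_mask)) {
 *		smp_mb();
 *		local_flush_icache_all();
 *	}
 */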

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
void flush_icache_pte(struct mm_struct *mm, pte_t pte)
{
	struct folio *folio = page_folio(pte_page(pte));

	if (!test_bit(PG_dcache_clean, &folio->flags.f)) {
		flush_icache_mm(mm, false);
		set_bit(PG_dcache_clean, &folio->flags.f);
	}
}
#endif /* CONFIG_MMU */

unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);

unsigned int riscv_cbop_block_size;
EXPORT_SYMBOL_GPL(riscv_cbop_block_size);

static void __init cbo_get_block_size(struct device_node *node,
				      const char *name, u32 *block_size,
				      unsigned long *first_hartid)
{
	unsigned long hartid;
	u32 val;

	if (riscv_of_processor_hartid(node, &hartid))
		return;

	if (of_property_read_u32(node, name, &val))
		return;

	if (!*block_size) {
		*block_size = val;
		*first_hartid = hartid;
	} else if (*block_size != val) {
		pr_warn("%s mismatched between harts %lu and %lu\n",
			name, *first_hartid, hartid);
	}
}

void __init riscv_init_cbo_blocksizes(void)
{
	unsigned long cbom_hartid, cboz_hartid, cbop_hartid;
	u32 cbom_block_size = 0, cboz_block_size = 0, cbop_block_size = 0;
	struct device_node *node;
	struct acpi_table_header *rhct;
	acpi_status status;

	if (acpi_disabled) {
		for_each_of_cpu_node(node) {
			/* set block-size for cbom, cboz and/or cbop extension if available */
			cbo_get_block_size(node, "riscv,cbom-block-size",
					   &cbom_block_size, &cbom_hartid);
			cbo_get_block_size(node, "riscv,cboz-block-size",
					   &cboz_block_size, &cboz_hartid);
			cbo_get_block_size(node, "riscv,cbop-block-size",
					   &cbop_block_size, &cbop_hartid);
		}
	} else {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;

		acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, &cbop_block_size);
		acpi_put_table((struct acpi_table_header *)rhct);
	}

	if (cbom_block_size)
		riscv_cbom_block_size = cbom_block_size;

	if (cboz_block_size)
		riscv_cboz_block_size = cboz_block_size;

	if (cbop_block_size)
		riscv_cbop_block_size = cbop_block_size;
}

#ifdef CONFIG_SMP
static void set_icache_stale_mask(void)
{
	int cpu = get_cpu();
	cpumask_t *mask;
	bool stale_cpu;

	/*
	 * Mark every other hart's icache as needing a flush for
	 * this MM. Maintain the previous value of the current
	 * cpu to handle the case when this function is called
	 * concurrently on different harts.
	 */
	mask = &current->mm->context.icache_stale_mask;
	stale_cpu = cpumask_test_cpu(cpu, mask);

	cpumask_setall(mask);
	__assign_cpu(cpu, mask, stale_cpu);
	put_cpu();
}
#endif

/**
 * riscv_set_icache_flush_ctx() - Enable/disable icache flushing instructions in
 * userspace.
 * @ctx: Set the type of icache flushing instructions permitted/prohibited in
 *	 userspace. Supported values described below.
 *
 * Supported values for ctx:
 *
 * * %PR_RISCV_CTX_SW_FENCEI_ON: Allow fence.i in user space.
 *
 * * %PR_RISCV_CTX_SW_FENCEI_OFF: Disallow fence.i in user space. All threads in
 *   a process will be affected when ``scope == PR_RISCV_SCOPE_PER_PROCESS``.
 *   Therefore, caution must be taken; use this flag only when you can guarantee
 *   that no thread in the process will emit fence.i from this point onward.
 *
 * @scope: Set scope of where icache flushing instructions are allowed to be
 *	   emitted. Supported values described below.
 *
 * Supported values for scope:
 *
 * * %PR_RISCV_SCOPE_PER_PROCESS: Ensure the icache of any thread in this process
 *                                is coherent with instruction storage upon
 *                                migration.
 *
 * * %PR_RISCV_SCOPE_PER_THREAD: Ensure the icache of the current thread is
 *                               coherent with instruction storage upon
 *                               migration.
 *
 * When ``scope == PR_RISCV_SCOPE_PER_PROCESS``, all threads in the process are
 * permitted to emit icache flushing instructions. Whenever any thread in the
 * process is migrated, the corresponding hart's icache will be guaranteed to be
 * consistent with instruction storage. This does not enforce any guarantees
 * outside of migration. If a thread modifies an instruction that another thread
 * may attempt to execute, the other thread must still emit an icache flushing
 * instruction before attempting to execute the potentially modified
 * instruction. This must be performed by the user-space program.
 *
 * In per-thread context (i.e. ``scope == PR_RISCV_SCOPE_PER_THREAD``), only the
 * thread calling this function is permitted to emit icache flushing
 * instructions. When the thread is migrated, the corresponding hart's icache
 * will be guaranteed to be consistent with instruction storage.
 *
 * On kernels configured without SMP, this function is a nop as migrations
 * across harts will not occur.
 */
int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long scope)
{
#ifdef CONFIG_SMP
	switch (ctx) {
	case PR_RISCV_CTX_SW_FENCEI_ON:
		switch (scope) {
		case PR_RISCV_SCOPE_PER_PROCESS:
			current->mm->context.force_icache_flush = true;
			break;
		case PR_RISCV_SCOPE_PER_THREAD:
			current->thread.force_icache_flush = true;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_RISCV_CTX_SW_FENCEI_OFF:
		switch (scope) {
		case PR_RISCV_SCOPE_PER_PROCESS:
			set_icache_stale_mask();
			current->mm->context.force_icache_flush = false;
			break;
		case PR_RISCV_SCOPE_PER_THREAD:
			set_icache_stale_mask();
			current->thread.force_icache_flush = false;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
#else
	switch (ctx) {
	case PR_RISCV_CTX_SW_FENCEI_ON:
	case PR_RISCV_CTX_SW_FENCEI_OFF:
		return 0;
	default:
		return -EINVAL;
	}
#endif
}
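
/*
 * Illustrative userspace usage (not part of this file): these knobs are
 * reached through prctl(2) via the PR_RISCV_SET_ICACHE_FLUSH_CTX op. A
 * JIT that emits fence.i itself would opt in once, before patching any
 * code, e.g.:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON,
 *	      PR_RISCV_SCOPE_PER_PROCESS);
 */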