mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-04 20:19:47 +08:00

The page containing VDSO time data is swapped with the one containing TIME namespace data when a process uses a non-root time namespace. For other data like powerpc specific data and RNG data, it means tracking whether time namespace is the root one or not to know which page to use. Simplify the logic behind by moving time data out of first data page so that the first data page which contains everything else always remains the first page. Time data is in the second or third page depending on selected time namespace. While we are playing with get_datapage macro, directly take into account the data offset inside the macro instead of adding that offset afterwards. Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://patch.msgid.link/0557d3ec898c1d0ea2fc59fa8757618e524c5d94.1727858295.git.christophe.leroy@csgroup.eu
100 lines
1.9 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* vDSO provided cache flush routines
|
|
*
|
|
* Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
|
|
* IBM Corp.
|
|
*/
|
|
#include <asm/processor.h>
|
|
#include <asm/ppc_asm.h>
|
|
#include <asm/vdso.h>
|
|
#include <asm/vdso_datapage.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/cache.h>
|
|
|
|
.text
|
|
|
|
/*
|
|
* Default "generic" version of __kernel_sync_dicache.
|
|
*
|
|
* void __kernel_sync_dicache(unsigned long start, unsigned long end)
|
|
*
|
|
* Flushes the data cache & invalidate the instruction cache for the
|
|
* provided range [start, end[
|
|
*/
|
|
V_FUNCTION_BEGIN(__kernel_sync_dicache)
  .cfi_startproc
	/*
	 * CPUs with a coherent icache (CPU_FTR_COHERENT_ICACHE) skip the
	 * per-line loops entirely and take the short path at label 3.
	 */
BEGIN_FTR_SECTION
	b	3f
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
#ifdef CONFIG_PPC64
	/*
	 * 64-bit reads the cache geometry from the vDSO data page.
	 * get_datapage clobbers LR, so preserve it in r12 and tell the
	 * unwinder about the temporary location via CFI.
	 */
	mflr	r12
  .cfi_register lr,r12
	get_datapage	r10
	mtlr	r12
  .cfi_restore lr
#endif

	/* ---- Phase 1: flush the data cache over [start, end[ ---- */
#ifdef CONFIG_PPC64
	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)	/* r7 = D-cache block size (bytes) */
	addi	r5,r7,-1			/* r5 = block-size - 1, used as mask */
#else
	/* 32-bit uses the compile-time L1 line size instead of the data page. */
	li	r5, L1_CACHE_BYTES - 1
#endif
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
#ifdef CONFIG_PPC64
	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)
	PPC_SRL.	r8,r8,r9	/* compute line count */
#else
	srwi.	r8, r8, L1_CACHE_SHIFT	/* compute line count */
	mr	r7, r6			/* keep rounded start for the icbi loop below */
#endif
	crclr	cr0*4+so		/* clear cr0 SO bit (vDSO "no error" indication) */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6			/* write one D-cache block back to memory */
#ifdef CONFIG_PPC64
	add	r6,r6,r7		/* advance by runtime block size */
#else
	addi	r6, r6, L1_CACHE_BYTES
#endif
	bdnz	1b
	sync				/* order all dcbst's before the icbi's */

/* Now invalidate the instruction cache */

	/* ---- Phase 2: invalidate the instruction cache over the range ---- */
#ifdef CONFIG_PPC64
	/* I-cache geometry may differ from D-cache: redo the range math. */
	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)
	PPC_SRL.	r8,r8,r9	/* compute line count */
	crclr	cr0*4+so
	beqlr				/* nothing to do? */
#endif
	mtctr	r8
#ifdef CONFIG_PPC64
2:	icbi	0,r6			/* invalidate one I-cache block */
	add	r6,r6,r7
#else
	/* 32-bit: same line count as phase 1; r7 was saved as the rounded start. */
2:	icbi	0, r7
	addi	r7, r7, L1_CACHE_BYTES
#endif
	bdnz	2b
	isync				/* discard any prefetched instructions */
	li	r3,0			/* return 0 = success */
	blr

	/*
	 * Coherent-icache fast path: no per-line loop needed, a single
	 * sync / icbi / isync sequence suffices (icbi target address is
	 * irrelevant here; the stack pointer is just a handy valid address).
	 */
3:
	crclr	cr0*4+so
	sync
	icbi	0,r1
	isync
	li	r3,0
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_sync_dicache)
|