mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-04 20:19:47 +08:00

commit a2faac3986
Similar to commit a6c30873ee ("ARM: 8989/1: use .fpu assembler directives
instead of assembler arguments").

GCC and GNU binutils support setting the "sub arch" via -march=,
-Wa,-march=, the target function attribute, and the .arch assembler
directive. Clang was missing support for -Wa,-march=, but this was
implemented in clang-13. The behavior of both GCC and Clang is to prefer
-Wa,-march= over -march= for assembler and assembler-with-cpp sources,
but Clang will warn about the -march= being unused:

clang: warning: argument unused during compilation: '-march=armv6k' [-Wunused-command-line-argument]

Since most assembler is unconditionally assembled for one sub arch
(modulo arch/arm/delay-loop.S, which is conditionally assembled as armv4
based on CONFIG_ARCH_RPC, and arch/arm/mach-at91/pm-suspend.S, which is
conditionally assembled as armv7-a based on CONFIG_CPU_V7), prefer the
.arch assembler directive. Add a few more instances found in compile
testing, as reported by Arnd and Nathan.

Link: 1d51c699b9
Link: https://bugs.llvm.org/show_bug.cgi?id=48894
Link: https://github.com/ClangBuiltLinux/linux/issues/1195
Link: https://github.com/ClangBuiltLinux/linux/issues/1315
Suggested-by: Arnd Bergmann <arnd@arndb.de>
Suggested-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
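To illustrate the pattern this commit applies, here is a minimal sketch; example.S, example_rev, and the AFLAGS_example.o line are hypothetical names invented for the sketch and are not part of the patch. It shows a .arch directive in the source taking the place of a per-object -Wa,-march= assembler flag in the Makefile:

/* example.S: hypothetical sketch, not taken from the patch */
#include <linux/linkage.h>
#include <asm/assembler.h>

@ Before this style of change, the Makefile would select the sub arch per
@ object, e.g.
@	AFLAGS_example.o := -Wa,-march=armv6
@ which older clang lacked support for, and which clang pairs with an
@ "argument unused during compilation: '-march=...'" warning once it
@ prefers -Wa,-march= over -march=.
@ With the directive below, the source selects the sub arch itself and
@ the per-object assembler flag can be dropped.
	.arch	armv6

ENTRY(example_rev)
	rev	r0, r0			@ REV is an ARMv6 instruction; it assembles
					@ here because of the .arch directive above
	ret	lr
ENDPROC(example_rev)

The real change in this file is the single ".arch armv6" line visible in tlb-v6.S below.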
93 lines · 2.5 KiB · ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/tlb-v6.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARM architecture version 6 TLB handling functions.
 *  These assume a split I/D TLB.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

#define HARVARD_TLB

.arch armv6

/*
 *	v6wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vm_area_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 */
ENTRY(v6wbi_flush_user_tlb_range)
	vma_vm_mm r3, r2			@ get vma->vm_mm
	mov	ip, #0
	mmid	r3, r3				@ get vm_mm->context.id
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	asid	r3, r3				@ mask ASID
	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
	mov	r1, r1, lsl #PAGE_SHIFT
	vma_vm_flags r2, r2			@ get vma->vm_flags
1:
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
	tst	r2, #VM_EXEC			@ Executable area ?
	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate MVA (was 1)
#endif
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
	ret	lr

/*
 *	v6wbi_flush_kern_tlb_range(start,end)
 *
 *	Invalidate a range of kernel TLB entries
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
ENTRY(v6wbi_flush_kern_tlb_range)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	mov	r0, r0, lsl #PAGE_SHIFT
	mov	r1, r1, lsl #PAGE_SHIFT
1:
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA
	mcr	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate MVA
#endif
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush (isb)
	ret	lr

	__INIT

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v6wbi, v6wbi_tlb_flags