linux/arch/powerpc/include/asm/module.h
Naveen N Rao eec37961a5 powerpc64/ftrace: Move ftrace sequence out of line
The function profile sequence on powerpc includes two instructions at the
beginning of each function:
	mflr	r0
	bl	ftrace_caller

The call to ftrace_caller() gets nop'ed out during kernel boot and is
patched in when ftrace is enabled.
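
This nop-out is conceptually a single instruction patch; a minimal
sketch using the generic powerpc code-patching helpers from
asm/code-patching.h ('ip' names the patch site and is illustrative):

	/* sketch: replace the 'bl ftrace_caller' at 'ip' with a nop */
	patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()));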

Given the sequence, we cannot return from ftrace_caller with 'blr' as we
need to keep LR and r0 intact. This results in link stack (return
address predictor) imbalance when ftrace is enabled. To address that, we
would like to use a three instruction sequence:
	mflr	r0
	bl	ftrace_caller
	mtlr	r0

Furthermore, to support DYNAMIC_FTRACE_WITH_CALL_OPS, we need to
reserve two instruction slots before the function. This results in a
total of five instruction slots to be reserved for ftrace use on each
function that is traced.
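
For illustration, the in-line layout this would require looks roughly
like the following, with DYNAMIC_FTRACE_WITH_CALL_OPS storing a pointer
to the associated ftrace_ops in the two slots before the entry point:

	.quad	<ftrace_ops pointer>	/* 2 reserved slots */
<func>:
	mflr	r0
	bl	ftrace_caller
	mtlr	r0
	...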

Move the function profile sequence out-of-line to minimize its impact.
To do this, we reserve a single nop at function entry using
-fpatchable-function-entry=1 and add a pass on vmlinux.o to determine
the total number of functions that can be traced. This is then used to
generate a .S file reserving the appropriate amount of space for use as
ftrace stubs, which is built and linked into vmlinux.
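
A sketch of what that generated file might contain (the section name
and stub count are illustrative; ftrace_ool_stub_text_end is the symbol
visible in the disassembly below):

	.set	NR_STUBS, 42000		/* count derived from vmlinux.o; illustrative */
	.pushsection .tramp.ftrace.text, "ax"
	.globl	ftrace_ool_stub_text_end
ftrace_ool_stub_text_end:
	.space	NR_STUBS * 16		/* 4 instructions x 4 bytes per stub */
	.popsection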

On bootup, the stub space is split into separate stubs per function and
populated with the proper instruction sequence. A pointer to the
associated stub is maintained in dyn_arch_ftrace.
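
The per-function bookkeeping plausibly reduces to shapes like these (a
sketch only; the real definitions live in asm/ftrace.h and field names
may differ):

	/* one stub: mflr r0; bl ftrace_caller; mtlr r0; b <func+offset> */
	struct ftrace_ool_stub {
		u32	insn[4];
	};

	struct dyn_arch_ftrace {
		struct ftrace_ool_stub *ool_stub;	/* this function's stub */
	};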

For modules, space for ftrace stubs is reserved from the generic module
stub space.
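
Tying that to the mod_arch_specific fields declared in the header
below, handing out a module's stubs is essentially the following (the
helper name is hypothetical):

	/* sketch: return the next unused stub from this module's reservation */
	static struct ftrace_ool_stub *ftrace_ool_stub_alloc(struct module *mod)
	{
		if (mod->arch.ool_stub_index >= mod->arch.ool_stub_count)
			return NULL;		/* reservation exhausted */

		return &mod->arch.ool_stubs[mod->arch.ool_stub_index++];
	}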

This is restricted to, and enabled by default on, 64-bit powerpc only,
though some of the changes accommodate 32-bit powerpc as well. This is
done so that 32-bit powerpc can opt in later based on further tests and
benchmarks.

As an example, after this patch, kernel functions will have a single nop
at function entry:
<kernel_clone>:
	addis	r2,r12,467
	addi	r2,r2,-16028
	nop
	mfocrf	r11,8
	...

When ftrace is enabled, the nop is converted to an unconditional branch
to the stub associated with that function:
<kernel_clone>:
	addis	r2,r12,467
	addi	r2,r2,-16028
	b	ftrace_ool_stub_text_end+0x11b28
	mfocrf	r11,8
	...

The associated stub:
<ftrace_ool_stub_text_end+0x11b28>:
	mflr	r0
	bl	ftrace_caller
	mtlr	r0
	b	kernel_clone+0xc
	...
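
Enabling tracing thus amounts to patching that single branch; a minimal
sketch using create_branch() and patch_instruction() from
asm/code-patching.h (the wrapper itself is illustrative):

	/* sketch: turn the entry nop at 'ip' into 'b <stub>' */
	static int ftrace_ool_stub_enable(unsigned long ip, unsigned long stub)
	{
		ppc_inst_t branch;

		if (create_branch(&branch, (u32 *)ip, stub, 0))
			return -EINVAL;		/* stub out of 'b' reach */

		return patch_instruction((u32 *)ip, branch);
	}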

This change showed a ~10% improvement in the null_syscall benchmark on
a Power10 system with ftrace enabled.

Signed-off-by: Naveen N Rao <naveen@kernel.org>
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/20241030070850.1361304-13-hbathini@linux.ibm.com
2024-10-31 11:00:54 +11:00

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_MODULE_H
#define _ASM_POWERPC_MODULE_H
#ifdef __KERNEL__

#include <linux/list.h>
#include <asm/bug.h>
#include <asm-generic/module.h>

#ifndef __powerpc64__
/*
 * Thanks to Paul M for explaining this.
 *
 * PPC can only do rel jumps += 32MB, and often the kernel and other
 * modules are further away than this. So, we jump to a table of
 * trampolines attached to the module (the Procedure Linkage Table)
 * whenever that happens.
 */
struct ppc_plt_entry {
	/* 16 byte jump instruction sequence (4 instructions) */
	unsigned int jump[4];
};
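/*
 * The jump sequence is populated at relocation time; roughly (an
 * illustrative sequence, see arch/powerpc/kernel/module_32.c):
 *	lis	rN, target@ha
 *	addi	rN, rN, target@l
 *	mtctr	rN
 *	bctr
 */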
#endif /* __powerpc64__ */
struct mod_arch_specific {
#ifdef __powerpc64__
	unsigned int stubs_section;	/* Index of stubs section in module */
#ifdef CONFIG_PPC_KERNEL_PCREL
	unsigned int got_section;	/* What section is the GOT? */
	unsigned int pcpu_section;	/* .data..percpu section */
#else
	unsigned int toc_section;	/* What section is the TOC? */
	bool toc_fixed;			/* Have we fixed up .TOC.? */
#endif

	/* For module function descriptor dereference */
	unsigned long start_opd;
	unsigned long end_opd;
#else /* powerpc64 */
	/* Indices of PLT sections within module. */
	unsigned int core_plt_section;
	unsigned int init_plt_section;
#endif /* powerpc64 */

#ifdef CONFIG_DYNAMIC_FTRACE
	unsigned long tramp;		/* trampoline to ftrace_caller */
	unsigned long tramp_regs;	/* trampoline to ftrace_regs_caller */
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
	struct ftrace_ool_stub *ool_stubs;	/* this module's out-of-line stub space */
	unsigned int ool_stub_count;		/* number of stubs reserved */
	unsigned int ool_stub_index;		/* next unused stub */
#endif
#endif
};
/*
 * Select ELF headers.
 * Make empty sections for module_frob_arch_sections to expand.
 */

#ifdef __powerpc64__
#    ifdef MODULE
	asm(".section .stubs,\"ax\",@nobits; .align 3; .previous");
#        ifdef CONFIG_PPC_KERNEL_PCREL
	asm(".section .mygot,\"a\",@nobits; .align 3; .previous");
#        endif
#    endif
#else
#    ifdef MODULE
	asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
	asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
#    endif /* MODULE */
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
int module_trampoline_target(struct module *mod, unsigned long trampoline,
			     unsigned long *target);
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
#else
static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
	return 0;
}
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */