bpf, arm64: Add JIT support for timed may_goto

When verifier sees a timed may_goto instruction, it emits a call to
arch_bpf_timed_may_goto() with a stack offset in BPF_REG_AX (arm64 x9)
and expects a count value to be returned in the same register. The
verifier doesn't save or restore any registers before emitting this
call.

arch_bpf_timed_may_goto() should act as a trampoline to call
bpf_check_timed_may_goto() with AAPCS64 calling convention.

To support this custom calling convention, implement
arch_bpf_timed_may_goto() in assembly and make sure BPF caller-saved
registers are saved and restored, call bpf_check_timed_may_goto with
arm64 calling convention where first argument and return value both are
in x0, then put the result back into BPF_REG_AX before returning.

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Xu Kuohai <xukuohai@huawei.com>
Link: https://lore.kernel.org/r/20250827113245.52629-2-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Puranjay Mohan
2025-08-27 11:32:43 +00:00
committed by Alexei Starovoitov
parent 4c229f337e
commit 16175375da
3 changed files with 53 additions and 2 deletions

View File

@@ -2,4 +2,4 @@
#
# ARM64 networking code
#
obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o

View File

@@ -1558,7 +1558,13 @@ emit_cond_jmp:
if (ret < 0)
return ret;
emit_call(func_addr, ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
/*
* Call to arch_bpf_timed_may_goto() is emitted by the
* verifier and called with custom calling convention with
* first argument and return value in BPF_REG_AX (x9).
*/
if (func_addr != (u64)arch_bpf_timed_may_goto)
emit(A64_MOV(1, r0, A64_R(0)), ctx);
break;
}
/* tail call */
@@ -3038,6 +3044,11 @@ bool bpf_jit_bypass_spec_v4(void)
return true;
}
/*
 * Advertise to the BPF core that this JIT provides
 * arch_bpf_timed_may_goto(), so the verifier may emit timed may_goto
 * instructions for this architecture.
 */
bool bpf_jit_supports_timed_may_goto(void)
{
return true;
}
bool bpf_jit_inlines_helper_call(s32 imm)
{
switch (imm) {

View File

@@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2025 Puranjay Mohan <puranjay@kernel.org> */
#include <linux/linkage.h>
/*
 * arch_bpf_timed_may_goto - trampoline between the BPF JIT's custom
 * calling convention and AAPCS64 for bpf_check_timed_may_goto().
 *
 * Custom convention (set up by the verifier-emitted call site):
 *   In:  x9 (BPF_REG_AX) = stack offset of the count/timestamp pair,
 *        relative to BPF_FP (x25)
 *   Out: x9 (BPF_REG_AX) = count value returned by
 *        bpf_check_timed_may_goto()
 *
 * The verifier emits this call without saving any registers, so the
 * BPF caller-saved registers R0-R5 (x7, x0-x4) as well as FP (x29)
 * and LR (x30) are preserved here across the C call.
 */
SYM_FUNC_START(arch_bpf_timed_may_goto)
/* Allocate 64 bytes of stack space and emit frame record */
stp x29, x30, [sp, #-64]!
mov x29, sp
/* Save BPF registers R0 - R5 (x7, x0-x4) */
stp x7, x0, [sp, #16]
stp x1, x2, [sp, #32]
stp x3, x4, [sp, #48]
/*
 * Stack depth was passed in BPF_REG_AX (x9), add it to the BPF_FP
 * (x25) to get the pointer to count and timestamp and pass it as the
 * first argument in x0.
 *
 * Before generating the call to arch_bpf_timed_may_goto, the verifier
 * generates a load instruction using FP, i.e. REG_AX = *(u64 *)(FP -
 * stack_off_cnt), so BPF_REG_FP (x25) is always set up by the arm64
 * jit in this case.
 */
add x0, x9, x25
/* AAPCS64 call: argument and return value in x0 */
bl bpf_check_timed_may_goto
/* BPF_REG_AX (x9) will be stored into count, so move return value to it. */
mov x9, x0
/* Restore BPF registers R0 - R5 (x7, x0-x4) */
ldp x7, x0, [sp, #16]
ldp x1, x2, [sp, #32]
ldp x3, x4, [sp, #48]
/* Restore FP and LR */
ldp x29, x30, [sp], #64
ret
SYM_FUNC_END(arch_bpf_timed_may_goto)