mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
bpf, riscv64: Skip redundant zext instruction after load-acquire
Currently, the verifier inserts a zext instruction right after every 8-, 16- or 32-bit load-acquire, which is already zero-extending. Skip such redundant zext instructions. While we are here, update that already-obsolete comment about "skip the next instruction" in build_body(). Also change emit_atomic_rmw()'s parameters to keep it consistent with emit_atomic_ld_st(). Note that checking 'insn[1]' relies on 'insn' not being the last instruction, which should have been guaranteed by the verifier; we already use 'insn[1]' elsewhere in the file for similar purposes. Additionally, we don't check if 'insn[1]' is actually a zext for our load-acquire's dst_reg, or some other register - in other words, here we are relying on the verifier to always insert a redundant zext right after an 8/16/32-bit load-acquire, for its dst_reg. Acked-by: Björn Töpel <bjorn@kernel.org> Reviewed-by: Pu Lehui <pulehui@huawei.com> Tested-by: Björn Töpel <bjorn@rivosinc.com> # QEMU/RVA23 Signed-off-by: Peilin Ye <yepeilin@google.com> Link: https://lore.kernel.org/r/10e90e0eab042f924d35ad0d1c1f7ca29f673152.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
committed by
Alexei Starovoitov
parent
8afd3170d5
commit
db7a3822b5
@@ -607,8 +607,13 @@ static void emit_store_64(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx)
|
||||
emit_sd(RV_REG_T1, 0, rs, ctx);
|
||||
}
|
||||
|
||||
static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_jit_context *ctx)
|
||||
static int emit_atomic_ld_st(u8 rd, u8 rs, const struct bpf_insn *insn,
|
||||
struct rv_jit_context *ctx)
|
||||
{
|
||||
u8 code = insn->code;
|
||||
s32 imm = insn->imm;
|
||||
s16 off = insn->off;
|
||||
|
||||
switch (imm) {
|
||||
/* dst_reg = load_acquire(src_reg + off16) */
|
||||
case BPF_LOAD_ACQ:
|
||||
@@ -627,6 +632,12 @@ static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_
|
||||
break;
|
||||
}
|
||||
emit_fence_r_rw(ctx);
|
||||
|
||||
/* If our next insn is a redundant zext, return 1 to tell
|
||||
* build_body() to skip it.
|
||||
*/
|
||||
if (BPF_SIZE(code) != BPF_DW && insn_is_zext(&insn[1]))
|
||||
return 1;
|
||||
break;
|
||||
/* store_release(dst_reg + off16, src_reg) */
|
||||
case BPF_STORE_REL:
|
||||
@@ -654,10 +665,12 @@ static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int emit_atomic_rmw(u8 rd, u8 rs, s16 off, s32 imm, u8 code,
|
||||
static int emit_atomic_rmw(u8 rd, u8 rs, const struct bpf_insn *insn,
|
||||
struct rv_jit_context *ctx)
|
||||
{
|
||||
u8 r0;
|
||||
u8 r0, code = insn->code;
|
||||
s16 off = insn->off;
|
||||
s32 imm = insn->imm;
|
||||
int jmp_offset;
|
||||
bool is64;
|
||||
|
||||
@@ -2026,9 +2039,9 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
|
||||
case BPF_STX | BPF_ATOMIC | BPF_W:
|
||||
case BPF_STX | BPF_ATOMIC | BPF_DW:
|
||||
if (bpf_atomic_is_load_store(insn))
|
||||
ret = emit_atomic_ld_st(rd, rs, off, imm, code, ctx);
|
||||
ret = emit_atomic_ld_st(rd, rs, insn, ctx);
|
||||
else
|
||||
ret = emit_atomic_rmw(rd, rs, off, imm, code, ctx);
|
||||
ret = emit_atomic_rmw(rd, rs, insn, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
@@ -26,9 +26,8 @@ static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
|
||||
int ret;
|
||||
|
||||
ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
|
||||
/* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
|
||||
if (ret > 0)
|
||||
i++;
|
||||
i++; /* skip the next instruction */
|
||||
if (offset)
|
||||
offset[i] = ctx->ninsns;
|
||||
if (ret < 0)
|
||||
|
||||
Reference in New Issue
Block a user