Merge branch 'bpf-fix-and-test-aux-usage-after-do_check_insn'

Luis Gerhorst says:

====================
bpf: Fix and test aux usage after do_check_insn()

Fix cur_aux()->nospec_result test after do_check_insn() referring to the
to-be-analyzed (potentially unsafe) instruction, not the
already-analyzed (safe) instruction. This might allow an unsafe insn to
slip through on a speculative path. Create some tests from the
reproducer [1].

Commit d6f1c85f22 ("bpf: Fall back to nospec for Spectre v1") should
not be in any stable kernel yet, therefore bpf-next should suffice.

[1] https://lore.kernel.org/bpf/685b3c1b.050a0220.2303ee.0010.GAE@google.com/

Changes since v2:
- Use insn_aux variable instead of introducing prev_aux() as suggested
  by Eduard (and therefore also drop patch 1)
- v2: https://lore.kernel.org/bpf/20250628145016.784256-1-luis.gerhorst@fau.de/

Changes since v1:
- Fix compiler error due to missed rename of prev_insn_idx in first
  patch
- v1: https://lore.kernel.org/bpf/20250628125927.763088-1-luis.gerhorst@fau.de/

Changes since RFC:
- Introduce prev_aux() as suggested by Alexei. For this, we must move
  the env->prev_insn_idx assignment to happen directly after
  do_check_insn(), for which I have created a separate commit. This
  patch could be simplified by using a local prev_aux variable as
  suggested by Eduard, but I figured one might find the new
  assignment-strategy easier to understand (before, prev_insn_idx and
  env->prev_insn_idx were out-of-sync for the latter part of the loop).
  Also, like this we do not have an additional prev_* variable that must
  be kept in-sync and the local variable's usage (old prev_insn_idx, new
  tmp) is much more local. If you think it would be better to not take
  the risk and keep the fix simple by just introducing the prev_aux
  variable, let me know.
- Change WARN_ON_ONCE() to verifier_bug_if() as suggested by Alexei
- Change assertion to check instruction is BPF_JMP[32] as suggested by
  Eduard
- RFC: https://lore.kernel.org/bpf/8734bmoemx.fsf@fau.de/
====================

Link: https://patch.msgid.link/20250705190908.1756862-1-luis.gerhorst@fau.de
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Alexei Starovoitov
2025-07-07 08:32:34 -07:00
3 changed files with 167 additions and 5 deletions

View File

@@ -19953,6 +19953,7 @@ static int do_check(struct bpf_verifier_env *env)
for (;;) {
struct bpf_insn *insn;
struct bpf_insn_aux_data *insn_aux;
int err;
/* reset current history entry on each new instruction */
@@ -19966,6 +19967,7 @@ static int do_check(struct bpf_verifier_env *env)
}
insn = &insns[env->insn_idx];
insn_aux = &env->insn_aux_data[env->insn_idx];
if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
verbose(env,
@@ -20042,7 +20044,7 @@ static int do_check(struct bpf_verifier_env *env)
/* Reduce verification complexity by stopping speculative path
* verification when a nospec is encountered.
*/
if (state->speculative && cur_aux(env)->nospec)
if (state->speculative && insn_aux->nospec)
goto process_bpf_exit;
err = do_check_insn(env, &do_print_state);
@@ -20050,11 +20052,11 @@ static int do_check(struct bpf_verifier_env *env)
/* Prevent this speculative path from ever reaching the
* insn that would have been unsafe to execute.
*/
cur_aux(env)->nospec = true;
insn_aux->nospec = true;
/* If it was an ADD/SUB insn, potentially remove any
* markings for alu sanitization.
*/
cur_aux(env)->alu_state = 0;
insn_aux->alu_state = 0;
goto process_bpf_exit;
} else if (err < 0) {
return err;
@@ -20063,7 +20065,7 @@ static int do_check(struct bpf_verifier_env *env)
}
WARN_ON_ONCE(err);
if (state->speculative && cur_aux(env)->nospec_result) {
if (state->speculative && insn_aux->nospec_result) {
/* If we are on a path that performed a jump-op, this
* may skip a nospec patched-in after the jump. This can
* currently never happen because nospec_result is only
@@ -20072,8 +20074,15 @@ static int do_check(struct bpf_verifier_env *env)
* never skip the following insn. Still, add a warning
* to document this in case nospec_result is used
* elsewhere in the future.
*
* All non-branch instructions have a single
* fall-through edge. For these, nospec_result should
* already work.
*/
WARN_ON_ONCE(env->insn_idx != prev_insn_idx + 1);
if (verifier_bug_if(BPF_CLASS(insn->code) == BPF_JMP ||
BPF_CLASS(insn->code) == BPF_JMP32, env,
"speculation barrier after jump instruction may not have the desired effect"))
return -EFAULT;
process_bpf_exit:
mark_verifier_state_scratched(env);
err = update_branch_counts(env, env->cur_state);

View File

@@ -237,4 +237,8 @@
#define SPEC_V1
#endif
#if defined(__TARGET_ARCH_x86)
#define SPEC_V4
#endif
#endif

View File

@@ -801,4 +801,153 @@ l2_%=: \
: __clobber_all);
}
/*
 * Regression test from the nospec_result reproducer: a two-insn ldimm64
 * immediately followed by a stack store. When the Spectre v4 mitigation is
 * active (SPEC_V4), the unprivileged-translated program must carry a
 * "nospec" barrier directly after the store, as asserted by the
 * __xlated_unpriv lines below. The immediate value itself is irrelevant to
 * the check (see inline comment).
 */
SEC("socket")
__description("unpriv: ldimm64 before Spectre v4 barrier")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V4
__xlated_unpriv("r1 = 0x2020200005642020") /* should not matter */
__xlated_unpriv("*(u64 *)(r10 -8) = r1")
__xlated_unpriv("nospec")
#endif
__naked void unpriv_ldimm64_spectre_v4(void)
{
asm volatile (" \
r1 = 0x2020200005642020 ll; \
*(u64 *)(r10 -8) = r1; \
r0 = 0; \
exit; \
" ::: __clobber_all);
}
/*
 * Combined Spectre v1 + v4 test: r8/r9 come from map lookups, so the
 * verifier cannot prove them zero and must explore the speculative branch
 * paths. The __xlated_unpriv expectations check that a Spectre-v1 nospec is
 * placed before the potentially attacker-controlled store through r2, and a
 * Spectre-v4 nospec after it — i.e. the v4-induced barrier does not cut the
 * v1 speculative-path analysis short (the bug fixed by this series).
 */
SEC("socket")
__description("unpriv: Spectre v1 and v4 barrier")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
#ifdef SPEC_V4
/* starts with r0 == r8 == r9 == 0 */
__xlated_unpriv("if r8 != 0x0 goto pc+1")
__xlated_unpriv("goto pc+2")
__xlated_unpriv("if r9 == 0x0 goto pc+4")
__xlated_unpriv("r2 = r0")
/* Following nospec required to prevent following dangerous `*(u64 *)(NOT_FP -64)
 * = r1` iff `if r9 == 0 goto pc+4` was mispredicted because of Spectre v1. The
 * test therefore ensures the Spectre-v4--induced nospec does not prevent the
 * Spectre-v1--induced speculative path from being fully analyzed.
 */
__xlated_unpriv("nospec") /* Spectre v1 */
__xlated_unpriv("*(u64 *)(r2 -64) = r1") /* could be used to leak r2 */
__xlated_unpriv("nospec") /* Spectre v4 */
#endif
#endif
__naked void unpriv_spectre_v1_and_v4(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r8 = r0; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
r9 = r0; \
r0 = r10; \
r1 = 0; \
r2 = r10; \
if r8 != 0 goto l0_%=; \
if r9 != 0 goto l0_%=; \
r0 = 0; \
l0_%=: if r8 != 0 goto l1_%=; \
goto l2_%=; \
l1_%=: if r9 == 0 goto l3_%=; \
r2 = r0; \
l2_%=: *(u64 *)(r2 -64) = r1; \
l3_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
/*
 * Simplified variant of the v1+v4 test: r8/r9 are constant zero, so the
 * verifier proves the guarded branch dead. The dead insns are replaced by
 * `goto pc-1` in the translated program (see the inline comments on the
 * __xlated_unpriv lines), while the nospec barriers around the store through
 * r2 must still be emitted exactly as in the non-simple variant.
 */
SEC("socket")
__description("unpriv: Spectre v1 and v4 barrier (simple)")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
#ifdef SPEC_V4
__xlated_unpriv("if r8 != 0x0 goto pc+1")
__xlated_unpriv("goto pc+2")
__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
__xlated_unpriv("goto pc-1") /* r2 = r0 */
__xlated_unpriv("nospec")
__xlated_unpriv("*(u64 *)(r2 -64) = r1")
__xlated_unpriv("nospec")
#endif
#endif
__naked void unpriv_spectre_v1_and_v4_simple(void)
{
asm volatile (" \
r8 = 0; \
r9 = 0; \
r0 = r10; \
r1 = 0; \
r2 = r10; \
if r8 != 0 goto l0_%=; \
if r9 != 0 goto l0_%=; \
r0 = 0; \
l0_%=: if r8 != 0 goto l1_%=; \
goto l2_%=; \
l1_%=: if r9 == 0 goto l3_%=; \
r2 = r0; \
l2_%=: *(u64 *)(r2 -64) = r1; \
l3_%=: r0 = 0; \
exit; \
" ::: __clobber_all);
}
/*
 * Like the "simple" v1+v4 test above, but with a two-insn ldimm64 inserted
 * on the dead path before the store. Both halves of the ldimm64 must be
 * dead-code-eliminated to `goto pc-1` (see the inline comments below), and
 * the nospec barriers before and after the store through r2 must still be
 * present in the unprivileged-translated program.
 */
SEC("socket")
__description("unpriv: ldimm64 before Spectre v1 and v4 barrier (simple)")
__success __success_unpriv
__retval(0)
#ifdef SPEC_V1
#ifdef SPEC_V4
__xlated_unpriv("if r8 != 0x0 goto pc+1")
__xlated_unpriv("goto pc+4")
__xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
__xlated_unpriv("goto pc-1") /* r2 = r0 */
__xlated_unpriv("goto pc-1") /* r1 = 0x2020200005642020 ll */
__xlated_unpriv("goto pc-1") /* second part of ldimm64 */
__xlated_unpriv("nospec")
__xlated_unpriv("*(u64 *)(r2 -64) = r1")
__xlated_unpriv("nospec")
#endif
#endif
__naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void)
{
asm volatile (" \
r8 = 0; \
r9 = 0; \
r0 = r10; \
r1 = 0; \
r2 = r10; \
if r8 != 0 goto l0_%=; \
if r9 != 0 goto l0_%=; \
r0 = 0; \
l0_%=: if r8 != 0 goto l1_%=; \
goto l2_%=; \
l1_%=: if r9 == 0 goto l3_%=; \
r2 = r0; \
r1 = 0x2020200005642020 ll; \
l2_%=: *(u64 *)(r2 -64) = r1; \
l3_%=: r0 = 0; \
exit; \
" ::: __clobber_all);
}
/* License declaration for the BPF object; placed in the "license" section. */
char _license[] SEC("license") = "GPL";