Merge branch 'bpf-optimize-recursion-detection-on-arm64'
Puranjay Mohan says:
====================
bpf: Optimize recursion detection on arm64
V2: https://lore.kernel.org/all/20251217233608.2374187-1-puranjay@kernel.org/
Changes in v2->v3:
- Added Acked-by from Yonghong
- Patch 2:
  - Changed alignment of 'active' from 8 to 4
  - Used le32_to_cpu() in place of get_unaligned_le32()
V1: https://lore.kernel.org/all/20251217162830.2597286-1-puranjay@kernel.org/
Changes in v1->v2:
- Patch 2:
  - Put preempt_disable()/preempt_enable() around the RMW accesses to
    mitigate race conditions: with CONFIG_PREEMPT_RCU and sleepable bpf
    programs, preemption could otherwise cause no bpf prog to execute
    at all in case of recursion.
BPF programs detect recursion using a per-CPU 'active' counter in
struct bpf_prog. The trampoline currently increments and decrements
this counter with atomic operations.
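For reference, the existing generic guard (visible in the diff below)
boils down to an atomic per-CPU increment on entry and a decrement on
exit, roughly:

	/* entry: any return value other than 1 means recursion */
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		goto out;
	}
	/* ... run the program ... */
out:
	this_cpu_dec(*(prog->active));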
On some arm64 platforms (e.g., Neoverse V2 with LSE), per-CPU atomic
operations are relatively slow. Unlike on x86_64, where per-CPU updates
can avoid cross-core atomicity, arm64 LSE atomics are always atomic
across all cores, which is unnecessary overhead for strictly per-CPU
state.
This patch removes atomics from the recursion detection path on arm64.
It was discovered in [1] that per-CPU atomics that don't return a value
were extremely slow on some arm64 platforms. Catalin added a fix in
commit 535fdfc5a2 ("arm64: Use load LSE atomics for the non-return
per-CPU atomic operations") to solve that issue, but it seems to have
caused a regression on the fentry benchmark.
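The arm64 replacement (condensed from the diff below) keeps one u8
counter per interrupt context and packs the four counters into a single
4-byte per-CPU array, so one aligned little-endian 32-bit load observes
all of them at once; the only value compatible with "no recursion" is
BIT(rctx * 8), i.e. exactly 1 in the current context's byte and 0
everywhere else:

	u8 rctx = interrupt_context_level();	 /* 0..BPF_NR_CONTEXTS-1 */
	u8 *active = this_cpu_ptr(prog->active); /* u8[4], 4-byte aligned */
	u32 val;

	preempt_disable();	/* keep the RMW and readback on one CPU */
	active[rctx]++;
	val = le32_to_cpu(*(__le32 *)active);	/* all 4 counters at once */
	preempt_enable();

	/* e.g. first entry in task context: val == 0x00000001 == BIT(0);
	 * a softirq prog already active:    val == 0x00000101 -> recursion
	 */
	return val == BIT(rctx * 8);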
Using the fentry benchmark from the bpf selftests shows the following:
./tools/testing/selftests/bpf/bench trig-fentry
+---------------------------------------------+------------------------+
| Configuration                               | Total Operations (M/s) |
+---------------------------------------------+------------------------+
| bpf-next/master with Catalin's fix reverted | 51.770                 |
|---------------------------------------------|------------------------|
| bpf-next/master                             | 43.271                 |
| bpf-next/master with this change            | 53.948                 |
+---------------------------------------------+------------------------+
All benchmarks were run in a KVM-based VM with Neoverse-V2 and 8 CPUs.
This patch yields a 25% improvement in this benchmark compared to
bpf-next. Notably, reverting Catalin's fix also results in a
performance gain for this benchmark, which is interesting but expected
given the regression noted above.
For completeness, this benchmark was also run with the change enabled
on x86_64, where it resulted in a 30% regression in the fentry
benchmark; the new scheme is therefore enabled only on arm64.
P.S. Here is more data for other program types:
+----------------+--------------+-------------+---------+
| Benchmark      | Before (M/s) | After (M/s) | % Diff  |
+----------------+--------------+-------------+---------+
| fentry         |       43.149 |      53.948 | +25.03% |
| fentry.s       |       41.831 |      50.937 | +21.76% |
| rawtp          |       50.834 |      58.731 | +15.53% |
| fexit          |       31.118 |      34.360 | +10.42% |
| tp             |       39.536 |      41.632 |  +5.30% |
| syscall-count  |        8.053 |       8.305 |  +3.13% |
| fmodret        |       33.940 |      34.769 |  +2.44% |
| kprobe         |        9.970 |       9.998 |  +0.28% |
| usermode-count |      224.886 |     224.839 |  -0.02% |
| kernel-count   |      154.229 |     153.043 |  -0.77% |
+----------------+--------------+-------------+---------+
[1] https://lore.kernel.org/all/e7d539ed-ced0-4b96-8ecd-048a5b803b85@paulmck-laptop/
====================
Link: https://patch.msgid.link/20251219184422.2899902-1-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
@@ -1746,6 +1746,8 @@ struct bpf_prog_aux {
 	struct bpf_map __rcu *st_ops_assoc;
 };
 
+#define BPF_NR_CONTEXTS 4 /* normal, softirq, hardirq, NMI */
+
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	u16			jited:1,	/* Is our filter JIT'ed? */
@@ -1772,7 +1774,7 @@ struct bpf_prog {
 		u8 tag[BPF_TAG_SIZE];
 	};
 	struct bpf_prog_stats __percpu *stats;
-	int __percpu		*active;
+	u8 __percpu		*active; /* u8[BPF_NR_CONTEXTS] for recursion protection */
 	unsigned int		(*bpf_func)(const void *ctx,
 					    const struct bpf_insn *insn);
 	struct bpf_prog_aux	*aux;	/* Auxiliary fields */
@@ -2004,6 +2006,40 @@ struct bpf_struct_ops_common_value {
 	enum bpf_struct_ops_state state;
 };
 
+static inline bool bpf_prog_get_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+	u8 rctx = interrupt_context_level();
+	u8 *active = this_cpu_ptr(prog->active);
+	u32 val;
+
+	preempt_disable();
+	active[rctx]++;
+	val = le32_to_cpu(*(__le32 *)active);
+	preempt_enable();
+	if (val != BIT(rctx * 8))
+		return false;
+
+	return true;
+#else
+	return this_cpu_inc_return(*(int __percpu *)(prog->active)) == 1;
+#endif
+}
+
+static inline void bpf_prog_put_recursion_context(struct bpf_prog *prog)
+{
+#ifdef CONFIG_ARM64
+	u8 rctx = interrupt_context_level();
+	u8 *active = this_cpu_ptr(prog->active);
+
+	preempt_disable();
+	active[rctx]--;
+	preempt_enable();
+#else
+	this_cpu_dec(*(int __percpu *)(prog->active));
+#endif
+}
+
 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
 /* This macro helps developer to register a struct_ops type and generate
  * type information correctly. Developers should use this macro to register
@@ -112,7 +112,8 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 		vfree(fp);
 		return NULL;
 	}
-	fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
+	fp->active = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 4,
+					bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
 	if (!fp->active) {
 		vfree(fp);
 		kfree(aux);
@@ -949,7 +949,7 @@ static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tram
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
-	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+	if (unlikely(!bpf_prog_get_recursion_context(prog))) {
 		bpf_prog_inc_misses_counter(prog);
 		if (prog->aux->recursion_detected)
 			prog->aux->recursion_detected(prog);
@@ -993,7 +993,7 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
 	update_prog_stats(prog, start);
-	this_cpu_dec(*(prog->active));
+	bpf_prog_put_recursion_context(prog);
 	rcu_read_unlock_migrate();
 }
 
@@ -1029,7 +1029,7 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
-	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+	if (unlikely(!bpf_prog_get_recursion_context(prog))) {
 		bpf_prog_inc_misses_counter(prog);
 		if (prog->aux->recursion_detected)
 			prog->aux->recursion_detected(prog);
@@ -1044,7 +1044,7 @@ void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
 	update_prog_stats(prog, start);
-	this_cpu_dec(*(prog->active));
+	bpf_prog_put_recursion_context(prog);
 	migrate_enable();
 	rcu_read_unlock_trace();
 }
@@ -2063,7 +2063,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
 	struct bpf_trace_run_ctx run_ctx;
 
 	cant_sleep();
-	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+	if (unlikely(!bpf_prog_get_recursion_context(prog))) {
 		bpf_prog_inc_misses_counter(prog);
 		goto out;
 	}
@@ -2077,7 +2077,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
 
 	bpf_reset_run_ctx(old_run_ctx);
 out:
-	this_cpu_dec(*(prog->active));
+	bpf_prog_put_recursion_context(prog);
 }
 
 #define UNPACK(...) __VA_ARGS__