The control knobs that are set before loading the BPF program should be declared as
'const volatile' so that they can be optimized by the BPF core.
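
For reference, a minimal sketch of how such 'const volatile' knobs are driven from
userspace through a libbpf skeleton; it assumes a skeleton generated from this object
is available as func_latency_bpf, and the helper name and error handling below are
illustrative rather than the exact perf code:

  #include "func_latency.skel.h"	/* generated by bpftool gen skeleton (assumed) */

  static int setup_func_latency(void)
  {
  	struct func_latency_bpf *skel = func_latency_bpf__open();

  	if (skel == NULL)
  		return -1;

  	/* 'const volatile' knobs live in .rodata and must be written before
  	 * load; afterwards the verifier treats them as constants and can
  	 * dead-code-eliminate the disabled filter branches. */
  	skel->rodata->has_cpu = 0;
  	skel->rodata->has_task = 1;
  	skel->rodata->use_nsec = 0;

  	if (func_latency_bpf__load(skel) < 0) {
  		func_latency_bpf__destroy(skel);
  		return -1;
  	}

  	/* 'enabled' stays a plain global so it can still be toggled at
  	 * runtime, after the program has been loaded and attached. */
  	skel->bss->enabled = 1;
  	return 0;
  }
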
Committer testing:
root@x1:~# perf ftrace latency --use-bpf -T schedule
^C# DURATION | COUNT | GRAPH |
0 - 1 us | 0 | |
1 - 2 us | 0 | |
2 - 4 us | 0 | |
4 - 8 us | 0 | |
8 - 16 us | 1 | |
16 - 32 us | 5 | |
32 - 64 us | 2 | |
64 - 128 us | 6 | |
128 - 256 us | 7 | |
256 - 512 us | 5 | |
512 - 1024 us | 22 | # |
1 - 2 ms | 36 | ## |
2 - 4 ms | 68 | ##### |
4 - 8 ms | 22 | # |
8 - 16 ms | 91 | ####### |
16 - 32 ms | 11 | |
32 - 64 ms | 26 | ## |
64 - 128 ms | 213 | ################# |
128 - 256 ms | 19 | # |
256 - 512 ms | 14 | # |
512 - 1024 ms | 5 | |
1 - ... s | 8 | |
root@x1:~#
root@x1:~# perf trace -e bpf perf ftrace latency --use-bpf -T schedule
0.000 ( 0.015 ms): perf/2944525 bpf(cmd: 36, uattr: 0x7ffe80de7b40, size: 8) = -1 EOPNOTSUPP (Operation not supported)
0.025 ( 0.102 ms): perf/2944525 bpf(cmd: PROG_LOAD, uattr: 0x7ffe80de7870, size: 148) = 8
0.136 ( 0.026 ms): perf/2944525 bpf(cmd: PROG_LOAD, uattr: 0x7ffe80de7930, size: 148) = 8
0.174 ( 0.026 ms): perf/2944525 bpf(cmd: PROG_LOAD, uattr: 0x7ffe80de77e0, size: 148) = 8
0.205 ( 0.010 ms): perf/2944525 bpf(uattr: 0x7ffe80de7990, size: 80) = 8
0.227 ( 0.011 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de7810, size: 40) = 8
0.244 ( 0.004 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de7880, size: 40) = 8
0.257 ( 0.006 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de7660, size: 40) = 8
0.265 ( 0.058 ms): perf/2944525 bpf(cmd: PROG_LOAD, uattr: 0x7ffe80de7730, size: 148) = 9
0.330 ( 0.004 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de78e0, size: 40) = 8
0.337 ( 0.003 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de7890, size: 40) = 8
0.343 ( 0.004 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de7880, size: 40) = 8
0.349 ( 0.003 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de78b0, size: 40) = 8
0.355 ( 0.004 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de7890, size: 40) = 8
0.361 ( 0.003 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de78b0, size: 40) = 8
0.367 ( 0.003 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de7880, size: 40) = 8
0.373 ( 0.014 ms): perf/2944525 bpf(cmd: BTF_LOAD, uattr: 0x7ffe80de7a00, size: 40) = 8
0.390 ( 0.358 ms): perf/2944525 bpf(uattr: 0x7ffe80de7950, size: 80) = 9
0.763 ( 0.014 ms): perf/2944525 bpf(uattr: 0x7ffe80de7950, size: 80) = 9
0.783 ( 0.011 ms): perf/2944525 bpf(uattr: 0x7ffe80de7950, size: 80) = 9
0.798 ( 0.017 ms): perf/2944525 bpf(uattr: 0x7ffe80de7950, size: 80) = 9
0.819 ( 0.003 ms): perf/2944525 bpf(uattr: 0x7ffe80de7700, size: 80) = 9
0.824 ( 0.047 ms): perf/2944525 bpf(cmd: PROG_LOAD, uattr: 0x7ffe80de76c0, size: 148) = 10
0.878 ( 0.008 ms): perf/2944525 bpf(uattr: 0x7ffe80de7950, size: 80) = 9
0.891 ( 0.014 ms): perf/2944525 bpf(cmd: MAP_UPDATE_ELEM, uattr: 0x7ffe80de79e0, size: 32) = 0
0.910 ( 0.103 ms): perf/2944525 bpf(cmd: PROG_LOAD, uattr: 0x7ffe80de7880, size: 148) = 9
1.016 ( 0.143 ms): perf/2944525 bpf(cmd: PROG_LOAD, uattr: 0x7ffe80de7880, size: 148) = 10
3.777 ( 0.068 ms): perf/2944525 bpf(cmd: PROG_LOAD, uattr: 0x7ffe80de7570, size: 148) = 12
3.848 ( 0.003 ms): perf/2944525 bpf(cmd: LINK_CREATE, uattr: 0x7ffe80de7550, size: 64) = -1 EBADF (Bad file descriptor)
3.859 ( 0.006 ms): perf/2944525 bpf(cmd: LINK_CREATE, uattr: 0x7ffe80de77c0, size: 64) = 12
6.504 ( 0.010 ms): perf/2944525 bpf(cmd: LINK_CREATE, uattr: 0x7ffe80de77c0, size: 64) = 14
^C# DURATION | COUNT | GRAPH |
0 - 1 us | 0 | |
1 - 2 us | 0 | |
2 - 4 us | 1 | |
4 - 8 us | 3 | |
8 - 16 us | 3 | |
16 - 32 us | 11 | |
32 - 64 us | 9 | |
64 - 128 us | 17 | |
128 - 256 us | 30 | # |
256 - 512 us | 20 | |
512 - 1024 us | 42 | # |
1 - 2 ms | 151 | ###### |
2 - 4 ms | 106 | #### |
4 - 8 ms | 18 | |
8 - 16 ms | 149 | ###### |
16 - 32 ms | 30 | # |
32 - 64 ms | 17 | |
64 - 128 ms | 360 | ############### |
128 - 256 ms | 52 | ## |
256 - 512 ms | 18 | |
512 - 1024 ms | 28 | # |
1 - ... s | 5 | |
root@x1:~#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240902200515.2103769-3-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

// This should be in sync with "util/ftrace.h"
#define NUM_BUCKET 22

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, 10000);
} functime SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} cpu_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} task_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, NUM_BUCKET);
} latency SEC(".maps");


int enabled = 0;

const volatile int has_cpu = 0;
const volatile int has_task = 0;
const volatile int use_nsec = 0;

SEC("kprobe/func")
int BPF_PROG(func_begin)
{
	__u64 key, now;

	if (!enabled)
		return 0;

	key = bpf_get_current_pid_tgid();

	if (has_cpu) {
		__u32 cpu = bpf_get_smp_processor_id();
		__u8 *ok;

		ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
		if (!ok)
			return 0;
	}

	if (has_task) {
		__u32 pid = key & 0xffffffff;
		__u8 *ok;

		ok = bpf_map_lookup_elem(&task_filter, &pid);
		if (!ok)
			return 0;
	}

	now = bpf_ktime_get_ns();

	// overwrite timestamp for nested functions
	bpf_map_update_elem(&functime, &key, &now, BPF_ANY);
	return 0;
}

SEC("kretprobe/func")
int BPF_PROG(func_end)
{
	__u64 tid;
	__u64 *start;
	__u64 cmp_base = use_nsec ? 1 : 1000;

	if (!enabled)
		return 0;

	tid = bpf_get_current_pid_tgid();

	start = bpf_map_lookup_elem(&functime, &tid);
	if (start) {
		__s64 delta = bpf_ktime_get_ns() - *start;
		__u32 key;
		__u64 *hist;

		bpf_map_delete_elem(&functime, &tid);

		if (delta < 0)
			return 0;

		// calculate index using delta
		for (key = 0; key < (NUM_BUCKET - 1); key++) {
			if (delta < (cmp_base << key))
				break;
		}

		hist = bpf_map_lookup_elem(&latency, &key);
		if (!hist)
			return 0;

		*hist += 1;
	}

	return 0;
}
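
The 'latency' map above is a per-CPU array, so each bucket really holds one counter
per possible CPU and userspace has to sum them before printing the histogram. A
minimal sketch of that aggregation, assuming the map fd is obtained elsewhere (the
function name and output format are illustrative, not the actual perf code):

  #include <stdio.h>
  #include <bpf/bpf.h>
  #include <bpf/libbpf.h>

  #define NUM_BUCKET 22

  static void print_latency(int latency_map_fd)
  {
  	int ncpus = libbpf_num_possible_cpus();
  	__u64 values[ncpus];

  	for (__u32 key = 0; key < NUM_BUCKET; key++) {
  		__u64 total = 0;

  		/* a lookup on a percpu array fills one value per possible CPU */
  		if (bpf_map_lookup_elem(latency_map_fd, &key, values) < 0)
  			continue;

  		for (int c = 0; c < ncpus; c++)
  			total += values[c];

  		printf("bucket %2u: %llu\n", key, (unsigned long long)total);
  	}
  }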