
This is to filter lock contention from specific slab objects only.
Like in the lock symbol output, we can use the '&' prefix to filter
slab object names.

  root@virtme-ng:/home/namhyung/project/linux# tools/perf/perf lock con -abl sleep 1
   contended   total wait     max wait     avg wait            address   symbol
           3     14.99 us     14.44 us      5.00 us   ffffffff851c0940   pack_mutex (mutex)
           2      2.75 us      2.56 us      1.38 us   ffff98d7031fb498   &task_struct (mutex)
           4      1.42 us       557 ns       355 ns   ffff98d706311400   &kmalloc-cg-512 (mutex)
           2       953 ns       714 ns       476 ns   ffffffff851c3620   delayed_uprobe_lock (mutex)
           1       929 ns       929 ns       929 ns   ffff98d7031fb538   &task_struct (mutex)
           3       561 ns       210 ns       187 ns   ffffffff84a8b3a0   text_mutex (mutex)
           1       479 ns       479 ns       479 ns   ffffffff851b4cf8   tracepoint_srcu_srcu_usage (mutex)
           2       320 ns       195 ns       160 ns   ffffffff851cf840   pcpu_alloc_mutex (mutex)
           1       212 ns       212 ns       212 ns   ffff98d7031784d8   &signal_cache (mutex)
           1       177 ns       177 ns       177 ns   ffffffff851b4c28   tracepoint_srcu_srcu_usage (mutex)

With the filter, it can show contentions from the task_struct only.

  root@virtme-ng:/home/namhyung/project/linux# tools/perf/perf lock con -abl -L '&task_struct' sleep 1
   contended   total wait     max wait     avg wait            address   symbol
           2      1.97 us      1.71 us       987 ns   ffff98d7032fd658   &task_struct (mutex)
           1      1.20 us      1.20 us      1.20 us   ffff98d7032fd6f8   &task_struct (mutex)

It also works with the other aggregation modes:

  root@virtme-ng:/home/namhyung/project/linux# tools/perf/perf lock con -ab -L '&task_struct' sleep 1
   contended   total wait     max wait     avg wait         type   caller
           1     25.10 us     25.10 us     25.10 us        mutex   perf_event_exit_task+0x39
           1     21.60 us     21.60 us     21.60 us        mutex   futex_exit_release+0x21
           1      5.56 us      5.56 us      5.56 us        mutex   futex_exec_release+0x21

Committer testing:

  root@number:~# perf lock con -abl sleep 1
   contended   total wait     max wait     avg wait            address   symbol
           1     20.80 us     20.80 us     20.80 us   ffff9d417fbd65d0    (spinlock)
           8     12.85 us      2.41 us      1.61 us   ffff9d415eeb6a40   rq_lock (spinlock)
           1      2.55 us      2.55 us      2.55 us   ffff9d415f636a40   rq_lock (spinlock)
           7      1.92 us       840 ns       274 ns   ffff9d39c2cbc8c4    (spinlock)
           1      1.23 us      1.23 us      1.23 us   ffff9d415fb36a40   rq_lock (spinlock)
           2       928 ns       738 ns       464 ns   ffff9d39c1fa6660   &kmalloc-rnd-14-192 (rwlock)
           4       788 ns       252 ns       197 ns   ffffffffb8608a80   jiffies_lock (spinlock)
           1       304 ns       304 ns       304 ns   ffff9d39c2c979c4    (spinlock)
           1       216 ns       216 ns       216 ns   ffff9d3a0225c660   &kmalloc-rnd-14-192 (rwlock)
           1        89 ns        89 ns        89 ns   ffff9d3a0adbf3e0   &kmalloc-rnd-14-192 (rwlock)
           1        61 ns        61 ns        61 ns   ffff9d415f9b6a40   rq_lock (spinlock)

  root@number:~# uname -r
  6.13.0-rc2
  root@number:~#

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Chun-Tse Shao <ctshao@google.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/r/20241220060009.507297-5-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
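A note for context: on the user-space side, the '&name' argument has to be
resolved into kmem_cache addresses for the slab_filter map, using the
slab_caches map that the slab_cache_iter program below fills in. The
following is a minimal, illustrative sketch of that step, assuming plain
libbpf map accessors; the apply_slab_filter() helper, its fd parameters and
the struct layout (mirroring lock_data.h) are assumptions, not the actual
perf code.

  #include <string.h>
  #include <bpf/bpf.h>

  /* layout assumed to mirror struct slab_cache_data in lock_data.h */
  struct slab_cache_data {
          unsigned int id;
          char name[28];
  };

  /* name: slab cache name without the leading '&' */
  static int apply_slab_filter(int slab_caches_fd, int slab_filter_fd,
                               const char *name)
  {
          unsigned long key, next_key;    /* kmem_cache address */
          void *prev_key = NULL;
          struct slab_cache_data data;
          unsigned char exist = 1;        /* slab_filter values are __u8 flags */
          int found = 0;

          /* walk slab_caches; matching kmem_cache addresses go into slab_filter */
          while (bpf_map_get_next_key(slab_caches_fd, prev_key, &next_key) == 0) {
                  if (bpf_map_lookup_elem(slab_caches_fd, &next_key, &data) == 0 &&
                      strcmp(data.name, name) == 0) {
                          bpf_map_update_elem(slab_filter_fd, &next_key,
                                              &exist, BPF_ANY);
                          found++;
                  }
                  key = next_key;
                  prev_key = &key;
          }
          return found;
  }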
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2022 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <asm-generic/errno-base.h>

#include "lock_data.h"

/* for collect_lock_syms(). 4096 was rejected by the verifier */
#define MAX_CPUS  1024

/* lock contention flags from include/trace/events/lock.h */
#define LCB_F_SPIN      (1U << 0)
#define LCB_F_READ      (1U << 1)
#define LCB_F_WRITE     (1U << 2)
#define LCB_F_RT        (1U << 3)
#define LCB_F_PERCPU    (1U << 4)
#define LCB_F_MUTEX     (1U << 5)

/* callstack storage */
struct {
        __uint(type, BPF_MAP_TYPE_STACK_TRACE);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u64));
        __uint(max_entries, MAX_ENTRIES);
} stacks SEC(".maps");

/* maintain timestamp at the beginning of contention */
struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __type(key, int);
        __type(value, struct tstamp_data);
        __uint(max_entries, MAX_ENTRIES);
} tstamp SEC(".maps");

/* maintain per-CPU timestamp at the beginning of contention */
struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(struct tstamp_data));
        __uint(max_entries, 1);
} tstamp_cpu SEC(".maps");

/* actual lock contention statistics */
struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(struct contention_key));
        __uint(value_size, sizeof(struct contention_data));
        __uint(max_entries, MAX_ENTRIES);
} lock_stat SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(struct contention_task_data));
        __uint(max_entries, MAX_ENTRIES);
} task_data SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(__u64));
        __uint(value_size, sizeof(__u32));
        __uint(max_entries, MAX_ENTRIES);
} lock_syms SEC(".maps");
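
/*
 * Filter maps set up by user space to limit which contention events are
 * recorded; e.g. -L '&slab_name' fills slab_filter with kmem_cache addresses.
 */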
struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u8));
        __uint(max_entries, 1);
} cpu_filter SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u8));
        __uint(max_entries, 1);
} task_filter SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u8));
        __uint(max_entries, 1);
} type_filter SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(__u64));
        __uint(value_size, sizeof(__u8));
        __uint(max_entries, 1);
} addr_filter SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(__u64));
        __uint(value_size, sizeof(__u8));
        __uint(max_entries, 1);
} cgroup_filter SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(long));
        __uint(value_size, sizeof(__u8));
        __uint(max_entries, 1);
} slab_filter SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(key_size, sizeof(long));
        __uint(value_size, sizeof(struct slab_cache_data));
        __uint(max_entries, 1);
} slab_caches SEC(".maps");
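
/*
 * CO-RE "versioned" struct definitions: only the variant matching the
 * running kernel is relocated at load time, so the same object works on
 * kernels where rw_semaphore.owner is a task_struct pointer (old) or an
 * atomic_long_t (new), and where mm_struct has mmap_sem or mmap_lock.
 */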
struct rw_semaphore___old {
        struct task_struct *owner;
} __attribute__((preserve_access_index));

struct rw_semaphore___new {
        atomic_long_t owner;
} __attribute__((preserve_access_index));

struct mm_struct___old {
        struct rw_semaphore mmap_sem;
} __attribute__((preserve_access_index));

struct mm_struct___new {
        struct rw_semaphore mmap_lock;
} __attribute__((preserve_access_index));

extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym __weak;

/* control flags */
const volatile int has_cpu;
const volatile int has_task;
const volatile int has_type;
const volatile int has_addr;
const volatile int has_cgroup;
const volatile int has_slab;
const volatile int needs_callstack;
const volatile int stack_skip;
const volatile int lock_owner;
const volatile int use_cgroup_v2;

/* determine the key of lock stat */
const volatile int aggr_mode;

int enabled;

int perf_subsys_id = -1;

__u64 end_ts;

__u32 slab_cache_id;

/* error stat */
int task_fail;
int stack_fail;
int time_fail;
int data_fail;

int task_map_full;
int data_map_full;

static inline __u64 get_current_cgroup_id(void)
{
        struct task_struct *task;
        struct cgroup *cgrp;

        if (use_cgroup_v2)
                return bpf_get_current_cgroup_id();

        task = bpf_get_current_task_btf();

        if (perf_subsys_id == -1) {
#if __has_builtin(__builtin_preserve_enum_value)
                perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
                                                     perf_event_cgrp_id);
#else
                perf_subsys_id = perf_event_cgrp_id;
#endif
        }

        cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);
        return BPF_CORE_READ(cgrp, kn, id);
}
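
/*
 * For the contention_begin tracepoint, ctx[0] carries the lock address and
 * ctx[1] the LCB_F_* flags.
 */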
static inline int can_record(u64 *ctx)
{
        if (has_cpu) {
                __u32 cpu = bpf_get_smp_processor_id();
                __u8 *ok;

                ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
                if (!ok)
                        return 0;
        }

        if (has_task) {
                __u8 *ok;
                __u32 pid = bpf_get_current_pid_tgid();

                ok = bpf_map_lookup_elem(&task_filter, &pid);
                if (!ok)
                        return 0;
        }

        if (has_type) {
                __u8 *ok;
                __u32 flags = (__u32)ctx[1];

                ok = bpf_map_lookup_elem(&type_filter, &flags);
                if (!ok)
                        return 0;
        }

        if (has_addr) {
                __u8 *ok;
                __u64 addr = ctx[0];

                ok = bpf_map_lookup_elem(&addr_filter, &addr);
                if (!ok && !has_slab)
                        return 0;
        }

        if (has_cgroup) {
                __u8 *ok;
                __u64 cgrp = get_current_cgroup_id();

                ok = bpf_map_lookup_elem(&cgroup_filter, &cgrp);
                if (!ok)
                        return 0;
        }

        if (has_slab && bpf_get_kmem_cache) {
                __u8 *ok;
                __u64 addr = ctx[0];
                long kmem_cache_addr;

                kmem_cache_addr = (long)bpf_get_kmem_cache(addr);
                ok = bpf_map_lookup_elem(&slab_filter, &kmem_cache_addr);
                if (!ok)
                        return 0;
        }

        return 1;
}
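
/* cache the contending task's comm in task_data, keyed by pid, for the report */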
static inline int update_task_data(struct task_struct *task)
{
        struct contention_task_data *p;
        int pid, err;

        err = bpf_core_read(&pid, sizeof(pid), &task->pid);
        if (err)
                return -1;

        p = bpf_map_lookup_elem(&task_data, &pid);
        if (p == NULL && !task_map_full) {
                struct contention_task_data data = {};

                BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
                if (bpf_map_update_elem(&task_data, &pid, &data, BPF_NOEXIST) == -E2BIG)
                        task_map_full = 1;
        }

        return 0;
}

#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

static inline struct task_struct *get_lock_owner(__u64 lock, __u32 flags)
{
        struct task_struct *task;
        __u64 owner = 0;

        if (flags & LCB_F_MUTEX) {
                struct mutex *mutex = (void *)lock;

                owner = BPF_CORE_READ(mutex, owner.counter);
        } else if (flags == LCB_F_READ || flags == LCB_F_WRITE) {
        /*
         * Support for the BPF_TYPE_MATCHES argument to the
         * __builtin_preserve_type_info builtin was added at some point during
         * development of clang 15 and it's what is needed for
         * bpf_core_type_matches.
         */
#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
                if (bpf_core_type_matches(struct rw_semaphore___old)) {
                        struct rw_semaphore___old *rwsem = (void *)lock;

                        owner = (unsigned long)BPF_CORE_READ(rwsem, owner);
                } else if (bpf_core_type_matches(struct rw_semaphore___new)) {
                        struct rw_semaphore___new *rwsem = (void *)lock;

                        owner = BPF_CORE_READ(rwsem, owner.counter);
                }
#else
                /* assume new struct */
                struct rw_semaphore *rwsem = (void *)lock;

                owner = BPF_CORE_READ(rwsem, owner.counter);
#endif
        }

        if (!owner)
                return NULL;

        /* the low bits of the owner field carry lock state flags; mask them off */
        task = (void *)(owner & ~7UL);
        return task;
}
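
/* tag contentions on well-known locks (mmap_lock, siglock) so the tool can name them */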
static inline __u32 check_lock_type(__u64 lock, __u32 flags)
{
        struct task_struct *curr;
        struct mm_struct___old *mm_old;
        struct mm_struct___new *mm_new;
        struct sighand_struct *sighand;

        switch (flags) {
        case LCB_F_READ:  /* rwsem */
        case LCB_F_WRITE:
                curr = bpf_get_current_task_btf();
                if (curr->mm == NULL)
                        break;
                mm_new = (void *)curr->mm;
                if (bpf_core_field_exists(mm_new->mmap_lock)) {
                        if (&mm_new->mmap_lock == (void *)lock)
                                return LCD_F_MMAP_LOCK;
                        break;
                }
                mm_old = (void *)curr->mm;
                if (bpf_core_field_exists(mm_old->mmap_sem)) {
                        if (&mm_old->mmap_sem == (void *)lock)
                                return LCD_F_MMAP_LOCK;
                }
                break;
        case LCB_F_SPIN:  /* spinlock */
                curr = bpf_get_current_task_btf();
                sighand = curr->sighand;

                if (sighand && &sighand->siglock == (void *)lock)
                        return LCD_F_SIGHAND_LOCK;
                break;
        default:
                break;
        }
        return 0;
}
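
/*
 * Pick the timestamp slot: the per-cpu entry for spinning locks, or the
 * pid-keyed hash map for sleeping locks.
 */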
static inline struct tstamp_data *get_tstamp_elem(__u32 flags)
{
        __u32 pid;
        struct tstamp_data *pelem;

        /* Use per-cpu array map for spinlock and rwlock */
        if ((flags & (LCB_F_SPIN | LCB_F_MUTEX)) == LCB_F_SPIN) {
                __u32 idx = 0;

                pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
                /* Do not update the element for nested locks */
                if (pelem && pelem->lock)
                        pelem = NULL;
                return pelem;
        }

        pid = bpf_get_current_pid_tgid();
        pelem = bpf_map_lookup_elem(&tstamp, &pid);
        /* Do not update the element for nested locks */
        if (pelem && pelem->lock)
                return NULL;

        if (pelem == NULL) {
                struct tstamp_data zero = {};

                if (bpf_map_update_elem(&tstamp, &pid, &zero, BPF_NOEXIST) < 0) {
                        __sync_fetch_and_add(&task_fail, 1);
                        return NULL;
                }

                pelem = bpf_map_lookup_elem(&tstamp, &pid);
                if (pelem == NULL) {
                        __sync_fetch_and_add(&task_fail, 1);
                        return NULL;
                }
        }
        return pelem;
}

SEC("tp_btf/contention_begin")
int contention_begin(u64 *ctx)
{
        struct tstamp_data *pelem;

        if (!enabled || !can_record(ctx))
                return 0;

        pelem = get_tstamp_elem(ctx[1]);
        if (pelem == NULL)
                return 0;

        pelem->timestamp = bpf_ktime_get_ns();
        pelem->lock = (__u64)ctx[0];
        pelem->flags = (__u32)ctx[1];

        if (needs_callstack) {
                pelem->stack_id = bpf_get_stackid(ctx, &stacks,
                                                  BPF_F_FAST_STACK_CMP | stack_skip);
                if (pelem->stack_id < 0)
                        __sync_fetch_and_add(&stack_fail, 1);
        } else if (aggr_mode == LOCK_AGGR_TASK) {
                struct task_struct *task;

                if (lock_owner) {
                        task = get_lock_owner(pelem->lock, pelem->flags);

                        /* The flags field is not used anymore.  Pass the owner pid instead. */
                        if (task)
                                pelem->flags = BPF_CORE_READ(task, pid);
                        else
                                pelem->flags = -1U;

                } else {
                        task = bpf_get_current_task_btf();
                }

                if (task) {
                        if (update_task_data(task) < 0 && lock_owner)
                                pelem->flags = -1U;
                }
        }

        return 0;
}

SEC("tp_btf/contention_end")
int contention_end(u64 *ctx)
{
        __u32 pid = 0, idx = 0;
        struct tstamp_data *pelem;
        struct contention_key key = {};
        struct contention_data *data;
        __u64 duration;
        bool need_delete = false;

        if (!enabled)
                return 0;

        /*
         * For spinlock and rwlock, the timestamp was saved in the per-cpu
         * map.  However, contention_end does not carry the flags, so it
         * cannot tell whether to read the per-cpu map or the hash map.
         *
         * Try the per-cpu map first and check if there's an active
         * contention.  If there is, skip the hash map: the task cannot take
         * a sleeping lock before releasing its spinning locks.
         */
        pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
        if (pelem && pelem->lock) {
                if (pelem->lock != ctx[0])
                        return 0;
        } else {
                pid = bpf_get_current_pid_tgid();
                pelem = bpf_map_lookup_elem(&tstamp, &pid);
                if (!pelem || pelem->lock != ctx[0])
                        return 0;
                need_delete = true;
        }

        duration = bpf_ktime_get_ns() - pelem->timestamp;
        if ((__s64)duration < 0) {
                __sync_fetch_and_add(&time_fail, 1);
                goto out;
        }

        switch (aggr_mode) {
        case LOCK_AGGR_CALLER:
                key.stack_id = pelem->stack_id;
                break;
        case LOCK_AGGR_TASK:
                if (lock_owner)
                        key.pid = pelem->flags;
                else {
                        if (!need_delete)
                                pid = bpf_get_current_pid_tgid();
                        key.pid = pid;
                }
                if (needs_callstack)
                        key.stack_id = pelem->stack_id;
                break;
        case LOCK_AGGR_ADDR:
                key.lock_addr_or_cgroup = pelem->lock;
                if (needs_callstack)
                        key.stack_id = pelem->stack_id;
                break;
        case LOCK_AGGR_CGROUP:
                key.lock_addr_or_cgroup = get_current_cgroup_id();
                break;
        default:
                /* should not happen */
                return 0;
        }

        data = bpf_map_lookup_elem(&lock_stat, &key);
        if (!data) {
                if (data_map_full) {
                        __sync_fetch_and_add(&data_fail, 1);
                        goto out;
                }

                struct contention_data first = {
                        .total_time = duration,
                        .max_time = duration,
                        .min_time = duration,
                        .count = 1,
                        .flags = pelem->flags,
                };
                int err;

                if (aggr_mode == LOCK_AGGR_ADDR) {
                        first.flags |= check_lock_type(pelem->lock,
                                                       pelem->flags & LCB_F_TYPE_MASK);

                        /* Check if it's from a slab object */
                        if (bpf_get_kmem_cache) {
                                struct kmem_cache *s;
                                struct slab_cache_data *d;

                                s = bpf_get_kmem_cache(pelem->lock);
                                if (s != NULL) {
                                        /*
                                         * Save the ID of the slab cache in the
                                         * flags (instead of the full address)
                                         * to reduce the space in the
                                         * contention_data.
                                         */
                                        d = bpf_map_lookup_elem(&slab_caches, &s);
                                        if (d != NULL)
                                                first.flags |= d->id;
                                }
                        }
                }

                err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
                if (err < 0) {
                        if (err == -EEXIST) {
                                /* it lost the race, try to get it again */
                                data = bpf_map_lookup_elem(&lock_stat, &key);
                                if (data != NULL)
                                        goto found;
                        }
                        if (err == -E2BIG)
                                data_map_full = 1;
                        __sync_fetch_and_add(&data_fail, 1);
                }
                goto out;
        }

found:
        __sync_fetch_and_add(&data->total_time, duration);
        __sync_fetch_and_add(&data->count, 1);

        /* FIXME: need atomic operations */
        if (data->max_time < duration)
                data->max_time = duration;
        if (data->min_time > duration)
                data->min_time = duration;

out:
        pelem->lock = 0;
        if (need_delete)
                bpf_map_delete_elem(&tstamp, &pid);
        return 0;
}
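
/*
 * Resolve each CPU's runqueue lock address so the tool can symbolize it as
 * "rq_lock"; the rq___old/rq___new variants cover the rename of rq->lock
 * to rq->__lock.
 */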
extern struct rq runqueues __ksym;

struct rq___old {
        raw_spinlock_t lock;
} __attribute__((preserve_access_index));

struct rq___new {
        raw_spinlock_t __lock;
} __attribute__((preserve_access_index));

SEC("raw_tp/bpf_test_finish")
int BPF_PROG(collect_lock_syms)
{
        __u64 lock_addr, lock_off;
        __u32 lock_flag;

        if (bpf_core_field_exists(struct rq___new, __lock))
                lock_off = offsetof(struct rq___new, __lock);
        else
                lock_off = offsetof(struct rq___old, lock);

        for (int i = 0; i < MAX_CPUS; i++) {
                struct rq *rq = bpf_per_cpu_ptr(&runqueues, i);

                if (rq == NULL)
                        break;

                lock_addr = (__u64)(void *)rq + lock_off;
                lock_flag = LOCK_CLASS_RQLOCK;
                bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY);
        }
        return 0;
}

SEC("raw_tp/bpf_test_finish")
int BPF_PROG(end_timestamp)
{
        end_ts = bpf_ktime_get_ns();
        return 0;
}

/*
 * bpf_iter__kmem_cache was added recently, so old kernels don't have it in
 * their vmlinux.h.  But we cannot simply add it here since that would cause
 * a compiler error due to a redefinition of the struct on newer kernels.
 *
 * So it uses a CO-RE trick to access the member only if the type exists.
 * This supports both old and new kernels without compiler errors.
 */
struct bpf_iter__kmem_cache___new {
        struct kmem_cache *s;
} __attribute__((preserve_access_index));

SEC("iter/kmem_cache")
int slab_cache_iter(void *ctx)
{
        struct kmem_cache *s = NULL;
        struct slab_cache_data d;
        const char *nameptr;

        if (bpf_core_type_exists(struct bpf_iter__kmem_cache)) {
                struct bpf_iter__kmem_cache___new *iter = ctx;

                s = iter->s;
        }

        if (s == NULL)
                return 0;

        nameptr = s->name;
        bpf_probe_read_kernel_str(d.name, sizeof(d.name), nameptr);

        d.id = ++slab_cache_id << LCB_F_SLAB_ID_SHIFT;
        if (d.id >= LCB_F_SLAB_ID_END)
                return 0;

        bpf_map_update_elem(&slab_caches, &s, &d, BPF_NOEXIST);
        return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";