mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
bpf: Switch to bpf_selem_unlink_nofail in bpf_local_storage_{map_free, destroy}
Take care of rqspinlock error in bpf_local_storage_{map_free, destroy}()
properly by switching to bpf_selem_unlink_nofail().
Both functions iterate their own RCU-protected list of selems and call
bpf_selem_unlink_nofail(). In map_free(), to prevent an infinite loop when
both map_free() and destroy() fail to remove a selem from b->list
(extremely unlikely), switch to hlist_for_each_entry_rcu(). In destroy(),
also switch to hlist_for_each_entry_rcu() since we no longer iterate
local_storage->list under local_storage->lock.
bpf_selem_unlink() now becomes dedicated to helpers and syscalls paths
so reuse_now should always be false. Remove it from the argument list and
hardcode it.
Acked-by: Alexei Starovoitov <ast@kernel.org>
Co-developed-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Amery Hung <ameryhung@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://patch.msgid.link/20260205222916.1788211-12-ameryhung@gmail.com
This commit is contained in:
committed by
Martin KaFai Lau
parent
5d800f87d0
commit
0be08389c7
@@ -171,7 +171,7 @@ bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
|
||||
return SDATA(selem);
|
||||
}
|
||||
|
||||
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
|
||||
u32 bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
|
||||
|
||||
void bpf_local_storage_map_free(struct bpf_map *map,
|
||||
struct bpf_local_storage_cache *cache);
|
||||
@@ -184,7 +184,7 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
|
||||
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
|
||||
struct bpf_local_storage_elem *selem);
|
||||
|
||||
int bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
|
||||
int bpf_selem_unlink(struct bpf_local_storage_elem *selem);
|
||||
|
||||
int bpf_selem_link_map(struct bpf_local_storage_map *smap,
|
||||
struct bpf_local_storage *local_storage,
|
||||
|
||||
@@ -89,7 +89,7 @@ static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
|
||||
if (!sdata)
|
||||
return -ENOENT;
|
||||
|
||||
return bpf_selem_unlink(SELEM(sdata), false);
|
||||
return bpf_selem_unlink(SELEM(sdata));
|
||||
}
|
||||
|
||||
static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
|
||||
@@ -110,7 +110,7 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
|
||||
if (!sdata)
|
||||
return -ENOENT;
|
||||
|
||||
return bpf_selem_unlink(SELEM(sdata), false);
|
||||
return bpf_selem_unlink(SELEM(sdata));
|
||||
}
|
||||
|
||||
static long bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
|
||||
@@ -377,7 +377,11 @@ static void bpf_selem_link_map_nolock(struct bpf_local_storage_map_bucket *b,
|
||||
hlist_add_head_rcu(&selem->map_node, &b->list);
|
||||
}
|
||||
|
||||
int bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
|
||||
/*
|
||||
* Unlink an selem from map and local storage with lock held.
|
||||
* This is the common path used by local storages to delete an selem.
|
||||
*/
|
||||
int bpf_selem_unlink(struct bpf_local_storage_elem *selem)
|
||||
{
|
||||
struct bpf_local_storage *local_storage;
|
||||
bool free_local_storage = false;
|
||||
@@ -411,10 +415,10 @@ int bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
|
||||
out:
|
||||
raw_res_spin_unlock_irqrestore(&local_storage->lock, flags);
|
||||
|
||||
bpf_selem_free_list(&selem_free_list, reuse_now);
|
||||
bpf_selem_free_list(&selem_free_list, false);
|
||||
|
||||
if (free_local_storage)
|
||||
bpf_local_storage_free(local_storage, reuse_now);
|
||||
bpf_local_storage_free(local_storage, false);
|
||||
|
||||
return err;
|
||||
}
|
||||
@@ -804,13 +808,13 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
|
||||
/*
|
||||
* Destroy local storage when the owner is going away. Caller must uncharge memory
|
||||
* if memory charging is used.
|
||||
*/
|
||||
u32 bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
|
||||
{
|
||||
struct bpf_local_storage_elem *selem;
|
||||
bool free_storage = false;
|
||||
HLIST_HEAD(free_selem_list);
|
||||
struct hlist_node *n;
|
||||
unsigned long flags;
|
||||
|
||||
/* Neither the bpf_prog nor the bpf_map's syscall
|
||||
* could be modifying the local_storage->list now.
|
||||
@@ -821,32 +825,20 @@ void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
|
||||
* when unlinking elem from the local_storage->list and
|
||||
* the map's bucket->list.
|
||||
*/
|
||||
raw_res_spin_lock_irqsave(&local_storage->lock, flags);
|
||||
hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
|
||||
/* Always unlink from map before unlinking from
|
||||
* local_storage.
|
||||
*/
|
||||
bpf_selem_unlink_map(selem);
|
||||
/* If local_storage list has only one element, the
|
||||
* bpf_selem_unlink_storage_nolock() will return true.
|
||||
* Otherwise, it will return false. The current loop iteration
|
||||
* intends to remove all local storage. So the last iteration
|
||||
* of the loop will set the free_cgroup_storage to true.
|
||||
*/
|
||||
free_storage = bpf_selem_unlink_storage_nolock(
|
||||
local_storage, selem, &free_selem_list);
|
||||
}
|
||||
raw_res_spin_unlock_irqrestore(&local_storage->lock, flags);
|
||||
|
||||
bpf_selem_free_list(&free_selem_list, true);
|
||||
|
||||
if (free_storage)
|
||||
bpf_local_storage_free(local_storage, true);
|
||||
hlist_for_each_entry_rcu(selem, &local_storage->list, snode)
|
||||
bpf_selem_unlink_nofail(selem, NULL);
|
||||
|
||||
if (!refcount_dec_and_test(&local_storage->owner_refcnt)) {
|
||||
while (refcount_read(&local_storage->owner_refcnt))
|
||||
cpu_relax();
|
||||
/*
|
||||
* Paired with refcount_dec() in bpf_selem_unlink_nofail()
|
||||
* to make sure destroy() sees the correct local_storage->mem_charge.
|
||||
*/
|
||||
smp_mb();
|
||||
}
|
||||
|
||||
return local_storage->mem_charge;
|
||||
}
|
||||
|
||||
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
|
||||
@@ -940,11 +932,14 @@ void bpf_local_storage_map_free(struct bpf_map *map,
|
||||
|
||||
rcu_read_lock();
|
||||
/* No one is adding to b->list now */
|
||||
while ((selem = hlist_entry_safe(
|
||||
rcu_dereference_raw(hlist_first_rcu(&b->list)),
|
||||
struct bpf_local_storage_elem, map_node))) {
|
||||
bpf_selem_unlink(selem, true);
|
||||
cond_resched_rcu();
|
||||
restart:
|
||||
hlist_for_each_entry_rcu(selem, &b->list, map_node) {
|
||||
bpf_selem_unlink_nofail(selem, b);
|
||||
|
||||
if (need_resched()) {
|
||||
cond_resched_rcu();
|
||||
goto restart;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
@@ -134,7 +134,7 @@ static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
|
||||
if (!sdata)
|
||||
return -ENOENT;
|
||||
|
||||
return bpf_selem_unlink(SELEM(sdata), false);
|
||||
return bpf_selem_unlink(SELEM(sdata));
|
||||
}
|
||||
|
||||
static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
|
||||
@@ -40,20 +40,23 @@ static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
|
||||
if (!sdata)
|
||||
return -ENOENT;
|
||||
|
||||
return bpf_selem_unlink(SELEM(sdata), false);
|
||||
return bpf_selem_unlink(SELEM(sdata));
|
||||
}
|
||||
|
||||
/* Called by __sk_destruct() & bpf_sk_storage_clone() */
|
||||
void bpf_sk_storage_free(struct sock *sk)
|
||||
{
|
||||
struct bpf_local_storage *sk_storage;
|
||||
u32 uncharge;
|
||||
|
||||
rcu_read_lock_dont_migrate();
|
||||
sk_storage = rcu_dereference(sk->sk_bpf_storage);
|
||||
if (!sk_storage)
|
||||
goto out;
|
||||
|
||||
bpf_local_storage_destroy(sk_storage);
|
||||
uncharge = bpf_local_storage_destroy(sk_storage);
|
||||
if (uncharge)
|
||||
atomic_sub(uncharge, &sk->sk_omem_alloc);
|
||||
out:
|
||||
rcu_read_unlock_migrate();
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user