mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-22 07:27:12 +08:00
mm/slab: allow freeing kmalloc_nolock()'d objects using kfree[_rcu]()
Slab objects that are allocated with kmalloc_nolock() must be freed using kfree_nolock() because only a subset of alloc hooks are called, since kmalloc_nolock() can't spin on a lock during allocation. This imposes a limitation: such objects cannot be freed with kfree_rcu(), forcing users to work around this limitation by calling call_rcu() with a callback that frees the object using kfree_nolock(). Remove this limitation by teaching kmemleak to gracefully ignore cases when kmemleak_free() or kmemleak_ignore() is called without a prior kmemleak_alloc(). Unlike kmemleak, kfence already handles this case, because, due to its design, only a subset of allocations are served from kfence. With this change, kfree() and kfree_rcu() can be used to free objects that are allocated using kmalloc_nolock(). Suggested-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Alexei Starovoitov <ast@kernel.org> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Harry Yoo <harry.yoo@oracle.com> Link: https://patch.msgid.link/20260210044642.139482-2-harry.yoo@oracle.com Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
committed by
Vlastimil Babka
parent
a1e244a9f1
commit
c4d6d78298
@@ -1076,8 +1076,8 @@ static inline void rcu_read_unlock_migrate(void)
  * either fall back to use of call_rcu() or rearrange the structure to
  * position the rcu_head structure into the first 4096 bytes.
  *
- * The object to be freed can be allocated either by kmalloc() or
- * kmem_cache_alloc().
+ * The object to be freed can be allocated either by kmalloc(),
+ * kmalloc_nolock(), or kmem_cache_alloc().
  *
  * Note that the allowable offset might decrease in the future.
  *
@@ -837,13 +837,12 @@ static void delete_object_full(unsigned long ptr, unsigned int objflags)
 	struct kmemleak_object *object;
 
 	object = find_and_remove_object(ptr, 0, objflags);
-	if (!object) {
-#ifdef DEBUG
-		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
-			      ptr);
-#endif
+	if (!object)
+		/*
+		 * kmalloc_nolock() -> kfree() calls kmemleak_free()
+		 * without kmemleak_alloc().
+		 */
 		return;
-	}
 	__delete_object(object);
 }
 
@@ -926,13 +925,12 @@ static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
 	struct kmemleak_object *object;
 
 	object = __find_and_get_object(ptr, 0, objflags);
-	if (!object) {
-		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
-			      ptr,
-			      (color == KMEMLEAK_GREY) ? "Grey" :
-			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
+	if (!object)
+		/*
+		 * kmalloc_nolock() -> kfree_rcu() calls kmemleak_ignore()
+		 * without kmemleak_alloc().
+		 */
 		return;
-	}
 	paint_it(object, color);
 	put_object(object);
 }
 
mm/slub.c: 21 lines changed
@@ -2585,6 +2585,24 @@ struct rcu_delayed_free {
  * Returns true if freeing of the object can proceed, false if its reuse
  * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
  * to KFENCE.
+ *
+ * For objects allocated via kmalloc_nolock(), only a subset of alloc hooks
+ * are invoked, so some free hooks must handle asymmetric hook calls.
+ *
+ * Alloc hooks called for kmalloc_nolock():
+ * - kmsan_slab_alloc()
+ * - kasan_slab_alloc()
+ * - memcg_slab_post_alloc_hook()
+ * - alloc_tagging_slab_alloc_hook()
+ *
+ * Free hooks that must handle missing corresponding alloc hooks:
+ * - kmemleak_free_recursive()
+ * - kfence_free()
+ *
+ * Free hooks that have no alloc hook counterpart, and thus safe to call:
+ * - debug_check_no_locks_freed()
+ * - debug_check_no_obj_freed()
+ * - __kcsan_check_access()
  */
 static __always_inline
 bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
@@ -6394,7 +6412,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
 
 /**
  * kfree - free previously allocated memory
- * @object: pointer returned by kmalloc() or kmem_cache_alloc()
+ * @object: pointer returned by kmalloc(), kmalloc_nolock(), or kmem_cache_alloc()
  *
  * If @object is NULL, no operation is performed.
  */
@@ -6413,6 +6431,7 @@ void kfree(const void *object)
 	page = virt_to_page(object);
 	slab = page_slab(page);
 	if (!slab) {
+		/* kmalloc_nolock() doesn't support large kmalloc */
 		free_large_kmalloc(page, (void *)object);
 		return;
 	}
Reference in New Issue
Block a user