treewide: Replace kmalloc with kmalloc_obj for non-scalar types

This is the result of running the Coccinelle script from
scripts/coccinelle/api/kmalloc_objs.cocci. The script is designed to
avoid scalar types (which need careful case-by-case checking), and
instead replace kmalloc-family calls that allocate struct or union
object instances:

Single allocations:	kmalloc(sizeof(TYPE), ...)
are replaced with:	kmalloc_obj(TYPE, ...)

Array allocations:	kmalloc_array(COUNT, sizeof(TYPE), ...)
are replaced with:	kmalloc_objs(TYPE, COUNT, ...)

Flex array allocations:	kmalloc(struct_size(PTR, FAM, COUNT), ...)
are replaced with:	kmalloc_flex(*PTR, FAM, COUNT, ...)

(where TYPE may also be *VAR; the zeroing and vmalloc-fallback variants
are converted the same way: kzalloc -> kzalloc_obj, kcalloc ->
kzalloc_objs, kvmalloc_array -> kvmalloc_objs, and kvcalloc ->
kvzalloc_objs)

The resulting allocations no longer return "void *", instead returning
"TYPE *".

Signed-off-by: Kees Cook <kees@kernel.org>
This commit is contained in:
Kees Cook
2026-02-20 23:49:23 -08:00
parent d39a1d7486
commit 69050f8d6d
8016 changed files with 20055 additions and 20913 deletions

View File

@@ -669,8 +669,9 @@ static int __init alloc_mod_tags_mem(void)
return -ENOMEM;
}
vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
vm_module_tags->pages = kmalloc_objs(struct page *,
get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
GFP_KERNEL | __GFP_ZERO);
if (!vm_module_tags->pages) {
free_vm_area(vm_module_tags);
return -ENOMEM;

View File

@@ -454,7 +454,7 @@ static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
pr_devel("-->%s()\n", __func__);
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
new_n0 = kzalloc_obj(struct assoc_array_node, GFP_KERNEL);
if (!new_n0)
return false;
@@ -536,11 +536,11 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
* those now. We may also need a new shortcut, but we deal with that
* when we need it.
*/
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
new_n0 = kzalloc_obj(struct assoc_array_node, GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
new_n1 = kzalloc_obj(struct assoc_array_node, GFP_KERNEL);
if (!new_n1)
return false;
edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
@@ -741,7 +741,7 @@ all_leaves_cluster_together:
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(struct_size(new_s0, index_key, keylen), GFP_KERNEL);
new_s0 = kzalloc_flex(*new_s0, index_key, keylen, GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
@@ -832,7 +832,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
/* Create a new node now since we're going to need it anyway */
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
new_n0 = kzalloc_obj(struct assoc_array_node, GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
@@ -848,8 +848,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(struct_size(new_s0, index_key, keylen),
GFP_KERNEL);
new_s0 = kzalloc_flex(*new_s0, index_key, keylen, GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
@@ -898,8 +897,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s1 = kzalloc(struct_size(new_s1, index_key, keylen),
GFP_KERNEL);
new_s1 = kzalloc_flex(*new_s1, index_key, keylen, GFP_KERNEL);
if (!new_s1)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
@@ -977,7 +975,7 @@ struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
*/
BUG_ON(assoc_array_ptr_is_meta(object));
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
edit = kzalloc_obj(struct assoc_array_edit, GFP_KERNEL);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
@@ -1089,7 +1087,7 @@ struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
pr_devel("-->%s()\n", __func__);
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
edit = kzalloc_obj(struct assoc_array_edit, GFP_KERNEL);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
@@ -1206,7 +1204,8 @@ found_leaf:
node = parent;
/* Create a new node to collapse into */
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
new_n0 = kzalloc_obj(struct assoc_array_node,
GFP_KERNEL);
if (!new_n0)
goto enomem;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
@@ -1281,7 +1280,7 @@ struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
if (!array->root)
return NULL;
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
edit = kzalloc_obj(struct assoc_array_edit, GFP_KERNEL);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
@@ -1469,7 +1468,7 @@ int assoc_array_gc(struct assoc_array *array,
if (!array->root)
return 0;
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
edit = kzalloc_obj(struct assoc_array_edit, GFP_KERNEL);
if (!edit)
return -ENOMEM;
edit->array = array;
@@ -1490,8 +1489,7 @@ descend:
shortcut = assoc_array_ptr_to_shortcut(cursor);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s = kmalloc(struct_size(new_s, index_key, keylen),
GFP_KERNEL);
new_s = kmalloc_flex(*new_s, index_key, keylen, GFP_KERNEL);
if (!new_s)
goto enomem;
pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
@@ -1505,7 +1503,7 @@ descend:
/* Duplicate the node at this position */
node = assoc_array_ptr_to_node(cursor);
new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
new_n = kzalloc_obj(struct assoc_array_node, GFP_KERNEL);
if (!new_n)
goto enomem;
pr_devel("dup node %p -> %p\n", node, new_n);

View File

@@ -1320,7 +1320,7 @@ struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
if (prim_poly == 0)
prim_poly = prim_poly_tab[m-min_m];
bch = kzalloc(sizeof(*bch), GFP_KERNEL);
bch = kzalloc_obj(*bch, GFP_KERNEL);
if (bch == NULL)
goto fail;

View File

@@ -31,7 +31,7 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
}
if (sizeof(spinlock_t) != 0) {
tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
tlocks = kvmalloc_objs(spinlock_t, size, gfp);
if (!tlocks)
return -ENOMEM;
for (i = 0; i < size; i++) {

View File

@@ -193,7 +193,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
BUG_ON(range.start > range.stop);
cmod = kmalloc(sizeof(*cmod), GFP_KERNEL);
cmod = kmalloc_obj(*cmod, GFP_KERNEL);
if (unlikely(!cmod))
return -ENOMEM;
@@ -383,7 +383,7 @@ codetag_register_type(const struct codetag_type_desc *desc)
BUG_ON(desc->tag_size <= 0);
cttype = kzalloc(sizeof(*cttype), GFP_KERNEL);
cttype = kzalloc_obj(*cttype, GFP_KERNEL);
if (unlikely(!cttype))
return ERR_PTR(-ENOMEM);

View File

@@ -309,7 +309,7 @@ EXPORT_SYMBOL(irq_cpu_rmap_remove);
*/
int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
{
struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
struct irq_glue *glue = kzalloc_obj(*glue, GFP_KERNEL);
int rc;
if (!glue)

View File

@@ -245,12 +245,12 @@ struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
struct gf128mul_64k *t;
int i, j, k;
t = kzalloc(sizeof(*t), GFP_KERNEL);
t = kzalloc_obj(*t, GFP_KERNEL);
if (!t)
goto out;
for (i = 0; i < 16; i++) {
t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
t->t[i] = kzalloc_obj(*t->t[i], GFP_KERNEL);
if (!t->t[i]) {
gf128mul_free_64k(t);
t = NULL;
@@ -326,7 +326,7 @@ struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
struct gf128mul_4k *t;
int j, k;
t = kzalloc(sizeof(*t), GFP_KERNEL);
t = kzalloc_obj(*t, GFP_KERNEL);
if (!t)
goto out;

View File

@@ -372,7 +372,7 @@ mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
return -ENOMEM;
} else {
if (!ctx->next) {
ctx->next = kzalloc(sizeof *ctx, GFP_KERNEL);
ctx->next = kzalloc_obj(*ctx, GFP_KERNEL);
if (!ctx->next)
return -ENOMEM;
}

View File

@@ -33,7 +33,7 @@ MPI mpi_alloc(unsigned nlimbs)
{
MPI a;
a = kmalloc(sizeof *a, GFP_KERNEL);
a = kmalloc_obj(*a, GFP_KERNEL);
if (!a)
return a;
@@ -93,14 +93,14 @@ int mpi_resize(MPI a, unsigned nlimbs)
return 0; /* no need to do it */
if (a->d) {
p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
p = kzalloc_objs(mpi_limb_t, nlimbs, GFP_KERNEL);
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
kfree_sensitive(a->d);
a->d = p;
} else {
a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
a->d = kzalloc_objs(mpi_limb_t, nlimbs, GFP_KERNEL);
if (!a->d)
return -ENOMEM;
}

View File

@@ -139,11 +139,11 @@ int dhry(int n)
/* Initializations */
Next_Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
Next_Ptr_Glob = (Rec_Pointer) kzalloc_obj(Rec_Type, GFP_ATOMIC);
if (!Next_Ptr_Glob)
return -ENOMEM;
Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
Ptr_Glob = (Rec_Pointer) kzalloc_obj(Rec_Type, GFP_ATOMIC);
if (!Ptr_Glob) {
kfree(Next_Ptr_Glob);
return -ENOMEM;

View File

@@ -105,7 +105,7 @@ int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags,
struct dim_irq_moder *moder;
int len;
dev->irq_moder = kzalloc(sizeof(*dev->irq_moder), GFP_KERNEL);
dev->irq_moder = kzalloc_obj(*dev->irq_moder, GFP_KERNEL);
if (!dev->irq_moder)
return -ENOMEM;

View File

@@ -1241,7 +1241,7 @@ static int ddebug_add_module(struct _ddebug_info *di, const char *modname)
return 0;
}
dt = kzalloc(sizeof(*dt), GFP_KERNEL);
dt = kzalloc_obj(*dt, GFP_KERNEL);
if (dt == NULL) {
pr_err("error adding module: %s\n", modname);
return -ENOMEM;

View File

@@ -80,7 +80,7 @@ static void populate_error_injection_list(struct error_injection_entry *start,
continue;
}
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
ent = kmalloc_obj(*ent, GFP_KERNEL);
if (!ent)
break;
ent->start_addr = entry;

View File

@@ -47,7 +47,7 @@ static cpumask_var_t *alloc_node_to_cpumask(void)
cpumask_var_t *masks;
int node;
masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
masks = kzalloc_objs(cpumask_var_t, nr_node_ids, GFP_KERNEL);
if (!masks)
return NULL;
@@ -320,10 +320,10 @@ static int alloc_cluster_groups(unsigned int ncpus,
goto no_cluster;
/* Allocate memory based on cluster number. */
clusters = kcalloc(ncluster, sizeof(*clusters), GFP_KERNEL);
clusters = kzalloc_objs(*clusters, ncluster, GFP_KERNEL);
if (!clusters)
goto no_cluster;
cluster_groups = kcalloc(ncluster, sizeof(struct node_groups), GFP_KERNEL);
cluster_groups = kzalloc_objs(struct node_groups, ncluster, GFP_KERNEL);
if (!cluster_groups)
goto fail_cluster_groups;
@@ -432,9 +432,7 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
return numgrps;
}
node_groups = kcalloc(nr_node_ids,
sizeof(struct node_groups),
GFP_KERNEL);
node_groups = kzalloc_objs(struct node_groups, nr_node_ids, GFP_KERNEL);
if (!node_groups)
return -ENOMEM;
@@ -508,7 +506,7 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
if (!node_to_cpumask)
goto fail_npresmsk;
masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
masks = kzalloc_objs(*masks, numgrps, GFP_KERNEL);
if (!masks)
goto fail_node_to_cpumask;
@@ -574,7 +572,7 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
if (numgrps == 0)
return NULL;
masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
masks = kzalloc_objs(*masks, numgrps, GFP_KERNEL);
if (!masks)
return NULL;

View File

@@ -417,7 +417,7 @@ next:
}
bitmap = alloc;
if (!bitmap)
bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
bitmap = kzalloc_obj(*bitmap, GFP_NOWAIT);
if (!bitmap)
goto alloc;
bitmap->bitmap[0] = tmp;
@@ -444,7 +444,7 @@ next:
} else {
bitmap = alloc;
if (!bitmap)
bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
bitmap = kzalloc_obj(*bitmap, GFP_NOWAIT);
if (!bitmap)
goto alloc;
__set_bit(bit, bitmap->bitmap);
@@ -465,7 +465,7 @@ out:
return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
xas_unlock_irqrestore(&xas, flags);
alloc = kzalloc(sizeof(*bitmap), gfp);
alloc = kzalloc_obj(*bitmap, gfp);
if (!alloc)
return -ENOMEM;
xas_set(&xas, min / IDA_BITMAP_BITS);

View File

@@ -311,8 +311,7 @@ static inline int span_iteration_check(void) {return 0; }
static int interval_tree_test_init(void)
{
nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
GFP_KERNEL);
nodes = kmalloc_objs(struct interval_tree_node, nnodes, GFP_KERNEL);
if (!nodes)
return -ENOMEM;

View File

@@ -903,7 +903,7 @@ static int want_pages_array(struct page ***res, size_t size,
count = maxpages;
WARN_ON(!count); // caller should've prevented that
if (!*res) {
*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
*res = kvmalloc_objs(struct page *, count, GFP_KERNEL);
if (!*res)
return 0;
}
@@ -1318,7 +1318,7 @@ struct iovec *iovec_from_user(const struct iovec __user *uvec,
if (nr_segs > UIO_MAXIOV)
return ERR_PTR(-EINVAL);
if (nr_segs > fast_segs) {
iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
iov = kmalloc_objs(struct iovec, nr_segs, GFP_KERNEL);
if (!iov)
return ERR_PTR(-ENOMEM);
}

View File

@@ -765,7 +765,7 @@ static struct kobject *kobject_create(void)
{
struct kobject *kobj;
kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
kobj = kzalloc_obj(*kobj, GFP_KERNEL);
if (!kobj)
return NULL;
@@ -962,7 +962,7 @@ static struct kset *kset_create(const char *name,
struct kset *kset;
int retval;
kset = kzalloc(sizeof(*kset), GFP_KERNEL);
kset = kzalloc_obj(*kset, GFP_KERNEL);
if (!kset)
return NULL;
retval = kobject_set_name(&kset->kobj, "%s", name);

View File

@@ -124,7 +124,7 @@ static int kobject_action_args(const char *buf, size_t count,
if (!count)
return -EINVAL;
env = kzalloc(sizeof(*env), GFP_KERNEL);
env = kzalloc_obj(*env, GFP_KERNEL);
if (!env)
return -ENOMEM;
@@ -537,7 +537,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
}
/* environment buffer */
env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
env = kzalloc_obj(struct kobj_uevent_env, GFP_KERNEL);
if (!env)
return -ENOMEM;
@@ -776,7 +776,7 @@ static int uevent_net_init(struct net *net)
.flags = NL_CFG_F_NONROOT_RECV
};
ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
ue_sk = kzalloc_obj(*ue_sk, GFP_KERNEL);
if (!ue_sk)
return -ENOMEM;

View File

@@ -410,7 +410,7 @@ struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suit
kunit_suite_for_each_test_case(suite, test_case) { n++; }
filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
filtered = kzalloc_objs(*filtered, n + 1, GFP_KERNEL);
if (!filtered) {
kfree(copy);
return ERR_PTR(-ENOMEM);

View File

@@ -111,7 +111,7 @@ static struct kunit_device *kunit_device_register_internal(struct kunit *test,
struct kunit_device *kunit_dev;
int err = -ENOMEM;
kunit_dev = kzalloc(sizeof(*kunit_dev), GFP_KERNEL);
kunit_dev = kzalloc_obj(*kunit_dev, GFP_KERNEL);
if (!kunit_dev)
return ERR_PTR(err);

View File

@@ -131,7 +131,7 @@ kunit_filter_glob_tests(const struct kunit_suite *const suite, const char *test_
if (!copy)
return ERR_PTR(-ENOMEM);
filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
filtered = kzalloc_objs(*filtered, n + 1, GFP_KERNEL);
if (!filtered) {
kfree(copy);
return ERR_PTR(-ENOMEM);
@@ -179,7 +179,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
const size_t max = suite_set->end - suite_set->start;
copy = kcalloc(max, sizeof(*copy), GFP_KERNEL);
copy = kzalloc_objs(*copy, max, GFP_KERNEL);
if (!copy) { /* won't be able to run anything, return an empty set */
return filtered;
}
@@ -194,7 +194,8 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
/* Parse attribute filters */
if (filters) {
filter_count = kunit_get_filter_count(filters);
parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
parsed_filters = kzalloc_objs(*parsed_filters, filter_count,
GFP_KERNEL);
if (!parsed_filters) {
*err = -ENOMEM;
goto free_parsed_glob;

View File

@@ -272,7 +272,7 @@ static void free_suite_set_at_end(struct kunit *test, const void *to_free)
if (!((struct kunit_suite_set *)to_free)->start)
return;
free = kzalloc(sizeof(struct kunit_suite_set), GFP_KERNEL);
free = kzalloc_obj(struct kunit_suite_set, GFP_KERNEL);
*free = *(struct kunit_suite_set *)to_free;
kunit_add_action(test, free_suite_set, (void *)free);

View File

@@ -283,7 +283,7 @@ static void example_slow_test(struct kunit *test)
*/
static int example_resource_init(struct kunit_resource *res, void *context)
{
int *info = kmalloc(sizeof(*info), GFP_KERNEL);
int *info = kmalloc_obj(*info, GFP_KERNEL);
if (!info)
return -ENOMEM;

View File

@@ -538,8 +538,7 @@ static void kunit_resource_test_action_ordering(struct kunit *test)
static int kunit_resource_test_init(struct kunit *test)
{
struct kunit_test_resource_context *ctx =
kzalloc(sizeof(*ctx), GFP_KERNEL);
struct kunit_test_resource_context *ctx = kzalloc_obj(*ctx, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

View File

@@ -98,7 +98,7 @@ int kunit_add_action(struct kunit *test, void (*action)(void *), void *ctx)
KUNIT_ASSERT_NOT_NULL_MSG(test, action, "Tried to action a NULL function!");
action_ctx = kzalloc(sizeof(*action_ctx), GFP_KERNEL);
action_ctx = kzalloc_obj(*action_ctx, GFP_KERNEL);
if (!action_ctx)
return -ENOMEM;

View File

@@ -111,7 +111,7 @@ void __kunit_activate_static_stub(struct kunit *test,
/* We got an extra reference from find_resource(), so put it. */
kunit_put_resource(res);
} else {
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
ctx = kmalloc_obj(*ctx, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
ctx->real_fn_addr = real_fn_addr;
ctx->replacement_addr = replacement_addr;

View File

@@ -18,7 +18,7 @@ static struct string_stream_fragment *alloc_string_stream_fragment(int len, gfp_
{
struct string_stream_fragment *frag;
frag = kzalloc(sizeof(*frag), gfp);
frag = kzalloc_obj(*frag, gfp);
if (!frag)
return ERR_PTR(-ENOMEM);
@@ -158,7 +158,7 @@ struct string_stream *alloc_string_stream(gfp_t gfp)
{
struct string_stream *stream;
stream = kzalloc(sizeof(*stream), gfp);
stream = kzalloc_obj(*stream, gfp);
if (!stream)
return ERR_PTR(-ENOMEM);

View File

@@ -48,7 +48,7 @@ int logic_iomem_add_region(struct resource *resource,
if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
return -EINVAL;
rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
rreg = kzalloc_obj(*rreg, GFP_KERNEL);
if (!rreg)
return -ENOMEM;

View File

@@ -94,14 +94,14 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
if (e_count > LC_MAX_ACTIVE)
return NULL;
slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
slot = kzalloc_objs(struct hlist_head, e_count, GFP_KERNEL);
if (!slot)
goto out_fail;
element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
element = kzalloc_objs(struct lc_element *, e_count, GFP_KERNEL);
if (!element)
goto out_fail;
lc = kzalloc(sizeof(*lc), GFP_KERNEL);
lc = kzalloc_obj(*lc, GFP_KERNEL);
if (!lc)
goto out_fail;

View File

@@ -110,7 +110,7 @@ static int lwq_test(void)
for (i = 0; i < ARRAY_SIZE(threads); i++)
threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i);
for (i = 0; i < 100; i++) {
t = kmalloc(sizeof(*t), GFP_KERNEL);
t = kmalloc_obj(*t, GFP_KERNEL);
if (!t)
break;
t->i = i;

View File

@@ -525,7 +525,7 @@ struct objagg *objagg_create(const struct objagg_ops *ops,
!ops->delta_destroy))
return ERR_PTR(-EINVAL);
objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
objagg = kzalloc_obj(*objagg, GFP_KERNEL);
if (!objagg)
return ERR_PTR(-ENOMEM);
objagg->ops = ops;
@@ -610,8 +610,8 @@ const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
struct objagg_obj *objagg_obj;
int i;
objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
objagg->obj_count), GFP_KERNEL);
objagg_stats = kzalloc_flex(*objagg_stats, stats_info,
objagg->obj_count, GFP_KERNEL);
if (!objagg_stats)
return ERR_PTR(-ENOMEM);
@@ -786,11 +786,11 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
struct objagg_obj *objagg_obj;
int i, j;
graph = kzalloc(sizeof(*graph), GFP_KERNEL);
graph = kzalloc_obj(*graph, GFP_KERNEL);
if (!graph)
return NULL;
graph->nodes = kcalloc(nodes_count, sizeof(*graph->nodes), GFP_KERNEL);
graph->nodes = kzalloc_objs(*graph->nodes, nodes_count, GFP_KERNEL);
if (!graph->nodes)
goto err_nodes_alloc;
graph->nodes_count = nodes_count;
@@ -930,7 +930,7 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg,
struct objagg_hints *objagg_hints;
int err;
objagg_hints = kzalloc(sizeof(*objagg_hints), GFP_KERNEL);
objagg_hints = kzalloc_obj(*objagg_hints, GFP_KERNEL);
if (!objagg_hints)
return ERR_PTR(-ENOMEM);
@@ -1010,9 +1010,8 @@ objagg_hints_stats_get(struct objagg_hints *objagg_hints)
struct objagg_hints_node *hnode;
int i;
objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
objagg_hints->node_count),
GFP_KERNEL);
objagg_stats = kzalloc_flex(*objagg_stats, stats_info,
objagg_hints->node_count, GFP_KERNEL);
if (!objagg_stats)
return ERR_PTR(-ENOMEM);

View File

@@ -26,7 +26,7 @@ static void once_disable_jump(struct static_key_true *key, struct module *mod)
{
struct once_work *w;
w = kmalloc(sizeof(*w), GFP_ATOMIC);
w = kmalloc_obj(*w, GFP_ATOMIC);
if (!w)
return;

View File

@@ -268,7 +268,7 @@ struct parman *parman_create(const struct parman_ops *ops, void *priv)
{
struct parman *parman;
parman = kzalloc(sizeof(*parman), GFP_KERNEL);
parman = kzalloc_obj(*parman, GFP_KERNEL);
if (!parman)
return NULL;
INIT_LIST_HEAD(&parman->prio_list);

View File

@@ -73,7 +73,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
if (!ref->percpu_count_ptr)
return -ENOMEM;
data = kzalloc(sizeof(*ref->data), gfp);
data = kzalloc_obj(*ref->data, gfp);
if (!data) {
free_percpu((void __percpu *)ref->percpu_count_ptr);
ref->percpu_count_ptr = 0;

View File

@@ -287,7 +287,7 @@ pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8
if (err)
return err;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
desc = kzalloc_obj(*desc, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -328,7 +328,7 @@ pldm_parse_one_record(struct pldmfw_priv *data,
int i;
/* Make a copy and insert it into the record list */
record = kzalloc(sizeof(*record), GFP_KERNEL);
record = kzalloc_obj(*record, GFP_KERNEL);
if (!record)
return -ENOMEM;
@@ -465,7 +465,7 @@ static int pldm_parse_components(struct pldmfw_priv *data)
if (err)
return err;
component = kzalloc(sizeof(*component), GFP_KERNEL);
component = kzalloc_obj(*component, GFP_KERNEL);
if (!component)
return -ENOMEM;
@@ -848,7 +848,7 @@ int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw)
struct pldmfw_priv *data;
int err;
data = kzalloc(sizeof(*data), GFP_KERNEL);
data = kzalloc_obj(*data, GFP_KERNEL);
if (!data)
return -ENOMEM;

View File

@@ -399,7 +399,7 @@ static int augmented_check(void)
static int __init rbtree_test_init(void)
{
nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
nodes = kmalloc_objs(*nodes, nnodes, GFP_KERNEL);
if (!nodes)
return -ENOMEM;

View File

@@ -73,7 +73,7 @@ static struct rs_codec *codec_init(int symsize, int gfpoly, int (*gffunc)(int),
int i, j, sr, root, iprim;
struct rs_codec *rs;
rs = kzalloc(sizeof(*rs), gfp);
rs = kzalloc_obj(*rs, gfp);
if (!rs)
return NULL;

View File

@@ -111,7 +111,7 @@ static struct wspace *alloc_ws(struct rs_codec *rs)
struct wspace *ws;
int nn = rs->nn;
ws = kzalloc(sizeof(*ws), GFP_KERNEL);
ws = kzalloc_obj(*ws, GFP_KERNEL);
if (!ws)
return NULL;
@@ -124,7 +124,7 @@ static struct wspace *alloc_ws(struct rs_codec *rs)
ws->s = ws->r + nn;
ws->corr = ws->s + nroots;
ws->errlocs = kmalloc_array(nn + nroots, sizeof(int), GFP_KERNEL);
ws->errlocs = kmalloc_objs(int, nn + nroots, GFP_KERNEL);
if (!ws->errlocs)
goto err;

View File

@@ -74,8 +74,7 @@ ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
struct ref_tracker_dir_stats *stats;
struct ref_tracker *tracker;
stats = kmalloc(struct_size(stats, stacks, limit),
GFP_NOWAIT);
stats = kmalloc_flex(*stats, stacks, limit, GFP_NOWAIT);
if (!stats)
return ERR_PTR(-ENOMEM);
stats->total = 0;
@@ -268,7 +267,7 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir,
}
if (gfp & __GFP_DIRECT_RECLAIM)
gfp_mask |= __GFP_NOFAIL;
*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
*trackerp = tracker = kzalloc_obj(*tracker, gfp_mask);
if (unlikely(!tracker)) {
pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
refcount_inc(&dir->untracked);

View File

@@ -168,8 +168,7 @@ static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
return ptr;
} else
return kmalloc_array(nents, sizeof(struct scatterlist),
gfp_mask);
return kmalloc_objs(struct scatterlist, nents, gfp_mask);
}
static void sg_kfree(struct scatterlist *sg, unsigned int nents)
@@ -632,8 +631,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
return NULL;
nalloc++;
}
sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
gfp & ~GFP_DMA);
sgl = kmalloc_objs(struct scatterlist, nalloc, gfp & ~GFP_DMA);
if (!sgl)
return NULL;

View File

@@ -152,7 +152,7 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
int i, ret;
struct sg_splitter *splitters;
splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
splitters = kzalloc_objs(*splitters, nb_splits, gfp_mask);
if (!splitters)
return -ENOMEM;
@@ -163,9 +163,8 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
ret = -ENOMEM;
for (i = 0; i < nb_splits; i++) {
splitters[i].out_sg = kmalloc_array(splitters[i].nents,
sizeof(struct scatterlist),
gfp_mask);
splitters[i].out_sg = kmalloc_objs(struct scatterlist,
splitters[i].nents, gfp_mask);
if (!splitters[i].out_sg)
goto err;
}

View File

@@ -260,7 +260,7 @@ int stack_depot_init(void)
entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
stack_table = kvzalloc_objs(struct list_head, entries, GFP_KERNEL);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;

View File

@@ -147,7 +147,7 @@ int parse_int_array(const char *buf, size_t count, int **array)
if (!nints)
return -ENOENT;
ints = kcalloc(nints + 1, sizeof(*ints), GFP_KERNEL);
ints = kzalloc_objs(*ints, nints + 1, GFP_KERNEL);
if (!ints)
return -ENOMEM;

View File

@@ -94,7 +94,7 @@ static int bpf_fill_maxinsns1(struct bpf_test *self)
__u32 k = ~0;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -113,7 +113,7 @@ static int bpf_fill_maxinsns2(struct bpf_test *self)
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -133,7 +133,7 @@ static int bpf_fill_maxinsns3(struct bpf_test *self)
struct rnd_state rnd;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -159,7 +159,7 @@ static int bpf_fill_maxinsns4(struct bpf_test *self)
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -178,7 +178,7 @@ static int bpf_fill_maxinsns5(struct bpf_test *self)
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -201,7 +201,7 @@ static int bpf_fill_maxinsns6(struct bpf_test *self)
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -223,7 +223,7 @@ static int bpf_fill_maxinsns7(struct bpf_test *self)
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -249,7 +249,7 @@ static int bpf_fill_maxinsns8(struct bpf_test *self)
struct sock_filter *insn;
int i, jmp_off = len - 3;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -272,7 +272,7 @@ static int bpf_fill_maxinsns9(struct bpf_test *self)
struct bpf_insn *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -298,7 +298,7 @@ static int bpf_fill_maxinsns10(struct bpf_test *self)
struct bpf_insn *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -324,7 +324,7 @@ static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
unsigned int rlen;
int i, j;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -358,7 +358,7 @@ static int bpf_fill_maxinsns12(struct bpf_test *self)
struct sock_filter *insn;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -381,7 +381,7 @@ static int bpf_fill_maxinsns13(struct bpf_test *self)
struct sock_filter *insn;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -410,7 +410,7 @@ static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -434,7 +434,7 @@ static int __bpf_fill_stxdw(struct bpf_test *self, int size)
struct bpf_insn *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -484,7 +484,7 @@ static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm, bool alu3
int len = S16_MAX + 5;
int i;
insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
insns = kmalloc_objs(*insns, len, GFP_KERNEL);
if (!insns)
return -ENOMEM;
@@ -626,7 +626,7 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
int imm, k;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -759,7 +759,7 @@ static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
int i = 0;
u64 val;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -1244,7 +1244,7 @@ static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
u32 imm;
int rd;
insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
insns = kmalloc_objs(*insns, len, GFP_KERNEL);
if (!insns)
return -ENOMEM;
@@ -1426,7 +1426,7 @@ static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
int rd, rs;
int i = 0;
insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
insns = kmalloc_objs(*insns, len, GFP_KERNEL);
if (!insns)
return -ENOMEM;
@@ -1917,7 +1917,7 @@ static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
u64 mem, upd, res;
int rd, rs, i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -2163,7 +2163,7 @@ static int bpf_fill_ld_imm64_magn(struct bpf_test *self)
int bit, adj, sign;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -2217,7 +2217,7 @@ static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
u32 rand = 1;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
insn = kmalloc_objs(*insn, len, GFP_KERNEL);
if (!insn)
return -ENOMEM;
@@ -2724,7 +2724,7 @@ static int __bpf_fill_staggered_jumps(struct bpf_test *self,
struct bpf_insn *insns;
int off, ind;
insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
insns = kmalloc_objs(*insns, len, GFP_KERNEL);
if (!insns)
return -ENOMEM;
@@ -15461,7 +15461,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
int which, err;
/* Allocate the table of programs to be used for tail calls */
progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
progs = kzalloc_flex(*progs, ptrs, ntests + 1, GFP_KERNEL);
if (!progs)
goto out_nomem;

View File

@@ -29,7 +29,7 @@ static int __init test_debug_virtual_init(void)
pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);
foo = kzalloc(sizeof(*foo), GFP_KERNEL);
foo = kzalloc_obj(*foo, GFP_KERNEL);
if (!foo)
return -ENOMEM;

View File

@@ -1309,7 +1309,7 @@ static ssize_t upload_register_store(struct device *dev,
goto free_name;
}
tst = kzalloc(sizeof(*tst), GFP_KERNEL);
tst = kzalloc_obj(*tst, GFP_KERNEL);
if (!tst) {
ret = -ENOMEM;
goto free_name;
@@ -1526,7 +1526,7 @@ static int __init test_firmware_init(void)
{
int rc;
test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
test_fw_config = kzalloc_obj(struct test_config, GFP_KERNEL);
if (!test_fw_config)
return -ENOMEM;

View File

@@ -166,7 +166,7 @@ static int dmirror_fops_open(struct inode *inode, struct file *filp)
int ret;
/* Mirror this process address space */
dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL);
dmirror = kzalloc_obj(*dmirror, GFP_KERNEL);
if (dmirror == NULL)
return -ENOMEM;
@@ -504,7 +504,7 @@ static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
void *ptr;
int ret = -ENOMEM;
devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
devmem = kzalloc_obj(*devmem, GFP_KERNEL);
if (!devmem)
return ret;

View File

@@ -211,7 +211,7 @@ static int kho_test_save(void)
max_mem = PAGE_ALIGN(max_mem);
max_nr = max_mem >> PAGE_SHIFT;
folios = kvmalloc_array(max_nr, sizeof(*state->folios), GFP_KERNEL);
folios = kvmalloc_objs(*state->folios, max_nr, GFP_KERNEL);
if (!folios)
return -ENOMEM;
state->folios = folios;

View File

@@ -24,20 +24,20 @@ static int __init test_memcat_p_init(void)
struct test_struct **in0, **in1, **out, **p;
int err = -ENOMEM, i, r, total = 0;
in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL);
in0 = kzalloc_objs(*in0, INPUT_MAX, GFP_KERNEL);
if (!in0)
return err;
in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL);
in1 = kzalloc_objs(*in1, INPUT_MAX, GFP_KERNEL);
if (!in1)
goto err_free_in0;
for (i = 0, r = 1; i < INPUT_MAX - 1; i++) {
in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL);
in0[i] = kmalloc_obj(**in0, GFP_KERNEL);
if (!in0[i])
goto err_free_elements;
in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL);
in1[i] = kmalloc_obj(**in1, GFP_KERNEL);
if (!in1[i]) {
kfree(in0[i]);
goto err_free_elements;

View File

@@ -107,7 +107,7 @@ static void *delta_create(void *priv, void *parent_obj, void *obj)
if (!delta_check(priv, parent_obj, obj))
return ERR_PTR(-EINVAL);
delta = kzalloc(sizeof(*delta), GFP_KERNEL);
delta = kzalloc_obj(*delta, GFP_KERNEL);
if (!delta)
return ERR_PTR(-ENOMEM);
delta->key_id_diff = diff;
@@ -130,7 +130,7 @@ static void *root_create(void *priv, void *obj, unsigned int id)
struct tokey *key = obj;
struct root *root;
root = kzalloc(sizeof(*root), GFP_KERNEL);
root = kzalloc_obj(*root, GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
memcpy(&root->key, key, sizeof(root->key));

View File

@@ -219,7 +219,7 @@ static struct test_parman *test_parman_create(const struct parman_ops *ops)
struct test_parman *test_parman;
int err;
test_parman = kzalloc(sizeof(*test_parman), GFP_KERNEL);
test_parman = kzalloc_obj(*test_parman, GFP_KERNEL);
if (!test_parman)
return ERR_PTR(-ENOMEM);
err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT);

View File

@@ -524,7 +524,7 @@ static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
const char *key;
int err = 0;
rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
rhlt = kmalloc_obj(*rhlt, GFP_KERNEL);
if (WARN_ON(!rhlt))
return -EINVAL;

View File

@@ -396,7 +396,7 @@ vm_map_ram_test(void)
int i;
map_nr_pages = nr_pages > 0 ? nr_pages:1;
pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL);
pages = kzalloc_objs(struct page *, map_nr_pages, GFP_KERNEL);
if (!pages)
return -1;
@@ -542,7 +542,7 @@ init_test_configuration(void)
nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);
/* Allocate the space for test instances. */
tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL);
tdriver = kvzalloc_objs(*tdriver, nr_threads, GFP_KERNEL);
if (tdriver == NULL)
return -1;

View File

@@ -387,7 +387,7 @@ static void __init iov_kunit_load_folioq(struct kunit *test,
for (i = 0; i < npages; i++) {
if (folioq_full(p)) {
p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
p->next = kzalloc_obj(struct folio_queue, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
folioq_init(p->next, 0);
p->next->prev = p;
@@ -403,7 +403,7 @@ static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
{
struct folio_queue *folioq;
folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
folioq = kzalloc_obj(struct folio_queue, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
folioq_init(folioq, 0);
@@ -565,7 +565,7 @@ static struct xarray *iov_kunit_create_xarray(struct kunit *test)
{
struct xarray *xarray;
xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
xarray = kzalloc_obj(struct xarray, GFP_KERNEL);
xa_init(xarray);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);

View File

@@ -26,10 +26,10 @@ static void list_test_list_init(struct kunit *test)
INIT_LIST_HEAD(&list2);
list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
list4 = kzalloc_obj(*list4, GFP_KERNEL | __GFP_NOFAIL);
INIT_LIST_HEAD(list4);
list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
list5 = kmalloc_obj(*list5, GFP_KERNEL | __GFP_NOFAIL);
memset(list5, 0xFF, sizeof(*list5));
INIT_LIST_HEAD(list5);
@@ -829,10 +829,10 @@ static void hlist_test_init(struct kunit *test)
INIT_HLIST_HEAD(&list2);
list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
list4 = kzalloc_obj(*list4, GFP_KERNEL | __GFP_NOFAIL);
INIT_HLIST_HEAD(list4);
list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
list5 = kmalloc_obj(*list5, GFP_KERNEL | __GFP_NOFAIL);
memset(list5, 0xFF, sizeof(*list5));
INIT_HLIST_HEAD(list5);

View File

@@ -104,7 +104,8 @@ static void test_ratelimit_stress(struct kunit *test)
int i;
const int n_stress_kthread = cpumask_weight(cpu_online_mask);
struct stress_kthread skt = { 0 };
struct stress_kthread *sktp = kcalloc(n_stress_kthread, sizeof(*sktp), GFP_KERNEL);
struct stress_kthread *sktp = kzalloc_objs(*sktp, n_stress_kthread,
GFP_KERNEL);
KUNIT_EXPECT_NOT_NULL_MSG(test, sktp, "Memory allocation failure");
for (i = 0; i < n_stress_kthread; i++) {

View File

@@ -591,7 +591,7 @@ enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
{
struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
struct xz_dec_bcj *s = kmalloc_obj(*s, GFP_KERNEL);
if (s != NULL)
s->single_call = single_call;

View File

@@ -1138,7 +1138,7 @@ enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b)
struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max)
{
struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
struct xz_dec_lzma2 *s = kmalloc_obj(*s, GFP_KERNEL);
if (s == NULL)
return NULL;
@@ -1296,7 +1296,7 @@ struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
if (dict_size < 4096 || dict_size > (3U << 30))
return NULL;
s = kmalloc(sizeof(*s), GFP_KERNEL);
s = kmalloc_obj(*s, GFP_KERNEL);
if (s == NULL)
return NULL;

View File

@@ -784,7 +784,7 @@ enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
{
struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
struct xz_dec *s = kmalloc_obj(*s, GFP_KERNEL);
if (s == NULL)
return NULL;

View File

@@ -14,7 +14,7 @@ int zlib_inflate_blob(void *gunzip_buf, unsigned int sz,
int rc;
rc = -ENOMEM;
strm = kmalloc(sizeof(*strm), GFP_KERNEL);
strm = kmalloc_obj(*strm, GFP_KERNEL);
if (strm == NULL)
goto gunzip_nomem1;
strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);