Convert more 'alloc_obj' cases to default GFP_KERNEL arguments

This converts some of the visually simpler cases that have been split
over multiple lines.  I only did the ones that are easy to verify the
resulting diff by having just that final GFP_KERNEL argument on the next
line.

Somebody should probably do a proper coccinelle script for this, but for
me the trivial script actually resulted in an assertion failure in the
middle of the script.  I probably had made it a bit _too_ trivial.

So after fighting that for a while I decided to just do some of the
syntactically simpler cases with variations of the previous 'sed'
scripts.

The more syntactically complex multi-line cases would mostly really want
whitespace cleanup anyway.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Linus Torvalds
2026-02-21 20:03:00 -08:00
parent 323bbfcf1e
commit 32a92f8c89
826 changed files with 1211 additions and 2422 deletions

View File

@@ -26,8 +26,7 @@ struct damon_sysfs_scheme_region {
static struct damon_sysfs_scheme_region *damon_sysfs_scheme_region_alloc(
struct damon_region *region)
{
struct damon_sysfs_scheme_region *sysfs_region = kmalloc_obj(*sysfs_region,
GFP_KERNEL);
struct damon_sysfs_scheme_region *sysfs_region = kmalloc_obj(*sysfs_region);
if (!sysfs_region)
return NULL;
@@ -138,8 +137,7 @@ struct damon_sysfs_scheme_regions {
static struct damon_sysfs_scheme_regions *
damon_sysfs_scheme_regions_alloc(void)
{
struct damon_sysfs_scheme_regions *regions = kmalloc_obj(*regions,
GFP_KERNEL);
struct damon_sysfs_scheme_regions *regions = kmalloc_obj(*regions);
if (!regions)
return NULL;
@@ -851,8 +849,7 @@ static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
enum damos_wmark_metric metric, unsigned long interval_us,
unsigned long high, unsigned long mid, unsigned long low)
{
struct damon_sysfs_watermarks *watermarks = kmalloc_obj(*watermarks,
GFP_KERNEL);
struct damon_sysfs_watermarks *watermarks = kmalloc_obj(*watermarks);
if (!watermarks)
return NULL;
@@ -1659,8 +1656,7 @@ struct damon_sysfs_access_pattern {
static
struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
{
struct damon_sysfs_access_pattern *access_pattern = kmalloc_obj(*access_pattern,
GFP_KERNEL);
struct damon_sysfs_access_pattern *access_pattern = kmalloc_obj(*access_pattern);
if (!access_pattern)
return NULL;
@@ -2681,12 +2677,10 @@ static int damos_sysfs_add_migrate_dest(struct damos *scheme,
struct damos_migrate_dests *dests = &scheme->migrate_dests;
int i;
dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, sysfs_dests->nr,
GFP_KERNEL);
dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, sysfs_dests->nr);
if (!dests->node_id_arr)
return -ENOMEM;
dests->weight_arr = kmalloc_objs(*dests->weight_arr, sysfs_dests->nr,
GFP_KERNEL);
dests->weight_arr = kmalloc_objs(*dests->weight_arr, sysfs_dests->nr);
if (!dests->weight_arr)
/* ->node_id_arr will be freed by scheme destruction */
return -ENOMEM;

View File

@@ -609,8 +609,7 @@ static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
unsigned long sample_us, unsigned long aggr_us,
unsigned long update_us)
{
struct damon_sysfs_intervals *intervals = kmalloc_obj(*intervals,
GFP_KERNEL);
struct damon_sysfs_intervals *intervals = kmalloc_obj(*intervals);
if (!intervals)
return NULL;

View File

@@ -725,12 +725,10 @@ static int damos_test_help_dests_setup(struct damos_migrate_dests *dests,
{
size_t i;
dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests,
GFP_KERNEL);
dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests);
if (!dests->node_id_arr)
return -ENOMEM;
dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests,
GFP_KERNEL);
dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests);
if (!dests->weight_arr) {
kfree(dests->node_id_arr);
dests->node_id_arr = NULL;

View File

@@ -821,8 +821,7 @@ static unsigned long damos_va_migrate(struct damon_target *target,
use_target_nid = dests->nr_dests == 0;
nr_dests = use_target_nid ? 1 : dests->nr_dests;
priv.scheme = s;
priv.migration_lists = kmalloc_objs(*priv.migration_lists, nr_dests,
GFP_KERNEL);
priv.migration_lists = kmalloc_objs(*priv.migration_lists, nr_dests);
if (!priv.migration_lists)
return 0;

View File

@@ -3586,8 +3586,7 @@ static ssize_t merge_across_nodes_store(struct kobject *kobj,
* Allocate stable and unstable together:
* MAXSMP NODES_SHIFT 10 will use 16kB.
*/
buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids,
GFP_KERNEL);
buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids);
/* Let us assume that RB_ROOT is NULL is zero */
if (!buf)
err = -ENOMEM;

View File

@@ -912,8 +912,7 @@ static int __init memory_tier_init(void)
panic("%s() failed to register memory tier subsystem\n", __func__);
#ifdef CONFIG_MIGRATION
node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids,
GFP_KERNEL);
node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids);
WARN_ON(!node_demotion);
#endif

View File

@@ -229,8 +229,7 @@ int mempolicy_set_node_perf(unsigned int node, struct access_coordinate *coords)
if (!new_bw)
return -ENOMEM;
new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids,
GFP_KERNEL);
new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids);
if (!new_wi_state) {
kfree(new_bw);
return -ENOMEM;
@@ -3642,8 +3641,7 @@ static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr,
kstrtou8(buf, 0, &weight) || weight == 0)
return -EINVAL;
new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids,
GFP_KERNEL);
new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
if (!new_wi_state)
return -ENOMEM;
@@ -3695,8 +3693,7 @@ static ssize_t weighted_interleave_auto_store(struct kobject *kobj,
if (kstrtobool(buf, &input))
return -EINVAL;
new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids,
GFP_KERNEL);
new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids);
if (!new_wi_state)
return -ENOMEM;
for (i = 0; i < nr_node_ids; i++)

View File

@@ -618,8 +618,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
* know that mm->notifier_subscriptions can't change while we
* hold the write side of the mmap_lock.
*/
subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions,
GFP_KERNEL);
subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions);
if (!subscriptions)
return -ENOMEM;

View File

@@ -3265,8 +3265,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
spin_lock_init(&cluster_info[i].lock);
if (!(si->flags & SWP_SOLIDSTATE)) {
si->global_cluster = kmalloc_obj(*si->global_cluster,
GFP_KERNEL);
si->global_cluster = kmalloc_obj(*si->global_cluster);
if (!si->global_cluster)
goto err;
for (i = 0; i < SWAP_NR_ORDERS; i++)