mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
sched/fair: Switch to rcu_dereference_all()
With the {rcu,sched,bh} RCU flavours being unified, it no longer makes
sense to check for just the rcu one. Switch to the _all family of
verification, which covers all three of the listed flavours.

Notably, this will enable us to remove some superfluous
rcu_read_lock() regions when we know they are inside preempt/IRQ
disabled regions.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
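Below is a minimal sketch (not part of the patch) modelled on the
task_numa_group_id() hunk; numa_gid_sketch() is a hypothetical function.
With the unified flavours, a preempt-disabled section is already a valid
RCU read-side critical section, and rcu_dereference_all() lets lockdep
accept it without a nested rcu_read_lock():

/* Hypothetical sketch; not the literal patch. */
static pid_t numa_gid_sketch(struct task_struct *p)
{
	struct numa_group *ng;
	pid_t gid = 0;

	preempt_disable();	/* already an RCU read-side section */
	/*
	 * rcu_dereference() would trip lockdep here without a nested
	 * rcu_read_lock(); rcu_dereference_all() accepts any of the
	 * rcu/bh/sched read-side contexts.
	 */
	ng = rcu_dereference_all(p->numa_group);
	if (ng)
		gid = ng->gid;
	preempt_enable();

	return gid;
}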
commit 71fedc41c2
parent f24165bfa7
committed by Ingo Molnar
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1513,7 +1513,7 @@ static unsigned int task_scan_start(struct task_struct *p)
 	/* Scale the maximum scan period with the amount of shared memory. */
 	rcu_read_lock();
-	ng = rcu_dereference(p->numa_group);
+	ng = rcu_dereference_all(p->numa_group);
 	if (ng) {
 		unsigned long shared = group_faults_shared(ng);
 		unsigned long private = group_faults_priv(ng);
@@ -1580,7 +1580,7 @@ pid_t task_numa_group_id(struct task_struct *p)
 	pid_t gid = 0;

 	rcu_read_lock();
-	ng = rcu_dereference(p->numa_group);
+	ng = rcu_dereference_all(p->numa_group);
 	if (ng)
 		gid = ng->gid;
 	rcu_read_unlock();
@@ -2239,7 +2239,7 @@ static bool task_numa_compare(struct task_numa_env *env,
 		return false;

 	rcu_read_lock();
-	cur = rcu_dereference(dst_rq->curr);
+	cur = rcu_dereference_all(dst_rq->curr);
 	if (cur && ((cur->flags & (PF_EXITING | PF_KTHREAD)) ||
 		    !cur->mm))
 		cur = NULL;
@@ -2284,7 +2284,7 @@ static bool task_numa_compare(struct task_numa_env *env,
 	 * If dst and source tasks are in the same NUMA group, or not
 	 * in any group then look only at task weights.
 	 */
-	cur_ng = rcu_dereference(cur->numa_group);
+	cur_ng = rcu_dereference_all(cur->numa_group);
 	if (cur_ng == p_ng) {
 		/*
 		 * Do not swap within a group or between tasks that have
@@ -2499,7 +2499,7 @@ static int task_numa_migrate(struct task_struct *p)
 	 * to satisfy here.
 	 */
 	rcu_read_lock();
-	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
+	sd = rcu_dereference_all(per_cpu(sd_numa, env.src_cpu));
 	if (sd) {
 		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
 		env.imb_numa_nr = sd->imb_numa_nr;
@@ -3022,7 +3022,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 	if (!cpupid_match_pid(tsk, cpupid))
 		goto no_join;

-	grp = rcu_dereference(tsk->numa_group);
+	grp = rcu_dereference_all(tsk->numa_group);
 	if (!grp)
 		goto no_join;

@@ -4435,7 +4435,7 @@ static inline void migrate_se_pelt_lag(struct sched_entity *se)
 	rq = rq_of(cfs_rq);

 	rcu_read_lock();
-	is_idle = is_idle_task(rcu_dereference(rq->curr));
+	is_idle = is_idle_task(rcu_dereference_all(rq->curr));
 	rcu_read_unlock();

 	/*
@@ -7462,7 +7462,7 @@ static inline void set_idle_cores(int cpu, int val)
 {
 	struct sched_domain_shared *sds;

-	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+	sds = rcu_dereference_all(per_cpu(sd_llc_shared, cpu));
 	if (sds)
 		WRITE_ONCE(sds->has_idle_cores, val);
 }
@@ -7471,7 +7471,7 @@ static inline bool test_idle_cores(int cpu)
 {
 	struct sched_domain_shared *sds;

-	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+	sds = rcu_dereference_all(per_cpu(sd_llc_shared, cpu));
 	if (sds)
 		return READ_ONCE(sds->has_idle_cores);

@@ -7600,7 +7600,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

 	if (sched_feat(SIS_UTIL)) {
-		sd_share = rcu_dereference(per_cpu(sd_llc_shared, target));
+		sd_share = rcu_dereference_all(per_cpu(sd_llc_shared, target));
 		if (sd_share) {
 			/* because !--nr is the condition to stop scan */
 			nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
@@ -7806,7 +7806,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 * sd_asym_cpucapacity rather than sd_llc.
 	 */
 	if (sched_asym_cpucap_active()) {
-		sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
+		sd = rcu_dereference_all(per_cpu(sd_asym_cpucapacity, target));
 		/*
 		 * On an asymmetric CPU capacity system where an exclusive
 		 * cpuset defines a symmetric island (i.e. one unique
@@ -7821,7 +7821,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		}
 	}

-	sd = rcu_dereference(per_cpu(sd_llc, target));
+	sd = rcu_dereference_all(per_cpu(sd_llc, target));
 	if (!sd)
 		return target;

@@ -8290,7 +8290,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 	struct energy_env eenv;

 	rcu_read_lock();
-	pd = rcu_dereference(rd->pd);
+	pd = rcu_dereference_all(rd->pd);
 	if (!pd)
 		goto unlock;

@@ -8298,7 +8298,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 	 * Energy-aware wake-up happens on the lowest sched_domain starting
 	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
 	 */
-	sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
+	sd = rcu_dereference_all(*this_cpu_ptr(&sd_asym_cpucapacity));
 	while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
 		sd = sd->parent;
 	if (!sd)
@@ -9305,7 +9305,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
  */
 static long migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 {
-	struct numa_group *numa_group = rcu_dereference(p->numa_group);
+	struct numa_group *numa_group = rcu_dereference_all(p->numa_group);
 	unsigned long src_weight, dst_weight;
 	int src_nid, dst_nid, dist;

@@ -10985,7 +10985,7 @@ static void update_idle_cpu_scan(struct lb_env *env,
 	if (env->sd->span_weight != llc_weight)
 		return;

-	sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
+	sd_share = rcu_dereference_all(per_cpu(sd_llc_shared, env->dst_cpu));
 	if (!sd_share)
 		return;

@@ -11335,7 +11335,7 @@ static struct sched_group *sched_balance_find_src_group(struct lb_env *env)
 		goto force_balance;

 	if (!is_rd_overutilized(env->dst_rq->rd) &&
-	    rcu_dereference(env->dst_rq->rd->pd))
+	    rcu_dereference_all(env->dst_rq->rd->pd))
 		goto out_balanced;

 	/* ASYM feature bypasses nice load balance check */
@@ -12424,7 +12424,7 @@ static void nohz_balancer_kick(struct rq *rq)

 	rcu_read_lock();

-	sd = rcu_dereference(rq->sd);
+	sd = rcu_dereference_all(rq->sd);
 	if (sd) {
 		/*
 		 * If there's a runnable CFS task and the current CPU has reduced
@@ -12436,7 +12436,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		}
 	}

-	sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
+	sd = rcu_dereference_all(per_cpu(sd_asym_packing, cpu));
 	if (sd) {
 		/*
 		 * When ASYM_PACKING; see if there's a more preferred CPU
@@ -12454,7 +12454,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		}
 	}

-	sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
+	sd = rcu_dereference_all(per_cpu(sd_asym_cpucapacity, cpu));
 	if (sd) {
 		/*
 		 * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
@@ -12475,7 +12475,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		goto unlock;
 	}

-	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+	sds = rcu_dereference_all(per_cpu(sd_llc_shared, cpu));
 	if (sds) {
 		/*
 		 * If there is an imbalance between LLC domains (IOW we could
@@ -12507,7 +12507,7 @@ static void set_cpu_sd_state_busy(int cpu)
 	struct sched_domain *sd;

 	rcu_read_lock();
-	sd = rcu_dereference(per_cpu(sd_llc, cpu));
+	sd = rcu_dereference_all(per_cpu(sd_llc, cpu));

 	if (!sd || !sd->nohz_idle)
 		goto unlock;
@@ -12537,7 +12537,7 @@ static void set_cpu_sd_state_idle(int cpu)
 	struct sched_domain *sd;

 	rcu_read_lock();
-	sd = rcu_dereference(per_cpu(sd_llc, cpu));
+	sd = rcu_dereference_all(per_cpu(sd_llc, cpu));

 	if (!sd || sd->nohz_idle)
 		goto unlock;
@@ -13915,7 +13915,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
 	struct numa_group *ng;

 	rcu_read_lock();
-	ng = rcu_dereference(p->numa_group);
+	ng = rcu_dereference_all(p->numa_group);
 	for_each_online_node(node) {
 		if (p->numa_faults) {
 			tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2011,7 +2011,7 @@ queue_balance_callback(struct rq *rq,
 }

 #define rcu_dereference_sched_domain(p) \
-	rcu_dereference_check((p), lockdep_is_held(&sched_domains_mutex))
+	rcu_dereference_all_check((p), lockdep_is_held(&sched_domains_mutex))

 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
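The kernel/sched/sched.h hunk widens the lockdep condition on the
domain-tree accessor: with rcu_dereference_all_check(), the dereference
is accepted from any RCU read-side context (rcu, bh or sched, which
covers preempt/IRQ-disabled regions) or while sched_domains_mutex is
held on the domain-rebuild path. A minimal sketch of a caller, assuming
a hypothetical walk_domains() helper that is not in the patch:

/* Hypothetical caller; not kernel code. */
static void walk_domains(struct rq *rq)
{
	struct sched_domain *sd;

	/*
	 * Legal without an explicit rcu_read_lock() wherever lockdep
	 * already sees a reader: any rcu/bh/sched read-side section,
	 * or sched_domains_mutex held while the tree is rebuilt.
	 */
	for (sd = rcu_dereference_sched_domain(rq->sd); sd; sd = sd->parent)
		; /* inspect each level of the domain hierarchy */
}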