sched/fair: Update blocked load from NEWIDLE

Since we already iterate CPUs looking for work on NEWIDLE, use this
iteration to age the blocked load. If the domain for which this is
done completely spans the idle set, we can push the ILB-based aging
forward.

Suggested-by: Brendan Jackman <brendan.jackman@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra, 2017-12-21 11:20:23 +01:00 (committed by Ingo Molnar)
commit e022e0d38a, parent a4064fb614
3 changed files with 45 additions and 6 deletions
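
Background on the load being aged: "blocked" load is the PELT contribution of tasks that went to sleep on a CPU, and it must keep decaying while the CPU idles, otherwise the balancer overestimates how busy that CPU is. Below is a minimal userspace sketch of that decay, assuming only the PELT half-life of LOAD_AVG_PERIOD (32 periods of roughly 1 ms each); the kernel uses fixed-point lookup tables rather than floating point. Build with: cc decay.c -lm

#include <stdio.h>
#include <math.h>

#define LOAD_AVG_PERIOD 32	/* PELT half-life, in ~1 ms periods */

/*
 * Age a blocked-load value across 'ms' idle milliseconds: with nothing
 * running, the load halves every LOAD_AVG_PERIOD milliseconds.
 */
static unsigned long age_blocked_load(unsigned long load, unsigned int ms)
{
	return (unsigned long)(load * pow(0.5, (double)ms / LOAD_AVG_PERIOD));
}

int main(void)
{
	for (unsigned int ms = 0; ms <= 128; ms += 32)
		printf("blocked load after %3u ms idle: %4lu\n",
		       ms, age_blocked_load(1024, ms));
	return 0;
}

This commit performs that aging opportunistically from the NEWIDLE balance pass instead of always waiting for the idle load balancer (ILB).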

kernel/sched/core.c

@@ -6074,6 +6074,7 @@ void __init sched_init(void)
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
 		rq->last_load_update_tick = jiffies;
+		rq->last_blocked_load_update_tick = jiffies;
 		atomic_set(&rq->nohz_flags, 0);
 #endif
 #endif /* CONFIG_SMP */

kernel/sched/fair.c

@@ -5376,6 +5376,14 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
 	}
 	return load;
 }
+
+static struct {
+	cpumask_var_t idle_cpus_mask;
+	atomic_t nr_cpus;
+	unsigned long next_balance;	/* in jiffy units */
+	unsigned long next_stats;
+} nohz ____cacheline_aligned;
+
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /**
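
Note on the hunk above: the nohz bookkeeping struct is moved verbatim from its old home near the ILB code (its deletion appears at the end of this file's diff) so that update_nohz_stats(), added further down, can reference nohz.idle_cpus_mask and nohz.next_stats. The ____cacheline_aligned marker keeps this shared structure on its own cache line; a standalone C sketch of the same idea, assuming a 64-byte line size:

#include <stdio.h>

/* Align (and thus pad) the struct to a cache line to avoid false sharing. */
struct shared_stats {
	unsigned long next_balance;
} __attribute__((aligned(64)));	/* hypothetical 64-byte cache line */

int main(void)
{
	printf("size = %zu, alignment = %zu\n",
	       sizeof(struct shared_stats), _Alignof(struct shared_stats));
	return 0;
}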
@@ -7022,6 +7030,7 @@ enum fbq_type { regular, remote, all };
 #define LBF_NEED_BREAK	0x02
 #define LBF_DST_PINNED	0x04
 #define LBF_SOME_PINNED	0x08
+#define LBF_NOHZ_STATS	0x10
 
 struct lb_env {
 	struct sched_domain	*sd;
@@ -7460,6 +7469,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_is_decayed(cfs_rq))
 			list_del_leaf_cfs_rq(cfs_rq);
 	}
+
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7519,6 +7532,9 @@ static inline void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7853,6 +7869,21 @@ group_type group_classify(struct sched_group *group,
 	return group_other;
 }
 
+static void update_nohz_stats(struct rq *rq)
+{
+#ifdef CONFIG_NO_HZ_COMMON
+	unsigned int cpu = rq->cpu;
+
+	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
+		return;
+
+	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+		return;
+
+	update_blocked_averages(cpu);
+#endif
+}
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
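
The two early returns in update_nohz_stats() keep the NEWIDLE pass cheap: CPUs that are not nohz-idle are skipped, and an idle CPU whose blocked load was already aged during the current jiffy is not touched again. The once-per-jiffy gate relies on the kernel's wrap-safe time_after(); a self-contained sketch of the same comparison (illustration only, not part of the commit):

#include <stdio.h>

/* Wrap-safe "a is after b", mirroring the kernel's time_after() macro. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long last = (unsigned long)-2;	/* tick counter about to wrap */
	unsigned long now  = last + 3;		/* wrapped past zero */

	/* A plain 'now > last' would wrongly say no here. */
	printf("aged since last tick: %s\n",
	       time_after(now, last) ? "yes" : "no");
	return 0;
}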
@@ -7875,6 +7906,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
+		if (env->flags & LBF_NOHZ_STATS)
+			update_nohz_stats(rq);
+
 		/* Bias balancing toward CPUs of our domain: */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -8030,6 +8064,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	if (child && child->flags & SD_PREFER_SIBLING)
 		prefer_sibling = 1;
 
+#ifdef CONFIG_NO_HZ_COMMON
+	if (env->idle == CPU_NEWLY_IDLE) {
+		env->flags |= LBF_NOHZ_STATS;
+
+		if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd)))
+			nohz.next_stats = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD);
+	}
+#endif
+
 	load_idx = get_sd_load_idx(env->sd, env->idle);
 
 	do {
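
The cpumask_subset() test above asks whether the domain walked by this NEWIDLE balance covers every nohz-idle CPU. If it does, all blocked load in the system has just been aged, so nohz.next_stats, the deadline for the next ILB-driven stats update, can be pushed LOAD_AVG_PERIOD milliseconds into the future; this is the "push the ILB-based aging forward" from the changelog. A sketch of the subset semantics on a single-word mask (real cpumasks may span several words):

#include <stdbool.h>
#include <stdio.h>

/* a is a subset of b iff a sets no bit outside b (cpumask_subset semantics). */
static bool mask_subset(unsigned long a, unsigned long b)
{
	return (a & ~b) == 0;
}

int main(void)
{
	unsigned long idle_cpus = 0x0c;	/* CPUs 2-3 are nohz-idle */
	unsigned long sd_span   = 0x0f;	/* this domain spans CPUs 0-3 */

	/* Every idle CPU was just visited: defer the ILB-based aging. */
	printf("defer ILB aging: %s\n",
	       mask_subset(idle_cpus, sd_span) ? "yes" : "no");
	return 0;
}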
@@ -9049,12 +9092,6 @@ static inline int on_null_domain(struct rq *rq)
  * needed, they will kick the idle load balancer, which then does idle
  * load balancing for all the idle CPUs.
  */
-static struct {
-	cpumask_var_t idle_cpus_mask;
-	atomic_t nr_cpus;
-	unsigned long next_balance;	/* in jiffy units */
-	unsigned long next_stats;
-} nohz ____cacheline_aligned;
 
 static inline int find_new_ilb(void)
 {

kernel/sched/sched.h

@@ -762,6 +762,7 @@ struct rq {
 #ifdef CONFIG_NO_HZ_COMMON
 #ifdef CONFIG_SMP
 	unsigned long last_load_update_tick;
+	unsigned long last_blocked_load_update_tick;
 #endif /* CONFIG_SMP */
 	atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */