sched/fair: Make group power more consistent

For easier access, fewer dereferences and a more consistent value, store
the group power in update_sg_lb_stats() and use it thereafter. The
actual value in sched_group::sched_group_power::power can change
throughout the load-balance pass if we're unlucky.
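As a rough illustration only (a minimal, hypothetical user-space sketch, not the
kernel code; the struct and function names are reused purely for readability),
the pattern being applied is: snapshot group->sgp->power once into the per-pass
stats and derive every later value from that snapshot, so a concurrent
update_group_power() cannot make one pass disagree with itself:

/* Hypothetical stand-alone sketch of the caching pattern (not kernel code). */
#include <stdio.h>

#define SCHED_POWER_SCALE 1024UL

struct sched_group_power { unsigned long power; };
struct sched_group { struct sched_group_power sgp; };

struct sg_lb_stats {
	unsigned long group_load;
	unsigned long group_power;	/* snapshot, stable for the whole pass */
	unsigned long avg_load;
};

static void update_sg_lb_stats(struct sg_lb_stats *sgs, struct sched_group *group)
{
	/* Read the possibly-changing value exactly once... */
	sgs->group_power = group->sgp.power;
	/* ...and compute everything else from the snapshot. */
	sgs->avg_load = (sgs->group_load * SCHED_POWER_SCALE) / sgs->group_power;
}

int main(void)
{
	struct sched_group group = { .sgp = { .power = 1024 } };
	struct sg_lb_stats sgs = { .group_load = 2048 };

	update_sg_lb_stats(&sgs, &group);

	/*
	 * Even if group.sgp.power changes after this point, later users of
	 * sgs.group_power and sgs.avg_load still see mutually consistent values.
	 */
	printf("group_power=%lu avg_load=%lu\n", sgs.group_power, sgs.avg_load);
	return 0;
}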

Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-739xxqkyvftrhnh9ncudutc7@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra, 2013-08-15 20:37:48 +02:00, committed by Ingo Molnar
commit 3ae11c90fd, parent 38d0f77085
1 changed file with 22 additions and 19 deletions

@@ -4239,6 +4239,7 @@ struct sg_lb_stats {
 	unsigned long group_load; /* Total load over the CPUs of the group */
 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
 	unsigned long load_per_task;
+	unsigned long group_power;
 	unsigned int sum_nr_running; /* Nr tasks running in the group */
 	unsigned int group_capacity;
 	unsigned int idle_cpus;
@@ -4518,7 +4519,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		update_group_power(env->sd, env->dst_cpu);

 	/* Adjust by relative CPU power of the group */
-	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
+	sgs->group_power = group->sgp->power;
+	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;

 	/*
 	 * Consider the group unbalanced when the imbalance is larger
@@ -4537,7 +4539,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		sgs->group_imb = 1;

 	sgs->group_capacity =
-		DIV_ROUND_CLOSEST(group->sgp->power, SCHED_POWER_SCALE);
+		DIV_ROUND_CLOSEST(sgs->group_power, SCHED_POWER_SCALE);

 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(env->sd, group);
@@ -4637,7 +4639,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,

 		/* Now, start updating sd_lb_stats */
 		sds->total_load += sgs->group_load;
-		sds->total_pwr += sg->sgp->power;
+		sds->total_pwr += sgs->group_power;

 		if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
@@ -4685,8 +4687,9 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 	if (env->dst_cpu > busiest_cpu)
 		return 0;

-	env->imbalance = DIV_ROUND_CLOSEST(sds->busiest_stat.avg_load *
-					sds->busiest->sgp->power, SCHED_POWER_SCALE);
+	env->imbalance = DIV_ROUND_CLOSEST(
+		sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
+		SCHED_POWER_SCALE);

 	return 1;
 }
@@ -4716,7 +4719,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)

 	scaled_busy_load_per_task =
 		(busiest->load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->sgp->power;
+		busiest->group_power;

 	if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
 	    (scaled_busy_load_per_task * imbn)) {
@@ -4730,32 +4733,32 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	 * moving them.
 	 */

-	pwr_now += sds->busiest->sgp->power *
+	pwr_now += busiest->group_power *
 			min(busiest->load_per_task, busiest->avg_load);
-	pwr_now += sds->local->sgp->power *
+	pwr_now += local->group_power *
 			min(local->load_per_task, local->avg_load);
 	pwr_now /= SCHED_POWER_SCALE;

 	/* Amount of load we'd subtract */
 	tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->sgp->power;
+		busiest->group_power;
 	if (busiest->avg_load > tmp) {
-		pwr_move += sds->busiest->sgp->power *
+		pwr_move += busiest->group_power *
 			    min(busiest->load_per_task,
 				busiest->avg_load - tmp);
 	}

 	/* Amount of load we'd add */
-	if (busiest->avg_load * sds->busiest->sgp->power <
+	if (busiest->avg_load * busiest->group_power <
 	    busiest->load_per_task * SCHED_POWER_SCALE) {
-		tmp = (busiest->avg_load * sds->busiest->sgp->power) /
-		      sds->local->sgp->power;
+		tmp = (busiest->avg_load * busiest->group_power) /
+		      local->group_power;
 	} else {
 		tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-		      sds->local->sgp->power;
+		      local->group_power;
 	}
-	pwr_move += sds->local->sgp->power *
-			min(local->load_per_task, local->avg_load + tmp);
+	pwr_move += local->group_power *
+		    min(local->load_per_task, local->avg_load + tmp);
 	pwr_move /= SCHED_POWER_SCALE;

 	/* Move if we gain throughput */
@@ -4800,7 +4803,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 			(busiest->sum_nr_running - busiest->group_capacity);

 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
-		load_above_capacity /= sds->busiest->sgp->power;
+		load_above_capacity /= busiest->group_power;
 	}

 	/*
@@ -4818,8 +4821,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s

 	/* How much load to actually move to equalise the imbalance */
 	env->imbalance = min(
-		max_pull * sds->busiest->sgp->power,
-		(sds->avg_load - local->avg_load) * sds->local->sgp->power
+		max_pull * busiest->group_power,
+		(sds->avg_load - local->avg_load) * local->group_power
 	) / SCHED_POWER_SCALE;

 	/*