sched/fair: Optimize per entity utilization tracking

Currently the load_{sum,avg} and util_{sum,avg} tracking is asymmetric
in that load tracking gets a 2^10 unit from the weight, but util gets
no such factor.

This results in more lost bits for util scaling and asymmetric scaling
rules.
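
To make the asymmetry concrete, here is a small userspace sketch of one accumulation term on each side; NICE_0_LOAD, cap_scale() and the input values are illustrative stand-ins for the kernel's internals, not part of the patch:

  #include <stdio.h>

  #define NICE_0_LOAD		1024	/* weight of a nice-0 task: the 2^10 unit */
  #define SCHED_CAPACITY_SHIFT	10
  #define cap_scale(v, s)	((v) * (s) >> SCHED_CAPACITY_SHIFT)

  int main(void)
  {
  	unsigned long delta = 300;	/* some freq-scaled running time */
  	unsigned long scale_cpu = 1024;	/* full CPU capacity, for simplicity */

  	/* load side: the weight multiplies a 2^10 unit into the sum */
  	unsigned long load_term = NICE_0_LOAD * delta;			/* 307200 */

  	/* util side before the patch: scaled straight back down, no such unit */
  	unsigned long old_util_term = cap_scale(delta, scale_cpu);	/* 300 */

  	/* util side after the patch: keeps the 2^10 factor, symmetric with load */
  	unsigned long new_util_term = delta * scale_cpu;		/* 307200 */

  	printf("load=%lu old_util=%lu new_util=%lu\n",
  	       load_term, old_util_term, new_util_term);
  	return 0;
  }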

Fix this by removing shifts, such that we gain the 2^10 factor from
scaling. There is no risk of overflowing the u32 as the max value is
now LOAD_AVG_MAX << 10, which is still well below UINT_MAX.
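
As a sanity check on that bound (LOAD_AVG_MAX is 47742 in this kernel series; the check itself is only an illustration, not part of the patch):

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define LOAD_AVG_MAX		47742	/* maximum possible load avg */
  #define SCHED_CAPACITY_SHIFT	10	/* the 2^10 factor util_sum now carries */

  int main(void)
  {
  	uint64_t max_util_sum = (uint64_t)LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT;

  	/* 48887808 vs 4294967295: only about 1% of the u32 range is used */
  	assert(max_util_sum < UINT32_MAX);
  	printf("max util_sum = %llu, UINT32_MAX = %u\n",
  	       (unsigned long long)max_util_sum, UINT32_MAX);
  	return 0;
  }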

This further ties the code to the assumption that the LOAD and CAPACITY
shifts are the same (and equal to 10), so add an assertion for that.

This fixes the math for the LOAD_RESOLUTION != 0 case.
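
For reference, the shift relations involved (values as defined in the kernel headers of this period; the tiny program below is only a sketch, assuming the SCHED_LOAD_RESOLUTION != 0 configuration, where the resolution is 10):

  #include <stdio.h>

  #define SCHED_LOAD_RESOLUTION	10	/* the != 0 case; it is 0 otherwise */
  #define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
  #define SCHED_CAPACITY_SHIFT	10

  int main(void)
  {
  	/*
  	 * Capacity/frequency scaling only ever contributes 2^SCHED_CAPACITY_SHIFT,
  	 * so the old "<< SCHED_LOAD_SHIFT" in the util path over-shifted by the
  	 * resolution bits whenever SCHED_LOAD_RESOLUTION != 0.
  	 */
  	printf("old util shift: %d, factor gained from scaling: %d\n",
  	       SCHED_LOAD_SHIFT, SCHED_CAPACITY_SHIFT);

  	/* the relation the new build-time assertion pins down */
  	printf("assumption holds: %d\n",
  	       (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) == 10 &&
  	       SCHED_CAPACITY_SHIFT == 10);
  	return 0;
  }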

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra 2015-09-09 09:06:17 +02:00 committed by Ingo Molnar
parent 6f2b04524f
commit 006cdf025a
1 changed file with 10 additions and 7 deletions


@@ -682,7 +682,7 @@ void init_entity_runnable_average(struct sched_entity *se)
 	sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
 	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
-	sa->util_sum = LOAD_AVG_MAX;
+	sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
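
With the new convention a freshly initialized entity starts out with util_avg = 1024 and util_sum = util_avg * LOAD_AVG_MAX, and the pair stays self-consistent under the division used later in __update_load_avg(). A minimal userspace sketch, assuming SCHED_LOAD_SCALE = 1024 and scale_load_down() being a no-op (no extra load resolution):

  #include <stdio.h>

  #define SCHED_LOAD_SCALE	1024
  #define LOAD_AVG_MAX		47742

  int main(void)
  {
  	unsigned long util_avg = SCHED_LOAD_SCALE;		/* 1024: starts "fully busy" */
  	unsigned int  util_sum = util_avg * LOAD_AVG_MAX;	/* 48887808, fits in u32 */

  	/* consistency check: avg can always be recovered as sum / LOAD_AVG_MAX */
  	printf("util_avg=%lu util_sum=%u sum/max=%u\n",
  	       util_avg, util_sum, util_sum / LOAD_AVG_MAX);
  	return 0;
  }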
@@ -2515,6 +2515,10 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
+#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
+#error "load tracking assumes 2^10 as unit"
+#endif
+
 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
 /*
@@ -2599,7 +2603,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 			}
 		}
 		if (running)
-			sa->util_sum += cap_scale(scaled_delta_w, scale_cpu);
+			sa->util_sum += scaled_delta_w * scale_cpu;
 
 		delta -= delta_w;
@@ -2623,7 +2627,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 				cfs_rq->runnable_load_sum += weight * contrib;
 		}
 		if (running)
-			sa->util_sum += cap_scale(contrib, scale_cpu);
+			sa->util_sum += contrib * scale_cpu;
 	}
 
 	/* Remainder of delta accrued against u_0` */
@@ -2634,7 +2638,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 			cfs_rq->runnable_load_sum += weight * scaled_delta;
 	}
 	if (running)
-		sa->util_sum += cap_scale(scaled_delta, scale_cpu);
+		sa->util_sum += scaled_delta * scale_cpu;
 
 	sa->period_contrib += delta;
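
These three hunks are the same change applied to the three accumulation sites: cap_scale() used to shift every partial contribution down by 10 bits as it was added, truncating each term, whereas keeping the raw product defers the division to the single util_avg computation below. A rough illustration of the rounding difference (scale_cpu and the delta values are made-up inputs):

  #include <stdio.h>

  #define SCHED_CAPACITY_SHIFT	10
  #define cap_scale(v, s)	((v) * (s) >> SCHED_CAPACITY_SHIFT)

  int main(void)
  {
  	unsigned long scale_cpu = 760;			/* a CPU at ~74% capacity */
  	unsigned long deltas[] = { 3, 5, 2, 7 };	/* small per-update contributions */
  	unsigned long old_sum = 0, new_sum = 0;

  	for (int i = 0; i < 4; i++) {
  		old_sum += cap_scale(deltas[i], scale_cpu);	/* pre-patch: truncate each term */
  		new_sum += deltas[i] * scale_cpu;		/* post-patch: keep the 2^10 factor */
  	}

  	/* old_sum dropped low bits on every term; new_sum keeps them to the end */
  	printf("old=%lu new>>10=%lu\n", old_sum, new_sum >> SCHED_CAPACITY_SHIFT);
  	return 0;
  }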
@@ -2644,7 +2648,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 			cfs_rq->runnable_load_avg =
 				div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
 		}
-		sa->util_avg = (sa->util_sum << SCHED_LOAD_SHIFT) / LOAD_AVG_MAX;
+		sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
 	}
 
 	return decayed;
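
Since util_sum now already carries the 2^10 factor, the final average drops the << SCHED_LOAD_SHIFT; both forms saturate at the same SCHED_LOAD_SCALE value, as this small sketch with the series' constants shows (SCHED_LOAD_SHIFT taken as 10, the no-extra-resolution case):

  #include <stdio.h>

  #define LOAD_AVG_MAX		47742
  #define SCHED_LOAD_SHIFT	10

  int main(void)
  {
  	unsigned int old_max_sum = LOAD_AVG_MAX;	/* old saturation point */
  	unsigned int new_max_sum = LOAD_AVG_MAX << 10;	/* new saturation point */

  	unsigned int old_avg = (old_max_sum << SCHED_LOAD_SHIFT) / LOAD_AVG_MAX;
  	unsigned int new_avg = new_max_sum / LOAD_AVG_MAX;

  	/* both conventions top out at 1024, only the units of the sum changed */
  	printf("old_avg=%u new_avg=%u\n", old_avg, new_avg);
  	return 0;
  }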
@@ -2686,8 +2690,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
 		sa->util_avg = max_t(long, sa->util_avg - r, 0);
-		sa->util_sum = max_t(s32, sa->util_sum -
-				     ((r * LOAD_AVG_MAX) >> SCHED_LOAD_SHIFT), 0);
+		sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
 	}
 
 	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
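
Finally, the removed-load path stays consistent under the new units: taking r out of util_avg corresponds to taking r * LOAD_AVG_MAX out of util_sum, because the average is always recovered as sum / LOAD_AVG_MAX. A small check with illustrative numbers:

  #include <stdio.h>

  #define LOAD_AVG_MAX	47742

  int main(void)
  {
  	unsigned long util_avg = 1024, r = 300;		/* r: utilization of a departed task */
  	unsigned int  util_sum = util_avg * LOAD_AVG_MAX;

  	util_avg -= r;					/* avg-side subtraction */
  	util_sum -= r * LOAD_AVG_MAX;			/* post-patch sum-side subtraction */

  	/* the pair still satisfies avg == sum / LOAD_AVG_MAX */
  	printf("util_avg=%lu sum/max=%u\n", util_avg, util_sum / LOAD_AVG_MAX);
  	return 0;
  }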