From 7b20b916e953cabef569541f991a0a583bc344cb Mon Sep 17 00:00:00 2001
From: Yuyang Du
Date: Tue, 3 May 2016 05:54:27 +0800
Subject: [PATCH] sched/fair: Optimize sum computation with a lookup table

__compute_runnable_contrib() uses a loop to compute the sum, whereas a
table lookup can do it faster, in constant time.

The program to generate the constants is located at:
Documentation/scheduler/sched-avg.txt

Signed-off-by: Yuyang Du
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Morten Rasmussen
Acked-by: Vincent Guittot
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: bsegall@google.com
Cc: dietmar.eggemann@arm.com
Cc: juri.lelli@arm.com
Cc: pjt@google.com
Link: http://lkml.kernel.org/r/1462226078-31904-2-git-send-email-yuyang.du@intel.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e1485710d1ec..8c381a6d56e3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2602,6 +2602,16 @@ static const u32 runnable_avg_yN_sum[] = {
 	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
 };
 
+/*
+ * Precomputed \Sum y^k (1<=k<=n, where n%32=0). Values are rolled down to
+ * lower integers. See Documentation/scheduler/sched-avg.txt for how these
+ * were generated:
+ */
+static const u32 __accumulated_sum_N32[] = {
+	    0, 23371, 35056, 40899, 43820, 45281,
+	46011, 46376, 46559, 46650, 46696, 46719,
+};
+
 /*
  * Approximate:
  *   val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
@@ -2650,14 +2660,9 @@ static u32 __compute_runnable_contrib(u64 n)
 	else if (unlikely(n >= LOAD_AVG_MAX_N))
 		return LOAD_AVG_MAX;
 
-	/* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
-	do {
-		contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
-		contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
-
-		n -= LOAD_AVG_PERIOD;
-	} while (n > LOAD_AVG_PERIOD);
-
+	/* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */
+	contrib = __accumulated_sum_N32[n/LOAD_AVG_PERIOD];
+	n %= LOAD_AVG_PERIOD;
 	contrib = decay_load(contrib, n);
 	return contrib + runnable_avg_yN_sum[n];
 }
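
As a cross-check, the __accumulated_sum_N32[] entries can be reproduced from the
recurrence the removed loop implemented: since y^LOAD_AVG_PERIOD = y^32 = 1/2,
the accumulated sum over i full periods is sum(i) = sum(i-1)/2 +
runnable_avg_yN_sum[LOAD_AVG_PERIOD], with runnable_avg_yN_sum[32] = 23371. The
stand-alone user-space sketch below applies that same integer arithmetic; it is
only an illustration, not the generator shipped in
Documentation/scheduler/sched-avg.txt, and the names sum_period and
N_SUM_ENTRIES are local to this example.

#include <stdio.h>

#define LOAD_AVG_PERIOD	32
#define N_SUM_ENTRIES	12	/* size of __accumulated_sum_N32[] above */

int main(void)
{
	/* \Sum y^k for 1 <= k <= 32, i.e. runnable_avg_yN_sum[LOAD_AVG_PERIOD] */
	const unsigned int sum_period = 23371;
	unsigned int sum = 0;
	int i;

	printf("static const u32 __accumulated_sum_N32[] = {");
	for (i = 0; i < N_SUM_ENTRIES; i++) {
		printf("%s%5u,", (i % 6) ? " " : "\n\t", sum);
		/* same integer arithmetic as the removed do/while loop */
		sum = sum / 2 + sum_period;
	}
	printf("\n};\n");

	return 0;
}

Running this prints the exact table added by the patch. With n < LOAD_AVG_MAX_N
(345), at most 344/32 = 10 full periods can have elapsed, so the
n/LOAD_AVG_PERIOD index used by the new code stays below 11 and well within the
12-entry table.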