From 2539fc82aa9b07d968cf9ba1ffeec3e0416ac721 Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Thu, 24 May 2018 15:10:23 +0100
Subject: [PATCH] sched/fair: Update util_est before updating schedutil

When a task is enqueued the estimated utilization of a CPU is updated
to better support the selection of the required frequency.

However, schedutil is (implicitly) updated by update_load_avg() which
always happens before util_est_{en,de}queue(), thus potentially
introducing a latency between estimated utilization updates and
frequency selections.

Let's update util_est at the beginning of enqueue_task_fair(), which
will ensure that all schedutil updates will see the most updated
estimated utilization value for a CPU.

Reported-by: Vincent Guittot
Signed-off-by: Patrick Bellasi
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Thomas Gleixner
Acked-by: Viresh Kumar
Acked-by: Vincent Guittot
Cc: Dietmar Eggemann
Cc: Joel Fernandes
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Morten Rasmussen
Cc: Peter Zijlstra
Cc: Rafael J. Wysocki
Cc: Steve Muckle
Fixes: 7f65ea42eb00 ("sched/fair: Add util_est on top of PELT")
Link: http://lkml.kernel.org/r/20180524141023.13765-3-patrick.bellasi@arm.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 748cb054fefd..e497c05aab7f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5385,6 +5385,14 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 
+	/*
+	 * The code below (indirectly) updates schedutil which looks at
+	 * the cfs_rq utilization to select a frequency.
+	 * Let's add the task's estimated utilization to the cfs_rq's
+	 * estimated utilization, before we update schedutil.
+	 */
+	util_est_enqueue(&rq->cfs, p);
+
 	/*
 	 * If in_iowait is set, the code below may not trigger any cpufreq
 	 * utilization updates, so do it here explicitly with the IOWAIT flag
@@ -5426,7 +5434,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!se)
 		add_nr_running(rq, 1);
 
-	util_est_enqueue(&rq->cfs, p);
 	hrtick_update(rq);
 }
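
For readers who want to see the resulting ordering in one place, below is a
condensed, illustrative sketch of enqueue_task_fair() with this patch applied.
It is not the verbatim function body: everything except the calls relevant to
the ordering has been elided, and the cfs_rq_util_change() ->
cpufreq_update_util() chain mentioned in the comments is how
update_load_avg() reaches schedutil in this era's kernel/sched/fair.c.

/*
 * Illustrative sketch only -- not the verbatim kernel function.
 * Shape of enqueue_task_fair() after this patch, reduced to the calls
 * that matter for the update ordering.
 */
static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_entity *se = &p->se;

	/* 1) Add p's estimated utilization to rq->cfs first ... */
	util_est_enqueue(&rq->cfs, p);

	/* 2) ... so the explicit cpufreq update for iowait wakeups ... */
	if (p->in_iowait)
		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);

	/*
	 * 3) ... and the implicit updates triggered below, via
	 *    enqueue_entity()/update_load_avg() -> cfs_rq_util_change()
	 *    -> cpufreq_update_util(), all see the new estimate as well.
	 */
	for_each_sched_entity(se) {
		/* enqueue_entity() / update_load_avg(), elided */
	}

	if (!se)
		add_nr_running(rq, 1);

	/* util_est_enqueue() used to be here, after all of the above. */
	hrtick_update(rq);
}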