sched/fair: Make update_min_vruntime() more readable

The update_min_vruntime() control flow can be simplified.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: minchan.kim@lge.com
Link: http://lkml.kernel.org/r/1436088829-25768-1-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Authored by Byungchul Park on 2015-07-05 18:33:48 +09:00, committed by Ingo Molnar
parent 62cc20bcf2
commit 97a7142f15
1 changed file with 7 additions and 10 deletions

@@ -464,20 +464,17 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 {
 	u64 vruntime = cfs_rq->min_vruntime;
 
-	if (cfs_rq->curr)
-		vruntime = cfs_rq->curr->vruntime;
-
 	if (cfs_rq->rb_leftmost) {
 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
 						   struct sched_entity,
 						   run_node);
 
-		if (!cfs_rq->curr)
-			vruntime = se->vruntime;
-		else
-			vruntime = min_vruntime(vruntime, se->vruntime);
+		vruntime = se->vruntime;
 	}
 
+	if (cfs_rq->curr)
+		vruntime = min_vruntime(vruntime, cfs_rq->curr->vruntime);
+
 	/* ensure we never gain time by being placed backwards. */
 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
 #ifndef CONFIG_64BIT
@@ -5988,7 +5985,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
  *
  * The adjacency matrix of the resulting graph is given by:
  *
  *             log_2 n
  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
  *             k = 0
  *
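
As an aside on equation (6) quoted above: it is easy to evaluate directly. The snippet below is an illustrative standalone program, not kernel code, assuming a machine of n = 8 CPUs (so log_2 n = 3):

#include <stdio.h>

int main(void)
{
	int n = 8, log2n = 3;

	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			int a = 0;

			/* A_i,j = union over k = 0 .. log_2 n of (6) */
			for (int k = 0; k <= log2n; k++)
				a |= (i % (1 << k) == 0) &&
				     (i / (1 << (k + 1)) == j / (1 << (k + 1)));
			printf("%d ", a);
		}
		printf("\n");
	}
	return 0;
}

Row i lists the CPUs that CPU i is connected to: CPU 0 reaches every CPU, while odd-numbered CPUs reach only their immediate sibling, which is the sparse structure the surrounding comment builds on.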
@@ -6034,7 +6031,7 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
  *
  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
  *      rewrite all of this once again.]
  */
 
 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
@@ -6696,7 +6693,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 		/*
 		 * !SD_OVERLAP domains can assume that child groups
 		 * span the current group.
 		 */
 
 		group = child->groups;
 		do {
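
To see the simplified control flow in isolation, here is a minimal standalone sketch; it is not kernel code. The structs are stubbed down to the fields update_min_vruntime() touches, and the rbtree lookup is replaced by a plain leftmost pointer; only the min_vruntime()/max_vruntime() helpers mirror their overflow-safe counterparts in kernel/sched/fair.c.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

struct sched_entity {
	u64 vruntime;
};

struct cfs_rq {
	u64 min_vruntime;
	struct sched_entity *curr;	/* currently running entity, if any */
	struct sched_entity *leftmost;	/* stand-in for the rb_leftmost lookup */
};

/* Signed-overflow-safe comparisons, mirroring kernel/sched/fair.c. */
static u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;
	return max_vruntime;
}

static u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;
	return min_vruntime;
}

/*
 * The patched flow: start from the leftmost waiter when the tree is
 * non-empty, fold in the running entity with a single min_vruntime(),
 * then clamp so min_vruntime never moves backwards.
 */
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->leftmost)
		vruntime = cfs_rq->leftmost->vruntime;

	if (cfs_rq->curr)
		vruntime = min_vruntime(vruntime, cfs_rq->curr->vruntime);

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

int main(void)
{
	struct sched_entity curr = { .vruntime = 120 };
	struct sched_entity left = { .vruntime = 100 };
	struct cfs_rq rq = { .min_vruntime = 90, .curr = &curr, .leftmost = &left };

	update_min_vruntime(&rq);
	printf("min_vruntime = %llu\n", (unsigned long long)rq.min_vruntime); /* 100 */

	rq.leftmost = NULL;	/* empty tree: only the running entity counts */
	curr.vruntime = 95;	/* behind min_vruntime: the clamp holds at 100 */
	update_min_vruntime(&rq);
	printf("min_vruntime = %llu\n", (unsigned long long)rq.min_vruntime); /* 100 */

	return 0;
}

Both calls print min_vruntime = 100: the first picks up the leftmost waiter and caps it with the running entity's vruntime; the second shows max_vruntime() refusing to move min_vruntime backwards once the only runnable entity falls behind it.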