sched/fair: Factor out the {at,de}taching of the per entity load {to,from} the runqueue

Currently we open-code the addition/subtraction of the per entity load
to/from the runqueue; factor this out into helper functions.

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
[ Rewrote the changelog. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1440069720-27038-2-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Byungchul Park 2015-08-20 20:21:56 +09:00 committed by Ingo Molnar
parent 973759c80d
commit a05e8c51ff
1 changed file with 38 additions and 39 deletions


@@ -2664,8 +2664,8 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
-	int decayed;
 	struct sched_avg *sa = &cfs_rq->avg;
+	int decayed;
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
@@ -2695,33 +2695,52 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 static inline void update_load_avg(struct sched_entity *se, int update_tg)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	int cpu = cpu_of(rq_of(cfs_rq));
 	u64 now = cfs_rq_clock_task(cfs_rq);
+	int cpu = cpu_of(rq_of(cfs_rq));
 
 	/*
 	 * Track task load average for carrying it to new CPU after migrated, and
 	 * track group sched_entity load average for task_h_load calc in migration
 	 */
 	__update_load_avg(now, cpu, &se->avg,
-		se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
+			  se->on_rq * scale_load_down(se->load.weight),
+			  cfs_rq->curr == se, NULL);
 
 	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
 		update_tg_load_avg(cfs_rq, 0);
 }
 
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	se->avg.last_update_time = cfs_rq->avg.last_update_time;
+	cfs_rq->avg.load_avg += se->avg.load_avg;
+	cfs_rq->avg.load_sum += se->avg.load_sum;
+	cfs_rq->avg.util_avg += se->avg.util_avg;
+	cfs_rq->avg.util_sum += se->avg.util_sum;
+}
+
+static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+			  &se->avg, se->on_rq * scale_load_down(se->load.weight),
+			  cfs_rq->curr == se, NULL);
+
+	cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
+	cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
+	cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
+	cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+}
+
 /* Add the load generated by se into cfs_rq's load average */
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct sched_avg *sa = &se->avg;
 	u64 now = cfs_rq_clock_task(cfs_rq);
-	int migrated = 0, decayed;
+	int migrated, decayed;
 
-	if (sa->last_update_time == 0) {
-		sa->last_update_time = now;
-		migrated = 1;
-	}
-	else {
+	migrated = !sa->last_update_time;
+	if (!migrated) {
 		__update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
 			se->on_rq * scale_load_down(se->load.weight),
 			cfs_rq->curr == se, NULL);
@@ -2732,12 +2751,8 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->runnable_load_avg += sa->load_avg;
 	cfs_rq->runnable_load_sum += sa->load_sum;
 
-	if (migrated) {
-		cfs_rq->avg.load_avg += sa->load_avg;
-		cfs_rq->avg.load_sum += sa->load_sum;
-		cfs_rq->avg.util_avg += sa->util_avg;
-		cfs_rq->avg.util_sum += sa->util_sum;
-	}
+	if (migrated)
+		attach_entity_load_avg(cfs_rq, se);
 
 	if (decayed || migrated)
 		update_tg_load_avg(cfs_rq, 0);
@@ -2752,7 +2767,7 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->runnable_load_avg =
 		max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
 	cfs_rq->runnable_load_sum =
-		max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
+		max_t(s64,  cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
 }
 
 /*
@@ -2820,6 +2835,11 @@ static inline void
 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
+static inline void
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+
 static inline int idle_balance(struct rq *rq)
 {
 	return 0;
@@ -7909,25 +7929,10 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 		se->vruntime -= cfs_rq->min_vruntime;
 	}
 
-#ifdef CONFIG_SMP
 	/* Catch up with the cfs_rq and remove our load when we leave */
-	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq), &se->avg,
-		se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
-
-	cfs_rq->avg.load_avg =
-		max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
-	cfs_rq->avg.load_sum =
-		max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
-	cfs_rq->avg.util_avg =
-		max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
-	cfs_rq->avg.util_sum =
-		max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
-#endif
+	detach_entity_load_avg(cfs_rq, se);
 }
 
 /*
  * We switched to the sched_fair class.
  */
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
@@ -8040,14 +8045,8 @@ static void task_move_group_fair(struct task_struct *p, int queued)
 		cfs_rq = cfs_rq_of(se);
 		se->vruntime += cfs_rq->min_vruntime;
 
-#ifdef CONFIG_SMP
 		/* Virtually synchronize task with its new cfs_rq */
-		p->se.avg.last_update_time = cfs_rq->avg.last_update_time;
-		cfs_rq->avg.load_avg += p->se.avg.load_avg;
-		cfs_rq->avg.load_sum += p->se.avg.load_sum;
-		cfs_rq->avg.util_avg += p->se.avg.util_avg;
-		cfs_rq->avg.util_sum += p->se.avg.util_sum;
-#endif
+		attach_entity_load_avg(cfs_rq, se);
 	}
 }
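
For reference, below is a minimal userspace sketch (not kernel code, and not part of this commit) of the attach/detach pattern the patch factors out: attaching syncs the entity's last_update_time to the runqueue and adds its load/util contribution, detaching subtracts it while clamping at zero. The simplified structs, the sub_positive() macro (standing in for the kernel's max_t(..., x - y, 0) idiom), and the main() driver are illustrative assumptions only; the decay step done by __update_load_avg() in the real detach path is omitted.

#include <stdio.h>

/* Simplified stand-ins for the kernel's sched_avg and cfs_rq->avg fields. */
struct sched_avg {
	unsigned long long last_update_time;
	long load_avg;
	long long load_sum;
	long util_avg;
	long long util_sum;
};

struct cfs_rq {
	struct sched_avg avg;
};

struct sched_entity {
	struct sched_avg avg;
};

/* Clamp the subtraction at zero, mirroring the max_t(..., 0) pattern. */
#define sub_positive(x, y) ((x) > (y) ? (x) - (y) : 0)

/* Add the entity's contribution after syncing it to the runqueue clock. */
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->avg.last_update_time = cfs_rq->avg.last_update_time;
	cfs_rq->avg.load_avg += se->avg.load_avg;
	cfs_rq->avg.load_sum += se->avg.load_sum;
	cfs_rq->avg.util_avg += se->avg.util_avg;
	cfs_rq->avg.util_sum += se->avg.util_sum;
}

/* Remove the entity's contribution (the kernel decays it first; omitted). */
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq->avg.load_avg = sub_positive(cfs_rq->avg.load_avg, se->avg.load_avg);
	cfs_rq->avg.load_sum = sub_positive(cfs_rq->avg.load_sum, se->avg.load_sum);
	cfs_rq->avg.util_avg = sub_positive(cfs_rq->avg.util_avg, se->avg.util_avg);
	cfs_rq->avg.util_sum = sub_positive(cfs_rq->avg.util_sum, se->avg.util_sum);
}

int main(void)
{
	struct cfs_rq rq = { .avg = { .load_avg = 1024, .util_avg = 300 } };
	struct sched_entity se = { .avg = { .load_avg = 512, .util_avg = 100 } };

	attach_entity_load_avg(&rq, &se);	/* e.g. entity enqueued after migration */
	detach_entity_load_avg(&rq, &se);	/* e.g. task switches away from fair */

	printf("load_avg=%ld util_avg=%ld\n", rq.avg.load_avg, rq.avg.util_avg);
	return 0;
}

Detach clamps at zero, as the patch does with max_t(..., 0), so that a slightly stale entity contribution can never drive the runqueue averages negative.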