sched/fair: Track cgroup depth

Track each sched_entity's depth in the cgroup tree; this is useful for
things like find_matching_se(), where you need to walk up to a common
parent of two sched entities.

Keeping the depth cached avoids having to calculate it on the spot by
walking the parent chain, which saves a number of potential cache
misses.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1328936700.2476.17.camel@laptop
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author:    Peter Zijlstra, 2012-02-11 06:05:00 +01:00
Committer: Ingo Molnar
Parent:    3c4017c13f
Commit:    fed14d45f9

2 changed files with 22 additions and 26 deletions
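
Why the cached field pays off: before this change, find_matching_se()
had to call depth_se(), which walks an entity's whole parent chain just
to learn how deep it sits; afterwards the depth is a single load from
the sched_entity itself. Below is a minimal user-space sketch of the two
approaches, not kernel code — struct entity is a stand-in for
sched_entity. (The walking version counts the entity itself while the
cached field starts at 0, but only relative depths matter to the caller,
so the constant offset is harmless.)

struct entity {
        int depth;             /* cached: 0 at the root, parent depth + 1 below */
        struct entity *parent; /* NULL at the root */
};

/* Old style, as in the removed depth_se(): O(depth) pointer chases,
 * each one a potential cache miss. */
static int depth_by_walking(struct entity *se)
{
        int depth = 0;

        for (; se; se = se->parent)
                depth++;

        return depth;
}

/* New style: one read of a field that is kept up to date instead. */
static int depth_by_cache(struct entity *se)
{
        return se->depth;
}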

--- a/include/linux/sched.h
+++ b/include/linux/sched.h

@@ -1078,6 +1078,7 @@ struct sched_entity {
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	int			depth;
 	struct sched_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
 	struct cfs_rq		*cfs_rq;

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c

@@ -322,13 +322,13 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
-static inline int
+static inline struct cfs_rq *
 is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
 	if (se->cfs_rq == pse->cfs_rq)
-		return 1;
+		return se->cfs_rq;
 
-	return 0;
+	return NULL;
 }
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
@@ -336,17 +336,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 	return se->parent;
 }
 
-/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
-	int depth = 0;
-
-	for_each_sched_entity(se)
-		depth++;
-
-	return depth;
-}
-
 static void
 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
@@ -360,8 +349,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 	 */
 
 	/* First walk up until both entities are at same depth */
-	se_depth = depth_se(*se);
-	pse_depth = depth_se(*pse);
+	se_depth = (*se)->depth;
+	pse_depth = (*pse)->depth;
 
 	while (se_depth > pse_depth) {
 		se_depth--;
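
For context, with the cached depths the surrounding function effectively
reduces to the walk below. This is a simplified user-space rendering,
not the kernel code: struct entity and its group field stand in for
sched_entity and its cfs_rq, so is_same_group()'s cfs_rq comparison
becomes plain pointer equality.

struct entity {
        int depth;
        struct entity *parent; /* NULL at the root */
        void *group;           /* equal ->group means same cfs_rq */
};

static void find_matching(struct entity **se, struct entity **pse)
{
        int se_depth = (*se)->depth;
        int pse_depth = (*pse)->depth;

        /* First walk up until both entities are at the same depth. */
        while (se_depth > pse_depth) {
                se_depth--;
                *se = (*se)->parent;
        }
        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = (*pse)->parent;
        }

        /* Then climb in lockstep until a common group is reached. */
        while ((*se)->group != (*pse)->group) {
                *se = (*se)->parent;
                *pse = (*pse)->parent;
        }
}
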
@@ -426,10 +415,10 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
-static inline int
+static inline struct cfs_rq *
 is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
-	return 1;
+	return cfs_rq_of(se); /* always the same rq */
 }
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
@@ -7262,7 +7251,9 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_move_group_fair(struct task_struct *p, int on_rq)
 {
+	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq;
+
 	/*
 	 * If the task was not on the rq at the time of this cgroup movement
 	 * it must have been asleep, sleeping tasks keep their ->vruntime
@@ -7288,23 +7279,24 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
 	 * To prevent boost or penalty in the new cfs_rq caused by delta
 	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
 	 */
-	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
+	if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
 		on_rq = 1;
 
 	if (!on_rq)
-		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+		se->vruntime -= cfs_rq_of(se)->min_vruntime;
 	set_task_rq(p, task_cpu(p));
+	se->depth = se->parent ? se->parent->depth + 1 : 0;
 	if (!on_rq) {
-		cfs_rq = cfs_rq_of(&p->se);
-		p->se.vruntime += cfs_rq->min_vruntime;
+		cfs_rq = cfs_rq_of(se);
+		se->vruntime += cfs_rq->min_vruntime;
 #ifdef CONFIG_SMP
 		/*
 		 * migrate_task_rq_fair() will have removed our previous
 		 * contribution, but we must synchronize for ongoing future
 		 * decay.
 		 */
-		p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
-		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
+		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
+		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
 #endif
 	}
 }
@@ -7400,10 +7392,13 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	if (!se)
 		return;
 
-	if (!parent)
+	if (!parent) {
 		se->cfs_rq = &rq->cfs;
-	else
+		se->depth = 0;
+	} else {
 		se->cfs_rq = parent->my_q;
+		se->depth = parent->depth + 1;
+	}
 
 	se->my_q = cfs_rq;
 	/* guarantee group entities always have weight */