sched: fix build error in kernel/sched_rt.c when RT_GROUP_SCHED && !SMP

Ingo found a build error in the scheduler when RT_GROUP_SCHED was
enabled but SMP was not: the priority-tracking code shared under
"#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED" referenced
variables and helpers that are only declared under CONFIG_SMP.  This
patch rearranges the code so that it is a little more streamlined and
compiles under all permutations of SMP, UP and RT_GROUP_SCHED.  It was
boot tested on my 4-way x86_64 and it still passes preempt-test.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
commit 398a153b16
parent b07430ac37
Author: Gregory Haskins <ghaskins@novell.com>
Date:   2009-01-14 09:10:04 -05:00

2 changed files with 176 additions and 96 deletions
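The fix follows a standard kernel layering pattern: each config-dependent
piece of logic becomes a pair of real helpers inside its own #ifdef block,
with empty static-inline stubs in the #else branch, so the common caller
compiles without any conditional code at all.  A minimal, self-contained
sketch of that pattern (hypothetical names, not the kernel code itself):

	#include <stdio.h>

	#define CONFIG_SMP 1		/* flip these two to test all permutations */
	#define CONFIG_RT_GROUP_SCHED 1

	#ifdef CONFIG_SMP
	static void inc_migration(int nr_cpus_allowed)
	{
		if (nr_cpus_allowed > 1)
			printf("smp: task counted as migratory\n");
	}
	#else
	/* stub compiles away entirely on UP builds */
	static inline void inc_migration(int nr_cpus_allowed) { (void)nr_cpus_allowed; }
	#endif

	#ifdef CONFIG_RT_GROUP_SCHED
	static void inc_group(void)
	{
		printf("group: bandwidth accounting started\n");
	}
	#else
	static inline void inc_group(void) { }
	#endif

	/* the caller stays #ifdef-free, mirroring the new inc_rt_tasks() below */
	static void inc_tasks(int nr_cpus_allowed)
	{
		inc_migration(nr_cpus_allowed);
		inc_group();
	}

	int main(void)
	{
		inc_tasks(4);
		return 0;
	}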

--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -466,7 +466,9 @@ struct rt_rq {
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	struct {
 		int curr; /* highest queued rt task prio */
+#ifdef CONFIG_SMP
 		int next; /* next highest */
+#endif
 	} highest_prio;
 #endif
 #ifdef CONFIG_SMP
@@ -8267,8 +8269,10 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
+#ifdef CONFIG_SMP
 	rt_rq->highest_prio.next = MAX_RT_PRIO;
+#endif
 #endif
 #ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
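The net effect on the field layout: highest_prio.curr is needed by anyone
tracking priorities (SMP or group scheduling), while highest_prio.next only
feeds the SMP push/pull logic, so it can drop out of UP builds.  A sketch of
the effective struct under each permutation (annotations are mine, not from
the patch):

	/* CONFIG_SMP (with or without RT_GROUP_SCHED): both fields */
	struct { int curr; int next; } highest_prio;

	/* !CONFIG_SMP && CONFIG_RT_GROUP_SCHED: current highest only */
	struct { int curr; } highest_prio;

	/* !CONFIG_SMP && !CONFIG_RT_GROUP_SCHED: no highest_prio at all */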

--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c

@@ -3,6 +3,40 @@
  * policies)
  */
 
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+	return container_of(rt_se, struct task_struct, rt);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	return rt_se->rt_rq;
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	struct task_struct *p = rt_task_of(rt_se);
+	struct rq *rq = task_rq(p);
+
+	return &rq->rt;
+}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_SMP
 
 static inline int rt_overloaded(struct rq *rq)
@@ -37,19 +71,35 @@ static inline void rt_clear_overload(struct rq *rq)
 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
-static void update_rt_migration(struct rq *rq)
+static void update_rt_migration(struct rt_rq *rt_rq)
 {
-	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
-		if (!rq->rt.overloaded) {
-			rt_set_overload(rq);
-			rq->rt.overloaded = 1;
+	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+		if (!rt_rq->overloaded) {
+			rt_set_overload(rq_of_rt_rq(rt_rq));
+			rt_rq->overloaded = 1;
 		}
-	} else if (rq->rt.overloaded) {
-		rt_clear_overload(rq);
-		rq->rt.overloaded = 0;
+	} else if (rt_rq->overloaded) {
+		rt_clear_overload(rq_of_rt_rq(rt_rq));
+		rt_rq->overloaded = 0;
 	}
 }
 
+static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory++;
+
+	update_rt_migration(rt_rq);
+}
+
+static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory--;
+
+	update_rt_migration(rt_rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -68,14 +118,13 @@ static inline
 void enqueue_pushable_task(struct rq *rq, struct task_struct *p) {}
 static inline
 void dequeue_pushable_task(struct rq *rq, struct task_struct *p) {}
+static inline
+void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+static inline
+void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
 
 #endif /* CONFIG_SMP */
 
-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
-{
-	return container_of(rt_se, struct task_struct, rt);
-}
-
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
@@ -99,16 +148,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return rt_rq->rq;
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	return rt_se->rt_rq;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = rt_se->parent)
 
@@ -196,19 +235,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return container_of(rt_rq, struct rq, rt);
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	struct task_struct *p = rt_task_of(rt_se);
-	struct rq *rq = task_rq(p);
-
-	return &rq->rt;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = NULL)
 
@@ -567,7 +593,7 @@ static void update_curr_rt(struct rq *rq)
 	}
 }
 
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+#if defined CONFIG_SMP
 
 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
 
@@ -580,33 +606,24 @@
 	else
 		return MAX_RT_PRIO;
 }
-#endif
 
-static inline
-void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+static void
+inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
-	int prio = rt_se_prio(rt_se);
-#ifdef CONFIG_SMP
 	struct rq *rq = rq_of_rt_rq(rt_rq);
-#endif
 
-	WARN_ON(!rt_prio(prio));
-	rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (prio < rt_rq->highest_prio.curr) {
+	if (prio < prev_prio) {
 
 		/*
 		 * If the new task is higher in priority than anything on the
-		 * run-queue, we have a new high that must be published to
-		 * the world.  We also know that the previous high becomes
-		 * our next-highest.
+		 * run-queue, we know that the previous high becomes our
+		 * next-highest.
 		 */
-		rt_rq->highest_prio.next = rt_rq->highest_prio.curr;
-		rt_rq->highest_prio.curr = prio;
+		rt_rq->highest_prio.next = prev_prio;
+
-#ifdef CONFIG_SMP
 		if (rq->online)
 			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
-#endif
+
 	} else if (prio == rt_rq->highest_prio.curr)
 		/*
 		 * If the next task is equal in priority to the highest on
@@ -619,72 +636,131 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 		 * Otherwise, we need to recompute next-highest
 		 */
 		rt_rq->highest_prio.next = next_prio(rq);
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1)
-		rq->rt.rt_nr_migratory++;
-	update_rt_migration(rq);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-	if (rt_se_boosted(rt_se))
-		rt_rq->rt_nr_boosted++;
-
-	if (rt_rq->tg)
-		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
-#else
-	start_rt_bandwidth(&def_rt_bandwidth);
-#endif
 }
 
-static inline
-void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+static void
+dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
-#ifdef CONFIG_SMP
 	struct rq *rq = rq_of_rt_rq(rt_rq);
-	int highest_prio = rt_rq->highest_prio.curr;
-#endif
 
-	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-	WARN_ON(!rt_rq->rt_nr_running);
-	rt_rq->rt_nr_running--;
+	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
+		rt_rq->highest_prio.next = next_prio(rq);
 
+	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
+}
+
+#else /* CONFIG_SMP */
+
+static inline
+void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+static inline
+void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+
+#endif /* CONFIG_SMP */
+
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (rt_rq->rt_nr_running) {
-		int prio = rt_se_prio(rt_se);
+static void
+inc_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
 
-		WARN_ON(prio < rt_rq->highest_prio.curr);
+	if (prio < prev_prio)
+		rt_rq->highest_prio.curr = prio;
+
+	inc_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+static void
+dec_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
+
+	if (rt_rq->rt_nr_running) {
+
+		WARN_ON(prio < prev_prio);
 
 		/*
-		 * This may have been our highest or next-highest priority
-		 * task and therefore we may have some recomputation to do
+		 * This may have been our highest task, and therefore
+		 * we may have some recomputation to do
 		 */
-		if (prio == rt_rq->highest_prio.curr) {
+		if (prio == prev_prio) {
 			struct rt_prio_array *array = &rt_rq->active;
 
 			rt_rq->highest_prio.curr =
 				sched_find_first_bit(array->bitmap);
 		}
 
-		if (prio <= rt_rq->highest_prio.next)
-			rt_rq->highest_prio.next = next_prio(rq);
 	} else
 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1)
-		rq->rt.rt_nr_migratory--;
 
-	if (rq->online && rt_rq->highest_prio.curr != highest_prio)
-		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
+	dec_rt_prio_smp(rt_rq, prio, prev_prio);
+}
 
-	update_rt_migration(rq);
-#endif /* CONFIG_SMP */
+#else
+
+static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
+static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+
+#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted++;
+
+	if (rt_rq->tg)
+		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+}
+
+static void
+dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted--;
 
 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
-#endif
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	start_rt_bandwidth(&def_rt_bandwidth);
+}
+
+static inline
+void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	int prio = rt_se_prio(rt_se);
+
+	WARN_ON(!rt_prio(prio));
+	rt_rq->rt_nr_running++;
+
+	inc_rt_prio(rt_rq, prio);
+	inc_rt_migration(rt_se, rt_rq);
+	inc_rt_group(rt_se, rt_rq);
+}
+
+static inline
+void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
+	WARN_ON(!rt_rq->rt_nr_running);
+	rt_rq->rt_nr_running--;
+
+	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+	dec_rt_migration(rt_se, rt_rq);
+	dec_rt_group(rt_se, rt_rq);
 }
 
 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -1453,7 +1529,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 			rq->rt.rt_nr_migratory--;
 		}
 
-		update_rt_migration(rq);
+		update_rt_migration(&rq->rt);
 	}
 
 	cpumask_copy(&p->cpus_allowed, new_mask);
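For reference, the failure mode visible in the removed lines above reduces
to a variable declared under a narrow config guard but used under a wider
one; it breaks exactly when the wider guard is active and the narrower one
is not.  A compilable sketch with hypothetical names; deleting the
CONFIG_SMP define reproduces the RT_GROUP_SCHED && !SMP error:

	#define CONFIG_RT_GROUP_SCHED 1
	#define CONFIG_SMP 1	/* remove this line: 'rq' below becomes undeclared */

	static int next_prio_sketch(int rq) { return rq + 1; }

	int dec_sketch(void)
	{
	#ifdef CONFIG_SMP
		int rq = 0;			/* declared only for SMP... */
	#endif
	#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
		return next_prio_sketch(rq);	/* ...but used for SMP || GROUP */
	#else
		return 0;
	#endif
	}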