#include "sched.h"

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system, it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
|
#ifdef CONFIG_SMP
|
|
|
|
static int
|
2011-04-05 17:23:46 +02:00
|
|
|
select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
|
2010-09-22 13:53:15 +02:00
|
|
|
{
|
|
|
|
return task_cpu(p); /* stop tasks as never migrate */
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|
|
|
|
/*
 * check_preempt_curr_stop - preemption test when @p wakes up on @rq.
 *
 * The stop class is the highest priority class (see the file header),
 * so a running stop task can never be preempted by a waking task:
 * intentionally a no-op.
 */
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}
|
|
|
|
|
|
|
|
static struct task_struct *pick_next_task_stop(struct rq *rq)
|
|
|
|
{
|
|
|
|
struct task_struct *stop = rq->stop;
|
|
|
|
|
2011-04-05 17:23:44 +02:00
|
|
|
if (stop && stop->on_rq)
|
2010-09-22 13:53:15 +02:00
|
|
|
return stop;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * enqueue_task_stop - account a stop task becoming runnable on @rq.
 *
 * The stop class keeps no queue structure of its own (pick_next_task
 * only ever looks at rq->stop), so only the runnable-task count needs
 * updating.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	inc_nr_running(rq);
}
|
|
|
|
|
|
|
|
/*
 * dequeue_task_stop - account a stop task leaving the runqueue.
 *
 * Mirror of enqueue_task_stop(): no per-class queue state to tear
 * down, just drop the runnable-task count.
 */
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	dec_nr_running(rq);
}
|
|
|
|
|
|
|
|
/*
 * yield_task_stop - yield the CPU from the current stop task.
 *
 * There is nothing a stop task could yield to; reaching this path is a
 * kernel bug.
 */
static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}
|
|
|
|
|
|
|
|
/*
 * put_prev_task_stop - @prev (a stop task) is being switched out.
 *
 * The stop class keeps no per-task bookkeeping to update: no-op.
 */
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
}
|
|
|
|
|
|
|
|
/*
 * task_tick_stop - scheduler tick while a stop task is running.
 *
 * Stop tasks are not time-sliced (see get_rr_interval_stop), so the
 * tick has nothing to do here.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}
|
|
|
|
|
|
|
|
/*
 * set_curr_task_stop - the current task on @rq entered the stop class.
 *
 * No per-class state to (re)initialize: no-op.
 */
static void set_curr_task_stop(struct rq *rq)
{
}
|
|
|
|
|
2011-01-17 17:03:27 +01:00
|
|
|
/*
 * switched_to_stop - notification that @p was moved into this class.
 *
 * Tasks must never enter the stop class via the normal class-switching
 * paths; hitting this is a kernel bug.
 */
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}
|
|
|
|
|
2011-01-17 17:03:27 +01:00
|
|
|
/*
 * prio_changed_stop - notification that @p's priority changed from
 * @oldprio.
 *
 * Stop tasks have no adjustable priority, so this must never happen.
 */
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!?, what priority? */
}
|
|
|
|
|
|
|
|
/*
 * get_rr_interval_stop - report the round-robin timeslice for @task.
 *
 * Stop tasks are not time-sliced; report 0.
 */
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 * sits above the real-time class in priority (.next chains to
 * rt_sched_class); hooks that are meaningless for stop tasks are
 * either no-ops or BUG() — see the individual callbacks above.
 */
const struct sched_class stop_sched_class = {
	.next			= &rt_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
};
|