sched: Rename sync arguments

In order to extend these functions to carry more than one flag (currently
only sync), rename the 'sync' argument to 'flags' and explicitly define a
WF_ bit space for the individual flags.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 7d47872146 (parent 0763a660a8)
Author:    Peter Zijlstra, 2009-09-14 19:55:44 +02:00
Committer: Ingo Molnar

6 changed files with 33 additions and 24 deletions
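
To make the point of the WF_ space concrete, here is a minimal standalone
sketch, not part of the commit: WF_SYNC mirrors the new define, while
WF_NEXT_FLAG is a purely hypothetical second flag invented for illustration.
A bitmask parameter carries the old boolean plus any later flags without
another round of signature changes:

/*
 * Sketch only: WF_SYNC mirrors the commit; WF_NEXT_FLAG is invented
 * here to show why a bit space beats a bare 'int sync'.
 */
#include <stdio.h>

#define WF_SYNC		0x01	/* waker goes to sleep after wakeup */
#define WF_NEXT_FLAG	0x02	/* hypothetical future flag */

static void wake(int wake_flags)
{
	int sync = wake_flags & WF_SYNC;	/* recover the old boolean */

	printf("sync=%d next=%d\n", sync != 0,
	       (wake_flags & WF_NEXT_FLAG) != 0);
}

int main(void)
{
	wake(WF_SYNC);			/* old sync=1 call site */
	wake(0);			/* old sync=0 call site */
	wake(WF_SYNC | WF_NEXT_FLAG);	/* impossible with a bare int */
	return 0;
}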

--- a/include/linux/sched.h
+++ b/include/linux/sched.h

@@ -1024,6 +1024,11 @@ struct uts_namespace;
 struct rq;
 struct sched_domain;
 
+/*
+ * wake flags
+ */
+#define WF_SYNC	0x01	/* waker goes to sleep after wakeup */
+
 struct sched_class {
 	const struct sched_class *next;
@@ -1031,13 +1036,13 @@ struct sched_class {
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int sync);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest, unsigned long max_load_move,

--- a/include/linux/wait.h
+++ b/include/linux/wait.h

@@ -26,8 +26,8 @@
 #include <asm/current.h>
 
 typedef struct __wait_queue wait_queue_t;
-typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
+int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
 
 struct __wait_queue {
 	unsigned int flags;
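
Worth noting while reading this hunk: struct __wait_queue already has a
member named flags, so generic wake code must carry the WF_ mask under a
distinct name (hence wake_flags in __wake_up_common below). A userspace
stand-in for that collision, with invented stub types:

/*
 * Stand-in types, not from wait.h; only WQ_FLAG_EXCLUSIVE's value
 * matches the real header.
 */
#include <stdio.h>

#define WF_SYNC			0x01
#define WQ_FLAG_EXCLUSIVE	0x01	/* value as in wait.h */

struct wq_entry_stub {
	unsigned int flags;		/* per-waiter WQ_FLAG_* bits */
};

static int wake_one(struct wq_entry_stub *curr, int wake_flags)
{
	unsigned flags = curr->flags;	/* shadows any parameter 'flags' */

	/* the WF_ mask must travel under another name to stay reachable */
	return (wake_flags & WF_SYNC) && !(flags & WQ_FLAG_EXCLUSIVE);
}

int main(void)
{
	struct wq_entry_stub e = { .flags = 0 };

	printf("%d\n", wake_one(&e, WF_SYNC));	/* prints 1 */
	return 0;
}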

--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -636,9 +636,10 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
+static inline
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
-	rq->curr->sched_class->check_preempt_curr(rq, p, sync);
+	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 }
 
 static inline int cpu_of(struct rq *rq)
@@ -2318,14 +2319,15 @@ void task_oncpu_function_call(struct task_struct *p,
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
+static int try_to_wake_up(struct task_struct *p, unsigned int state,
+			  int wake_flags)
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
 	struct rq *rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
-		sync = 0;
+		wake_flags &= ~WF_SYNC;
 
 	this_cpu = get_cpu();
@@ -2352,7 +2354,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	p->state = TASK_WAKING;
 	task_rq_unlock(rq, &flags);
 
-	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync);
+	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
 	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
@@ -2378,7 +2380,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 out_activate:
 #endif /* CONFIG_SMP */
 	schedstat_inc(p, se.nr_wakeups);
-	if (sync)
+	if (wake_flags & WF_SYNC)
 		schedstat_inc(p, se.nr_wakeups_sync);
 	if (orig_cpu != cpu)
 		schedstat_inc(p, se.nr_wakeups_migrate);
@@ -2407,7 +2409,7 @@ out_activate:
 
 out_running:
 	trace_sched_wakeup(rq, p, success);
-	check_preempt_curr(rq, p, sync);
+	check_preempt_curr(rq, p, wake_flags);
 
 	p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
@@ -5562,10 +5564,10 @@ asmlinkage void __sched preempt_schedule_irq(void)
 
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+int default_wake_function(wait_queue_t *curr, unsigned mode, int flags,
 			  void *key)
 {
-	return try_to_wake_up(curr->private, mode, sync);
+	return try_to_wake_up(curr->private, mode, flags);
 }
 EXPORT_SYMBOL(default_wake_function);
@@ -5579,14 +5581,14 @@ EXPORT_SYMBOL(default_wake_function);
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			int nr_exclusive, int sync, void *key)
+			int nr_exclusive, int wake_flags, void *key)
 {
 	wait_queue_t *curr, *next;
 
 	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
 		unsigned flags = curr->flags;
 
-		if (curr->func(curr, mode, sync, key) &&
+		if (curr->func(curr, mode, wake_flags, key) &&
 				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
 			break;
 	}
@@ -5647,16 +5649,16 @@ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
 			int nr_exclusive, void *key)
 {
 	unsigned long flags;
-	int sync = 1;
+	int wake_flags = WF_SYNC;
 
 	if (unlikely(!q))
 		return;
 
 	if (unlikely(!nr_exclusive))
-		sync = 0;
+		wake_flags = 0;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, sync, key);
+	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
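
The kernel/sched.c hunks above keep __wake_up_sync_key()'s observable
behaviour while moving its plumbing onto the mask. A standalone stub model
of that flow, with all _stub names invented here:

/*
 * Stub model, not kernel code: each layer forwards one wake_flags
 * mask instead of a dedicated 'int sync'.
 */
#include <assert.h>

#define WF_SYNC 0x01

static int try_to_wake_up_stub(int wake_flags)
{
	/* the scheduler side only tests the bit it cares about */
	return (wake_flags & WF_SYNC) != 0;
}

static int default_wake_function_stub(int flags)
{
	return try_to_wake_up_stub(flags);	/* forwarded unchanged */
}

static int wake_up_sync_stub(int nr_exclusive)
{
	/* mirrors __wake_up_sync_key(): no exclusive waiters, no hint */
	int wake_flags = nr_exclusive ? WF_SYNC : 0;

	return default_wake_function_stub(wake_flags);
}

int main(void)
{
	assert(wake_up_sync_stub(1) == 1);	/* sync hint delivered */
	assert(wake_up_sync_stub(0) == 0);	/* hint dropped, as before */
	return 0;
}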

--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c

@@ -1331,13 +1331,14 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int sync)
+static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	int new_cpu = cpu;
 	int want_affine = 0;
+	int sync = flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
 		if (sched_feat(AFFINE_WAKEUPS))
@@ -1548,11 +1549,12 @@ static void set_next_buddy(struct sched_entity *se)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	int sync = flags & WF_SYNC;
 
 	update_curr(cfs_rq);

--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c

@@ -6,7 +6,7 @@
  */
 
 #ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int sd_flag, int sync)
+static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks are never migrated */
 }
@@ -14,7 +14,7 @@ static int select_task_rq_idle(struct task_struct *p, int sd_flag, int sync)
 /*
  * Idle tasks are unconditionally rescheduled:
  */
-static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 {
 	resched_task(rq->idle);
 }

--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c

@@ -938,7 +938,7 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int sd_flag, int sync)
+static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
 	struct rq *rq = task_rq(p);
 
@@ -1002,7 +1002,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (p->prio < rq->curr->prio) {
 		resched_task(rq->curr);