sched: Optimize migrate_disable

Change from task_rq_lock() to raw_spin_lock(&rq->lock) to avoid a few
atomic ops. See comment on why it should be safe.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
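
For context on the "few atomic ops" being avoided: task_rq_lock() takes the per-task p->pi_lock and the runqueue lock, with a retry loop in case the task migrates in between. The sketch below is a paraphrase of that helper as it looks in this kernel, not part of this patch, and is only meant to show what the single raw_spin_lock_irqsave(&rq->lock, flags) path skips:

static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		/* One atomic op for the per-task pi_lock... */
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		/* ...and another for the runqueue lock. */
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		/* Task moved to another rq; drop both locks and retry. */
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}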
Peter Zijlstra 2011-08-11 15:03:35 +02:00 committed by Alibek Omarov
parent 1b6c3983a0
commit d921689f21
1 changed file with 20 additions and 4 deletions


@@ -4744,7 +4744,19 @@ void migrate_disable(void)
 		preempt_enable();
 		return;
 	}
-	rq = task_rq_lock(p, &flags);
+
+	/*
+	 * Since this is always current we can get away with only locking
+	 * rq->lock, the ->cpus_allowed value can normally only be changed
+	 * while holding both p->pi_lock and rq->lock, but seeing that this
+	 * is current, we cannot actually be waking up, so all code that
+	 * relies on serialization against p->pi_lock is out of scope.
+	 *
+	 * Taking rq->lock serializes us against things like
+	 * set_cpus_allowed_ptr() that can still happen concurrently.
+	 */
+	rq = this_rq();
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	p->migrate_disable = 1;
 	mask = tsk_cpus_allowed(p);
 
@@ -4755,7 +4767,7 @@ void migrate_disable(void)
 		p->sched_class->set_cpus_allowed(p, mask);
 		p->nr_cpus_allowed = cpumask_weight(mask);
 	}
-	task_rq_unlock(rq, p, &flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 	preempt_enable();
 }
 EXPORT_SYMBOL(migrate_disable);
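
The comment added above rests on the fact that anything changing ->cpus_allowed, such as set_cpus_allowed_ptr(), still takes rq->lock (via task_rq_lock()), so it cannot run concurrently with the rq->lock-only section in migrate_disable(). A rough sketch of that caller, paraphrased and heavily elided rather than taken from this diff:

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;
	int ret = 0;

	/*
	 * Takes p->pi_lock *and* rq->lock, so it is fully serialized
	 * against the rq->lock-only section in migrate_disable().
	 */
	rq = task_rq_lock(p, &flags);

	/* ... validate new_mask, update ->cpus_allowed, migrate if needed ... */

	task_rq_unlock(rq, p, &flags);
	return ret;
}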
@@ -4783,7 +4795,11 @@ void migrate_enable(void)
 		preempt_enable();
 		return;
 	}
-	rq = task_rq_lock(p, &flags);
+
+	/*
+	 * See comment in migrate_disable().
+	 */
+	rq = this_rq();
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	p->migrate_disable = 0;
 	mask = tsk_cpus_allowed(p);
 
@@ -4795,7 +4811,7 @@ void migrate_enable(void)
 		p->sched_class->set_cpus_allowed(p, mask);
 		p->nr_cpus_allowed = cpumask_weight(mask);
 	}
-	task_rq_unlock(rq, p, &flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 	unpin_current_cpu();
 	preempt_enable();
 }
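
For illustration only (not part of this commit): migrate_disable()/migrate_enable() nest by way of the p->migrate_disable counter, so only the outermost pair goes through the rq->lock section shown above. A minimal usage sketch, assuming that nesting behaviour:

	migrate_disable();	/* outermost: pins us to this CPU under rq->lock */

	migrate_disable();	/* nested: only bumps p->migrate_disable */
	/* ... touch per-CPU data; we stay on this CPU even if preempted ... */
	migrate_enable();	/* nested: only drops the counter */

	migrate_enable();	/* outermost: restores ->cpus_allowed under rq->lock */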