sched/core: Add rq->lock wrappers

The missing update_rq_clock() check can work with partial rq->lock
wrappery, since a missing wrapper can cause the warning to not be
emitted when it should have, but cannot cause the warning to trigger
when it should not have.

The duplicate update_rq_clock() check however can cause false warnings
to trigger. Therefore add more comprehensive rq->lock wrappery.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author:    Peter Zijlstra, 2016-10-04 16:04:35 +02:00
Committer: Ingo Molnar
Commit:    8a8c69c327 (parent 26ae58d23b)
3 changed files with 171 additions and 112 deletions
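The wrappers introduced by this commit (see the kernel/sched/sched.h hunk at the end of the diff) pair the raw rq->lock spinlock operations with lockdep pinning, so the clock-update state tracking stays consistent across lock and unlock. A minimal usage sketch follows; do_work_on_rq() is a hypothetical placeholder for whatever the caller does while holding the lock:

	static void example_rq_user(struct rq *rq)
	{
		struct rq_flags rf;

		rq_lock_irqsave(rq, &rf);	/* raw_spin_lock_irqsave() + rq_pin_lock() */
		update_rq_clock(rq);		/* clock updated once, under the pinned lock */
		do_work_on_rq(rq);		/* hypothetical caller work */
		rq_unlock_irqrestore(rq, &rf);	/* rq_unpin_lock() + raw_spin_unlock_irqrestore() */
	}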

kernel/sched/core.c

@@ -85,21 +85,6 @@ int sysctl_sched_rt_runtime = 950000;
/* CPUs with isolated domains */
cpumask_var_t cpu_isolated_map;
/*
* this_rq_lock - lock this runqueue and disable interrupts.
*/
static struct rq *this_rq_lock(void)
__acquires(rq->lock)
{
struct rq *rq;
local_irq_disable();
rq = this_rq();
raw_spin_lock(&rq->lock);
return rq;
}
/*
* __task_rq_lock - lock the rq @p resides on.
*/
@@ -264,13 +249,14 @@ static void hrtick_clear(struct rq *rq)
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
struct rq *rq = container_of(timer, struct rq, hrtick_timer);
struct rq_flags rf;
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
raw_spin_lock(&rq->lock);
rq_lock(rq, &rf);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
raw_spin_unlock(&rq->lock);
rq_unlock(rq, &rf);
return HRTIMER_NORESTART;
}
@@ -290,11 +276,12 @@ static void __hrtick_restart(struct rq *rq)
static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
struct rq_flags rf;
raw_spin_lock(&rq->lock);
rq_lock(rq, &rf);
__hrtick_restart(rq);
rq->hrtick_csd_pending = 0;
raw_spin_unlock(&rq->lock);
rq_unlock(rq, &rf);
}
/*
@@ -949,18 +936,19 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
*
* Returns (locked) new rq. Old rq's lock is released.
*/
static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int new_cpu)
{
lockdep_assert_held(&rq->lock);
p->on_rq = TASK_ON_RQ_MIGRATING;
dequeue_task(rq, p, 0);
set_task_cpu(p, new_cpu);
raw_spin_unlock(&rq->lock);
rq_unlock(rq, rf);
rq = cpu_rq(new_cpu);
raw_spin_lock(&rq->lock);
rq_lock(rq, rf);
BUG_ON(task_cpu(p) != new_cpu);
enqueue_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -983,7 +971,8 @@ struct migration_arg {
* So we race with normal scheduler movements, but that's OK, as long
* as the task is no longer on this CPU.
*/
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int dest_cpu)
{
if (unlikely(!cpu_active(dest_cpu)))
return rq;
@@ -992,7 +981,7 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
return rq;
rq = move_queued_task(rq, p, dest_cpu);
rq = move_queued_task(rq, rf, p, dest_cpu);
return rq;
}
@@ -1007,6 +996,7 @@ static int migration_cpu_stop(void *data)
struct migration_arg *arg = data;
struct task_struct *p = arg->task;
struct rq *rq = this_rq();
struct rq_flags rf;
/*
* The original target CPU might have gone down and we might
@@ -1021,7 +1011,7 @@ static int migration_cpu_stop(void *data)
sched_ttwu_pending();
raw_spin_lock(&p->pi_lock);
raw_spin_lock(&rq->lock);
rq_lock(rq, &rf);
/*
* If task_rq(p) != rq, it cannot be migrated here, because we're
* holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
@@ -1029,11 +1019,11 @@ static int migration_cpu_stop(void *data)
*/
if (task_rq(p) == rq) {
if (task_on_rq_queued(p))
rq = __migrate_task(rq, p, arg->dest_cpu);
rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
else
p->wake_cpu = arg->dest_cpu;
}
raw_spin_unlock(&rq->lock);
rq_unlock(rq, &rf);
raw_spin_unlock(&p->pi_lock);
local_irq_enable();
@@ -1153,9 +1143,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
* OK, since we're going to drop the lock immediately
* afterwards anyway.
*/
rq_unpin_lock(rq, &rf);
rq = move_queued_task(rq, p, dest_cpu);
rq_repin_lock(rq, &rf);
rq = move_queued_task(rq, &rf, p, dest_cpu);
}
out:
task_rq_unlock(rq, p, &rf);
@@ -1220,16 +1208,24 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
{
if (task_on_rq_queued(p)) {
struct rq *src_rq, *dst_rq;
struct rq_flags srf, drf;
src_rq = task_rq(p);
dst_rq = cpu_rq(cpu);
rq_pin_lock(src_rq, &srf);
rq_pin_lock(dst_rq, &drf);
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, cpu);
activate_task(dst_rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(dst_rq, p, 0);
rq_unpin_lock(dst_rq, &drf);
rq_unpin_lock(src_rq, &srf);
} else {
/*
* Task isn't running anymore; make it appear like we migrated
@@ -1729,14 +1725,12 @@ void sched_ttwu_pending(void)
struct rq *rq = this_rq();
struct llist_node *llist = llist_del_all(&rq->wake_list);
struct task_struct *p;
unsigned long flags;
struct rq_flags rf;
if (!llist)
return;
raw_spin_lock_irqsave(&rq->lock, flags);
rq_pin_lock(rq, &rf);
rq_lock_irqsave(rq, &rf);
while (llist) {
int wake_flags = 0;
@@ -1750,8 +1744,7 @@ void sched_ttwu_pending(void)
ttwu_do_activate(rq, p, wake_flags, &rf);
}
rq_unpin_lock(rq, &rf);
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq_unlock_irqrestore(rq, &rf);
}
void scheduler_ipi(void)
@@ -1809,7 +1802,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
void wake_up_if_idle(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
struct rq_flags rf;
rcu_read_lock();
@@ -1819,11 +1812,11 @@ void wake_up_if_idle(int cpu)
if (set_nr_if_polling(rq->idle)) {
trace_sched_wake_idle_without_ipi(cpu);
} else {
raw_spin_lock_irqsave(&rq->lock, flags);
rq_lock_irqsave(rq, &rf);
if (is_idle_task(rq->curr))
smp_send_reschedule(cpu);
/* Else CPU is not idle, do nothing here: */
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq_unlock_irqrestore(rq, &rf);
}
out:
@@ -1849,11 +1842,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
}
#endif
raw_spin_lock(&rq->lock);
rq_pin_lock(rq, &rf);
rq_lock(rq, &rf);
ttwu_do_activate(rq, p, wake_flags, &rf);
rq_unpin_lock(rq, &rf);
raw_spin_unlock(&rq->lock);
rq_unlock(rq, &rf);
}
/*
@@ -2100,11 +2091,9 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
* disabled avoiding further scheduler activity on it and we've
* not yet picked a replacement task.
*/
rq_unpin_lock(rq, rf);
raw_spin_unlock(&rq->lock);
rq_unlock(rq, rf);
raw_spin_lock(&p->pi_lock);
raw_spin_lock(&rq->lock);
rq_repin_lock(rq, rf);
rq_relock(rq, rf);
}
if (!(p->state & TASK_NORMAL))
@@ -2778,9 +2767,9 @@ static void __balance_callback(struct rq *rq)
{
struct callback_head *head, *next;
void (*func)(struct rq *rq);
unsigned long flags;
struct rq_flags rf;
raw_spin_lock_irqsave(&rq->lock, flags);
rq_lock_irqsave(rq, &rf);
head = rq->balance_callback;
rq->balance_callback = NULL;
while (head) {
@@ -2791,7 +2780,7 @@ static void __balance_callback(struct rq *rq)
func(rq);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq_unlock_irqrestore(rq, &rf);
}
static inline void balance_callback(struct rq *rq)
@@ -3096,15 +3085,18 @@ void scheduler_tick(void)
int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
struct task_struct *curr = rq->curr;
struct rq_flags rf;
sched_clock_tick();
raw_spin_lock(&rq->lock);
rq_lock(rq, &rf);
update_rq_clock(rq);
curr->sched_class->task_tick(rq, curr, 0);
cpu_load_update_active(rq);
calc_global_load_tick(rq);
raw_spin_unlock(&rq->lock);
rq_unlock(rq, &rf);
perf_event_task_tick();
@@ -3389,8 +3381,7 @@ static void __sched notrace __schedule(bool preempt)
* done by the caller to avoid the race with signal_wake_up().
*/
smp_mb__before_spinlock();
raw_spin_lock(&rq->lock);
rq_pin_lock(rq, &rf);
rq_lock(rq, &rf);
/* Promote REQ to ACT */
rq->clock_update_flags <<= 1;
@@ -3442,8 +3433,7 @@ static void __sched notrace __schedule(bool preempt)
rq = context_switch(rq, prev, next, &rf);
} else {
rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
rq_unpin_lock(rq, &rf);
raw_spin_unlock_irq(&rq->lock);
rq_unlock_irq(rq, &rf);
}
balance_callback(rq);
@@ -4926,7 +4916,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
*/
SYSCALL_DEFINE0(sched_yield)
{
struct rq *rq = this_rq_lock();
struct rq_flags rf;
struct rq *rq;
local_irq_disable();
rq = this_rq();
rq_lock(rq, &rf);
schedstat_inc(rq->yld_count);
current->sched_class->yield_task(rq);
@@ -4935,9 +4930,8 @@ SYSCALL_DEFINE0(sched_yield)
* Since we are going to call schedule() anyway, there's
* no need to preempt or enable interrupts:
*/
__release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
do_raw_spin_unlock(&rq->lock);
preempt_disable();
rq_unlock(rq, &rf);
sched_preempt_enable_no_resched();
schedule();
@@ -5582,11 +5576,11 @@ static struct task_struct fake_task = {
* there's no concurrency possible, we hold the required locks anyway
* because of lock validation efforts.
*/
static void migrate_tasks(struct rq *dead_rq)
static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
{
struct rq *rq = dead_rq;
struct task_struct *next, *stop = rq->stop;
struct rq_flags rf;
struct rq_flags orf = *rf;
int dest_cpu;
/*
@@ -5605,9 +5599,7 @@ static void migrate_tasks(struct rq *dead_rq)
* class method both need to have an up-to-date
* value of rq->clock[_task]
*/
rq_pin_lock(rq, &rf);
update_rq_clock(rq);
rq_unpin_lock(rq, &rf);
for (;;) {
/*
@@ -5620,8 +5612,7 @@ static void migrate_tasks(struct rq *dead_rq)
/*
* pick_next_task() assumes pinned rq->lock:
*/
rq_repin_lock(rq, &rf);
next = pick_next_task(rq, &fake_task, &rf);
next = pick_next_task(rq, &fake_task, rf);
BUG_ON(!next);
next->sched_class->put_prev_task(rq, next);
@@ -5634,10 +5625,9 @@ static void migrate_tasks(struct rq *dead_rq)
* because !cpu_active at this point, which means load-balance
* will not interfere. Also, stop-machine.
*/
rq_unpin_lock(rq, &rf);
raw_spin_unlock(&rq->lock);
rq_unlock(rq, rf);
raw_spin_lock(&next->pi_lock);
raw_spin_lock(&rq->lock);
rq_relock(rq, rf);
/*
* Since we're inside stop-machine, _nothing_ should have
@@ -5651,12 +5641,12 @@ static void migrate_tasks(struct rq *dead_rq)
/* Find suitable destination for @next, with force if needed. */
dest_cpu = select_fallback_rq(dead_rq->cpu, next);
rq = __migrate_task(rq, next, dest_cpu);
rq = __migrate_task(rq, rf, next, dest_cpu);
if (rq != dead_rq) {
raw_spin_unlock(&rq->lock);
rq_unlock(rq, rf);
rq = dead_rq;
raw_spin_lock(&rq->lock);
*rf = orf;
rq_relock(rq, rf);
}
raw_spin_unlock(&next->pi_lock);
}
@@ -5769,7 +5759,7 @@ static int cpuset_cpu_inactive(unsigned int cpu)
int sched_cpu_activate(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
struct rq_flags rf;
set_cpu_active(cpu, true);
@@ -5787,12 +5777,12 @@ int sched_cpu_activate(unsigned int cpu)
* 2) At runtime, if cpuset_cpu_active() fails to rebuild the
* domains.
*/
raw_spin_lock_irqsave(&rq->lock, flags);
rq_lock_irqsave(rq, &rf);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq_unlock_irqrestore(rq, &rf);
update_max_interval();
@@ -5850,18 +5840,20 @@ int sched_cpu_starting(unsigned int cpu)
int sched_cpu_dying(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
struct rq_flags rf;
/* Handle pending wakeups and then migrate everything off */
sched_ttwu_pending();
raw_spin_lock_irqsave(&rq->lock, flags);
rq_lock_irqsave(rq, &rf);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
migrate_tasks(rq);
migrate_tasks(rq, &rf);
BUG_ON(rq->nr_running != 1);
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq_unlock_irqrestore(rq, &rf);
calc_load_migrate(rq);
update_max_interval();
nohz_balance_exit_idle(cpu);
@@ -7011,14 +7003,15 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
for_each_online_cpu(i) {
struct cfs_rq *cfs_rq = tg->cfs_rq[i];
struct rq *rq = cfs_rq->rq;
struct rq_flags rf;
raw_spin_lock_irq(&rq->lock);
rq_lock_irq(rq, &rf);
cfs_rq->runtime_enabled = runtime_enabled;
cfs_rq->runtime_remaining = 0;
if (cfs_rq->throttled)
unthrottle_cfs_rq(cfs_rq);
raw_spin_unlock_irq(&rq->lock);
rq_unlock_irq(rq, &rf);
}
if (runtime_was_enabled && !runtime_enabled)
cfs_bandwidth_usage_dec();

kernel/sched/fair.c

@@ -4271,8 +4271,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
throttled_list) {
struct rq *rq = rq_of(cfs_rq);
struct rq_flags rf;
raw_spin_lock(&rq->lock);
rq_lock(rq, &rf);
if (!cfs_rq_throttled(cfs_rq))
goto next;
@@ -4289,7 +4290,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
unthrottle_cfs_rq(cfs_rq);
next:
raw_spin_unlock(&rq->lock);
rq_unlock(rq, &rf);
if (!remaining)
break;
@@ -5097,15 +5098,16 @@ void cpu_load_update_nohz_stop(void)
unsigned long curr_jiffies = READ_ONCE(jiffies);
struct rq *this_rq = this_rq();
unsigned long load;
struct rq_flags rf;
if (curr_jiffies == this_rq->last_load_update_tick)
return;
load = weighted_cpuload(cpu_of(this_rq));
raw_spin_lock(&this_rq->lock);
rq_lock(this_rq, &rf);
update_rq_clock(this_rq);
cpu_load_update_nohz(this_rq, curr_jiffies, load);
raw_spin_unlock(&this_rq->lock);
rq_unlock(this_rq, &rf);
}
#else /* !CONFIG_NO_HZ_COMMON */
static inline void cpu_load_update_nohz(struct rq *this_rq,
@@ -6913,9 +6915,11 @@ static void attach_task(struct rq *rq, struct task_struct *p)
*/
static void attach_one_task(struct rq *rq, struct task_struct *p)
{
raw_spin_lock(&rq->lock);
struct rq_flags rf;
rq_lock(rq, &rf);
attach_task(rq, p);
raw_spin_unlock(&rq->lock);
rq_unlock(rq, &rf);
}
/*
@@ -6926,8 +6930,9 @@ static void attach_tasks(struct lb_env *env)
{
struct list_head *tasks = &env->tasks;
struct task_struct *p;
struct rq_flags rf;
raw_spin_lock(&env->dst_rq->lock);
rq_lock(env->dst_rq, &rf);
while (!list_empty(tasks)) {
p = list_first_entry(tasks, struct task_struct, se.group_node);
@@ -6936,7 +6941,7 @@ static void attach_tasks(struct lb_env *env)
attach_task(env->dst_rq, p);
}
raw_spin_unlock(&env->dst_rq->lock);
rq_unlock(env->dst_rq, &rf);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -6944,9 +6949,9 @@ static void update_blocked_averages(int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq;
unsigned long flags;
struct rq_flags rf;
raw_spin_lock_irqsave(&rq->lock, flags);
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
/*
@@ -6965,7 +6970,7 @@ static void update_blocked_averages(int cpu)
if (cfs_rq->tg->se[cpu])
update_load_avg(cfs_rq->tg->se[cpu], 0);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq_unlock_irqrestore(rq, &rf);
}
/*
@@ -7019,12 +7024,12 @@ static inline void update_blocked_averages(int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq = &rq->cfs;
unsigned long flags;
struct rq_flags rf;
raw_spin_lock_irqsave(&rq->lock, flags);
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq_unlock_irqrestore(rq, &rf);
}
static unsigned long task_h_load(struct task_struct *p)
@@ -8042,7 +8047,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd_parent = sd->parent;
struct sched_group *group;
struct rq *busiest;
unsigned long flags;
struct rq_flags rf;
struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
struct lb_env env = {
@@ -8105,7 +8110,7 @@ redo:
env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
more_balance:
raw_spin_lock_irqsave(&busiest->lock, flags);
rq_lock_irqsave(busiest, &rf);
update_rq_clock(busiest);
/*
@@ -8122,14 +8127,14 @@ more_balance:
* See task_rq_lock() family for the details.
*/
raw_spin_unlock(&busiest->lock);
rq_unlock(busiest, &rf);
if (cur_ld_moved) {
attach_tasks(&env);
ld_moved += cur_ld_moved;
}
local_irq_restore(flags);
local_irq_restore(rf.flags);
if (env.flags & LBF_NEED_BREAK) {
env.flags &= ~LBF_NEED_BREAK;
@@ -8207,6 +8212,8 @@ more_balance:
sd->nr_balance_failed++;
if (need_active_balance(&env)) {
unsigned long flags;
raw_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the active_load_balance_cpu_stop,
@@ -8444,8 +8451,9 @@ static int active_load_balance_cpu_stop(void *data)
struct rq *target_rq = cpu_rq(target_cpu);
struct sched_domain *sd;
struct task_struct *p = NULL;
struct rq_flags rf;
raw_spin_lock_irq(&busiest_rq->lock);
rq_lock_irq(busiest_rq, &rf);
/* make sure the requested cpu hasn't gone down in the meantime */
if (unlikely(busiest_cpu != smp_processor_id() ||
@@ -8496,7 +8504,7 @@ static int active_load_balance_cpu_stop(void *data)
rcu_read_unlock();
out_unlock:
busiest_rq->active_balance = 0;
raw_spin_unlock(&busiest_rq->lock);
rq_unlock(busiest_rq, &rf);
if (p)
attach_one_task(target_rq, p);
@@ -8794,10 +8802,13 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
* do the balance.
*/
if (time_after_eq(jiffies, rq->next_balance)) {
raw_spin_lock_irq(&rq->lock);
struct rq_flags rf;
rq_lock_irq(rq, &rf);
update_rq_clock(rq);
cpu_load_update_idle(rq);
raw_spin_unlock_irq(&rq->lock);
rq_unlock_irq(rq, &rf);
rebalance_domains(rq, CPU_IDLE);
}
@@ -8988,8 +8999,9 @@ static void task_fork_fair(struct task_struct *p)
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se, *curr;
struct rq *rq = this_rq();
struct rq_flags rf;
raw_spin_lock(&rq->lock);
rq_lock(rq, &rf);
update_rq_clock(rq);
cfs_rq = task_cfs_rq(current);
@@ -9010,7 +9022,7 @@ static void task_fork_fair(struct task_struct *p)
}
se->vruntime -= cfs_rq->min_vruntime;
raw_spin_unlock(&rq->lock);
rq_unlock(rq, &rf);
}
/*
@@ -9372,7 +9384,6 @@ static DEFINE_MUTEX(shares_mutex);
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
unsigned long flags;
/*
* We can't change the weight of the root cgroup.
@@ -9389,19 +9400,17 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
tg->shares = shares;
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
struct sched_entity *se;
struct sched_entity *se = tg->se[i];
struct rq_flags rf;
se = tg->se[i];
/* Propagate contribution to hierarchy */
raw_spin_lock_irqsave(&rq->lock, flags);
/* Possible calls to update_curr() need rq clock */
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
for_each_sched_entity(se) {
update_load_avg(se, UPDATE_TG);
update_cfs_shares(se);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq_unlock_irqrestore(rq, &rf);
}
done:

kernel/sched/sched.h

@@ -1624,6 +1624,7 @@ static inline void sched_avg_update(struct rq *rq) { }
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
__acquires(rq->lock);
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
__acquires(p->pi_lock)
__acquires(rq->lock);
@@ -1645,6 +1646,62 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}
static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
raw_spin_lock_irqsave(&rq->lock, rf->flags);
rq_pin_lock(rq, rf);
}
static inline void
rq_lock_irq(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
raw_spin_lock_irq(&rq->lock);
rq_pin_lock(rq, rf);
}
static inline void
rq_lock(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
raw_spin_lock(&rq->lock);
rq_pin_lock(rq, rf);
}
static inline void
rq_relock(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
{
raw_spin_lock(&rq->lock);
rq_repin_lock(rq, rf);
}
static inline void
rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
}
static inline void
rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
raw_spin_unlock_irq(&rq->lock);
}
static inline void
rq_unlock(struct rq *rq, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
raw_spin_unlock(&rq->lock);
}
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT