sched/fair: Disable runtime_enabled on dying rq

We kill rq->rd at the CPU_DOWN_PREPARE stage:

	cpuset_cpu_inactive() -> cpuset_update_active_cpus() ->
	partition_sched_domains() -> cpu_attach_domain() ->
	rq_attach_root() -> set_rq_offline()

This unthrottles all throttled cfs_rqs.
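For context, set_rq_offline() ends up in the fair class's rq_offline()
callback, which in kernels of this era looks roughly like:

	static void rq_offline_fair(struct rq *rq)
	{
		update_sysctl();

		/* Ensure any throttled groups are reachable by pick_next_task */
		unthrottle_offline_cfs_rqs(rq);
	}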

But the CPU is still able to call schedule() until

	take_cpu_down() -> __cpu_disable()

is called from stop_machine.

In this window the tasks from the just-unthrottled cfs_rqs are pickable
in the standard scheduler way, and the dying CPU picks them.
The cfs_rqs become throttled again, and migrate_tasks()
in migration_call() skips their tasks (a second unthrottle
at the CPU_DYING stage does not happen, because rq->rd
is already NULL).
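The re-throttling happens because runtime accounting is still enabled
on the dying rq; cfs_rq->runtime_enabled is the gate. A sketch close to
the account_cfs_rq_runtime() of this era:

	static __always_inline
	void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
	{
		if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
			return;

		/* Decrements runtime_remaining; throttles the cfs_rq when it runs out. */
		__account_cfs_rq_runtime(cfs_rq, delta_exec);
	}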

This patch sets runtime_enabled to zero on the offline rq. This
guarantees that runtime is not accounted, so the cfs_rqs cannot
exhaust the cfs_rq->runtime_remaining = 1 they were given, and their
tasks remain pickable in migrate_tasks(). runtime_enabled is
recalculated when the rq becomes online again.

Ben Segall also noticed that we always enable runtime in
tg_set_cfs_bandwidth(). Actually, we should do that for online
CPUs only. To prevent races with unthrottle_offline_cfs_rqs(),
we take the get_online_cpus() lock.
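Without the lock, a hypothetical interleaving would reintroduce the
problem:

	CPU A: tg_set_cfs_bandwidth()       CPU B: going offline
	-----------------------------       ------------------------------
	                                    unthrottle_offline_cfs_rqs():
	                                      cfs_rq->runtime_enabled = 0;
	for_each_possible_cpu(i):
	  cfs_rq->runtime_enabled = 1;  <-- re-enables throttling on the
	                                    dying rq

With get_online_cpus() held and the loop switched to
for_each_online_cpu(), the bandwidth update and CPU offlining are
mutually exclusive.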

Reviewed-by: Ben Segall <bsegall@google.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
CC: Konstantin Khorenko <khorenko@parallels.com>
CC: Paul Turner <pjt@google.com>
CC: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1403684382.3462.42.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7817,6 +7817,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 	if (period > max_cfs_quota_period)
 		return -EINVAL;
 
+	/*
+	 * Prevent race between setting of cfs_rq->runtime_enabled and
+	 * unthrottle_offline_cfs_rqs().
+	 */
+	get_online_cpus();
 	mutex_lock(&cfs_constraints_mutex);
 	ret = __cfs_schedulable(tg, period, quota);
 	if (ret)
@@ -7842,7 +7847,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 	}
 	raw_spin_unlock_irq(&cfs_b->lock);
 
-	for_each_possible_cpu(i) {
+	for_each_online_cpu(i) {
 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
 		struct rq *rq = cfs_rq->rq;
 
@@ -7858,6 +7863,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 		cfs_bandwidth_usage_dec();
 out_unlock:
 	mutex_unlock(&cfs_constraints_mutex);
+	put_online_cpus();
 
 	return ret;
 }

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3798,6 +3798,19 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
+static void __maybe_unused update_runtime_enabled(struct rq *rq)
+{
+	struct cfs_rq *cfs_rq;
+
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
+		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
+
+		raw_spin_lock(&cfs_b->lock);
+		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
+		raw_spin_unlock(&cfs_b->lock);
+	}
+}
+
 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
@@ -3811,6 +3824,12 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 		 * there's some valid quota amount
 		 */
 		cfs_rq->runtime_remaining = 1;
+		/*
+		 * Offline rq is schedulable till cpu is completely disabled
+		 * in take_cpu_down(), so we prevent new cfs throttling here.
+		 */
+		cfs_rq->runtime_enabled = 0;
+
 		if (cfs_rq_throttled(cfs_rq))
 			unthrottle_cfs_rq(cfs_rq);
 	}
@@ -3854,6 +3873,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 	return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
+static inline void update_runtime_enabled(struct rq *rq) {}
 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -7362,6 +7382,8 @@ void trigger_load_balance(struct rq *rq)
 static void rq_online_fair(struct rq *rq)
 {
 	update_sysctl();
+
+	update_runtime_enabled(rq);
 }
 
 static void rq_offline_fair(struct rq *rq)