diff --git a/kernel/sched.c b/kernel/sched.c
index a5dd03522e32..5594e65166fc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -732,13 +732,14 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
-	lw->inv_weight = WMULT_CONST / lw->weight;
+	if (sched_feat(FAIR_SLEEPERS))
+		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
 	lw->weight -= dec;
-	if (likely(lw->weight))
+	if (sched_feat(FAIR_SLEEPERS) && likely(lw->weight))
 		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a566a4558167..7041dc697855 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -336,6 +336,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	}
 	curr->vruntime += delta_exec_weighted;
 
+	if (!sched_feat(FAIR_SLEEPERS))
+		return;
+
 	if (unlikely(!load))
 		return;
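
For context, lw->inv_weight caches a fixed-point reciprocal of lw->weight so that later weight scaling can be done with a multiply and a shift rather than a division on every tick. The user-space sketch below illustrates that reciprocal trick only; struct load_weight, calc_delta_sketch(), and the WMULT_CONST/WMULT_SHIFT values here are simplified stand-ins, and the kernel's real scaling helper additionally handles 64-bit overflow and rounding, which this sketch omits.

/*
 * Stand-alone sketch of the fixed-point reciprocal that lw->inv_weight
 * caches.  The constants and the struct are simplified stand-ins for the
 * kernel's definitions; no overflow handling is done here.
 */
#include <stdint.h>
#include <stdio.h>

#define WMULT_SHIFT	32
#define WMULT_CONST	((1ULL << WMULT_SHIFT) - 1)	/* roughly 2^32 */

struct load_weight {
	unsigned long weight;
	unsigned long inv_weight;	/* cached WMULT_CONST / weight */
};

/*
 * delta * weight / lw->weight, computed with a multiply and a shift via
 * the cached reciprocal: (delta * weight * inv_weight) >> WMULT_SHIFT.
 */
static unsigned long calc_delta_sketch(unsigned long delta,
				       unsigned long weight,
				       const struct load_weight *lw)
{
	uint64_t tmp = (uint64_t)delta * weight;

	return (unsigned long)((tmp * lw->inv_weight) >> WMULT_SHIFT);
}

int main(void)
{
	struct load_weight lw = { .weight = 3072 };

	/* The division the patch makes conditional: */
	lw.inv_weight = WMULT_CONST / lw.weight;

	/* 1000000 * 1024 / 3072 = 333333; the reciprocal path agrees. */
	printf("%lu\n", calc_delta_sketch(1000000, 1024, &lw));
	return 0;
}

Running the sketch prints 333333, matching the exact integer result of 1000000 * 1024 / 3072, which is the kind of scaling the cached inv_weight is meant to make cheap.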