sched: Tweak wake_idx

When merging select_task_rq_fair() and sched_balance_self() we lost
the use of wake_idx; restore it and set the wake_idx values to 0 to
make wake balancing more aggressive.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:    Peter Zijlstra, 2009-09-03 13:16:51 +02:00
Committer: Ingo Molnar
Parent:    d7c33c4930
Commit:    78e7ed53c9
7 changed files with 28 additions and 11 deletions
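
Background note on the "more aggressive" claim (not part of the commit): each
runqueue keeps several progressively more smoothed copies of its load, and the
per-domain *_idx fields select which copy helpers such as source_load() and
target_load() read for a given kind of balancing; index 0 means the raw current
load. With wake_idx = 0, the find_idlest_group() path changed in the
kernel/sched_fair.c hunk below judges CPUs by what they look like right now
rather than by decayed history, so a CPU that just went idle becomes eligible
immediately. The stand-alone sketch below only illustrates that idea:
struct toy_rq, toy_source_load(), toy_target_load() and the load numbers are
invented, and the min/max biasing is loosely modeled on the 2.6.31-era helpers
rather than copied from them.

#include <stdio.h>

#define NR_LOAD_IDX 5

/*
 * Toy runqueue: slot 0 stands in for the current weighted load, higher
 * slots for progressively more smoothed load histories.  All names and
 * numbers here are invented for illustration.
 */
struct toy_rq {
	unsigned long cpu_load[NR_LOAD_IDX];
};

/* Local ("source") side: idx > 0 is biased toward the lower value. */
static unsigned long toy_source_load(const struct toy_rq *rq, int idx)
{
	unsigned long now = rq->cpu_load[0];

	if (idx == 0)
		return now;
	return rq->cpu_load[idx] < now ? rq->cpu_load[idx] : now;
}

/* Remote ("target") side: idx > 0 is biased toward the higher value. */
static unsigned long toy_target_load(const struct toy_rq *rq, int idx)
{
	unsigned long now = rq->cpu_load[0];

	if (idx == 0)
		return now;
	return rq->cpu_load[idx] > now ? rq->cpu_load[idx] : now;
}

int main(void)
{
	/* A remote CPU that was busy for a while but just went idle. */
	struct toy_rq remote = { .cpu_load = { 0, 1024, 1536, 1792, 1920 } };

	/* wake_idx = 1 (old): stale history still makes it look loaded. */
	printf("target load, idx 1: %lu\n", toy_target_load(&remote, 1));

	/* wake_idx = 0 (new): the wakeup sees the CPU as idle right now. */
	printf("target load, idx 0: %lu\n", toy_target_load(&remote, 0));

	/* The source side collapses to the current load either way here. */
	printf("source load, idx 1: %lu\n", toy_source_load(&remote, 1));
	return 0;
}

Compiled stand-alone, the idx 1 target view still reports the stale 1024 while
the idx 0 view reports 0, which is the behavioural shift the wake_idx change
aims at.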

@@ -62,11 +62,12 @@ void build_cpu_to_node_map(void);
 	.busy_idx = 2, \
 	.idle_idx = 1, \
 	.newidle_idx = 2, \
-	.wake_idx = 1, \
+	.wake_idx = 0, \
 	.forkexec_idx = 1, \
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_NEWIDLE \
 		| SD_BALANCE_EXEC \
 		| SD_BALANCE_FORK \
+		| SD_BALANCE_WAKE \
 		| SD_WAKE_AFFINE, \
 	.last_balance = jiffies, \
@@ -87,7 +88,7 @@ void build_cpu_to_node_map(void);
 	.busy_idx = 3, \
 	.idle_idx = 2, \
 	.newidle_idx = 2, \
-	.wake_idx = 1, \
+	.wake_idx = 0, \
 	.forkexec_idx = 1, \
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_EXEC \

@@ -58,9 +58,10 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 	.busy_idx = 3, \
 	.idle_idx = 1, \
 	.newidle_idx = 2, \
-	.wake_idx = 1, \
+	.wake_idx = 0, \
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_EXEC \
 		| SD_BALANCE_FORK \
 		| SD_BALANCE_NEWIDLE \
+		| SD_BALANCE_WAKE \
 		| SD_SERIALIZE, \

@@ -16,7 +16,7 @@
 	.busy_idx = 3, \
 	.idle_idx = 2, \
 	.newidle_idx = 2, \
-	.wake_idx = 1, \
+	.wake_idx = 0, \
 	.forkexec_idx = 1, \
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_FORK \

@@ -52,7 +52,7 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 	.busy_idx = 3, \
 	.idle_idx = 2, \
 	.newidle_idx = 0, \
-	.wake_idx = 1, \
+	.wake_idx = 0, \
 	.forkexec_idx = 1, \
 	.flags = SD_LOAD_BALANCE \
 		| SD_BALANCE_FORK \

@@ -138,7 +138,7 @@ extern unsigned long node_remap_size[];
 	.busy_idx = 3, \
 	.idle_idx = SD_IDLE_IDX, \
 	.newidle_idx = SD_NEWIDLE_IDX, \
-	.wake_idx = 1, \
+	.wake_idx = 0, \
 	.forkexec_idx = SD_FORKEXEC_IDX, \
 	\
 	.flags = 1*SD_LOAD_BALANCE \

@@ -120,7 +120,7 @@ int arch_update_cpu_topology(void);
 	.imbalance_pct = 125, \
 	.cache_nice_tries = 1, \
 	.busy_idx = 2, \
-	.wake_idx = 1, \
+	.wake_idx = 0, \
 	.forkexec_idx = 1, \
 	\
 	.flags = 1*SD_LOAD_BALANCE \
@@ -152,7 +152,7 @@ int arch_update_cpu_topology(void);
 	.busy_idx = 2, \
 	.idle_idx = 1, \
 	.newidle_idx = 2, \
-	.wake_idx = 1, \
+	.wake_idx = 0, \
 	.forkexec_idx = 1, \
 	\
 	.flags = 1*SD_LOAD_BALANCE \

@@ -1232,12 +1232,27 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
  * domain.
  */
 static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
+find_idlest_group(struct sched_domain *sd, struct task_struct *p,
+		  int this_cpu, int flag)
 {
 	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
-	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
+	int load_idx = 0;
+
+	switch (flag) {
+	case SD_BALANCE_FORK:
+	case SD_BALANCE_EXEC:
+		load_idx = sd->forkexec_idx;
+		break;
+
+	case SD_BALANCE_WAKE:
+		load_idx = sd->wake_idx;
+		break;
+
+	default:
+		break;
+	}
 
 	do {
 		unsigned long load, avg_load;
@@ -1392,7 +1407,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 			continue;
 		}
 
-		group = find_idlest_group(sd, p, cpu);
+		group = find_idlest_group(sd, p, cpu, flag);
 		if (!group) {
 			sd = sd->child;
 			continue;