Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, tsc, sched: Recompute cyc2ns_offset's during resume from sleep states
  sched: Fix rq->clock synchronization when migrating tasks
commit 5e686019df
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+	if (!sched_clock_stable)
+		return;
+
+	cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with an invariant TSC, the TSC gets reset in some of the
+ * ACPI system sleep states. And in some systems the BIOS seems to reinit the
+ * TSC to an arbitrary value (still sync'd across cpus) during resume from such
+ * sleep states. To cope with this, recompute the cyc2ns_offset for each cpu so
+ * that sched_clock() continues from the point where it was left off during
+ * suspend.
+ */
+void restore_sched_clock_state(void)
+{
+	unsigned long long offset;
+	unsigned long flags;
+	int cpu;
+
+	if (!sched_clock_stable)
+		return;
+
+	local_irq_save(flags);
+
+	__get_cpu_var(cyc2ns_offset) = 0;
+	offset = cyc2ns_suspend - sched_clock();
+
+	for_each_possible_cpu(cpu)
+		per_cpu(cyc2ns_offset, cpu) = offset;
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
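For intuition on the offset recomputation above: sched_clock() on x86 derives nanoseconds from the TSC roughly as ns = (cycles * cyc2ns_scale >> CYC2NS_SCALE_FACTOR) + cyc2ns_offset, so once the counter has been reset across a sleep state, the per-cpu offset is the only knob that needs adjusting to make the clock continue from the value saved at suspend. The following standalone C sketch models that arithmetic; it is illustrative only, not kernel code, and the scale value, the pre/post-resume counter readings, and the single (non per-cpu) offset are assumptions made up for the example.

/* Standalone model of the cyc2ns offset fixup done in restore_sched_clock_state(). */
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10				/* fixed-point shift, kernel-style */

static unsigned long long cyc2ns_scale = 1 << CYC2NS_SCALE_FACTOR;	/* assumed: ~1 ns per cycle */
static unsigned long long cyc2ns_offset;		/* per-cpu in the kernel; a single copy here */

static unsigned long long cyc2ns(unsigned long long cyc)
{
	return ((cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR) + cyc2ns_offset;
}

int main(void)
{
	unsigned long long tsc = 5000000000ULL;		/* counter value just before suspend (made up) */
	unsigned long long suspend_ns = cyc2ns(tsc);	/* what save_sched_clock_state() stashes away */

	tsc = 12345;					/* BIOS/ACPI reset the counter across the sleep state */

	/* Zero the offset so cyc2ns() reflects only the raw counter, then pick the
	 * offset that makes the clock continue exactly from the saved timestamp. */
	cyc2ns_offset = 0;
	cyc2ns_offset = suspend_ns - cyc2ns(tsc);

	printf("saved at suspend: %llu ns, first reading after resume: %llu ns\n",
	       suspend_ns, cyc2ns(tsc));
	return 0;
}

In the patch the recomputed offset is written into every cpu's cyc2ns_offset with interrupts disabled, since sched_clock() can read it from any cpu.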
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -113,6 +113,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 void save_processor_state(void)
 {
 	__save_processor_state(&saved_context);
+	save_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
@@ -229,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 void restore_processor_state(void)
 {
 	__restore_processor_state(&saved_context);
+	restore_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(restore_processor_state);
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3752,6 +3752,8 @@ static void task_fork_fair(struct task_struct *p)
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
+	update_rq_clock(rq);
+
 	if (unlikely(task_cpu(p) != this_cpu))
 		__set_task_cpu(p, this_cpu);
 
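This two-line hunk is the rq->clock synchronization fix named in the merge summary: rq->clock only advances when update_rq_clock() is called, so task_fork_fair() refreshes it under rq->lock before the child's placement is computed relative to it; otherwise the new task could be stamped against an arbitrarily stale clock. Below is a minimal userspace sketch of that "refresh the cached clock under the lock before reading it" pattern; the runqueue struct, now_ns(), and place_entity_stamp() are hypothetical stand-ins, not the scheduler's actual types or functions.

/* Hypothetical sketch: a cached clock must be refreshed under the lock that
 * protects it before any timestamp is derived from it. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct runqueue {
	pthread_mutex_t lock;
	unsigned long long clock_ns;	/* cached clock, only advances when explicitly updated */
};

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void update_clock(struct runqueue *rq)
{
	rq->clock_ns = now_ns();	/* plays the role of update_rq_clock(rq) */
}

/* Stamp a newly forked entity against the runqueue clock. */
static unsigned long long place_entity_stamp(struct runqueue *rq)
{
	unsigned long long stamp;

	pthread_mutex_lock(&rq->lock);
	update_clock(rq);		/* without this, stamp could be arbitrarily stale */
	stamp = rq->clock_ns;
	pthread_mutex_unlock(&rq->lock);

	return stamp;
}

int main(void)
{
	struct runqueue rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .clock_ns = 0 };

	printf("fork stamp: %llu ns\n", place_entity_stamp(&rq));
	return 0;
}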