diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index e1006b7acc9e..f3ab61ee7498 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -200,6 +200,23 @@ static int hpet_next_event(unsigned long delta,
 	return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0);
 }
 
+/*
+ * Clock source related code
+ */
+static cycle_t read_hpet(void)
+{
+	return (cycle_t)hpet_readl(HPET_COUNTER);
+}
+
+static struct clocksource clocksource_hpet = {
+	.name		= "hpet",
+	.rating		= 250,
+	.read		= read_hpet,
+	.mask		= HPET_MASK,
+	.shift		= HPET_SHIFT,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
 /*
  * Try to setup the HPET timer
  */
@@ -207,6 +224,7 @@ int __init hpet_enable(void)
 {
 	unsigned long id;
 	uint64_t hpet_freq;
+	u64 tmp;
 
 	if (!is_hpet_capable())
 		return 0;
@@ -253,6 +271,25 @@ int __init hpet_enable(void)
 	/* Start the counter */
 	hpet_start_counter();
 
+	/* Initialize and register HPET clocksource
+	 *
+	 * hpet period is in femto seconds per cycle
+	 * so we need to convert this to ns/cyc units
+	 * aproximated by mult/2^shift
+	 *
+	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+	 *  (fsec/cyc << shift)/1000000 = mult
+	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
+	 */
+	tmp = (u64)hpet_period << HPET_SHIFT;
+	do_div(tmp, FSEC_PER_NSEC);
+	clocksource_hpet.mult = (u32)tmp;
+
+	clocksource_register(&clocksource_hpet);
+
+
 	if (id & HPET_ID_LEGSUP) {
 		hpet_enable_int();
 		hpet_reserve_platform_timers(id);
@@ -273,49 +310,6 @@ out_nohpet:
 	return 0;
 }
 
-/*
- * Clock source related code
- */
-static cycle_t read_hpet(void)
-{
-	return (cycle_t)hpet_readl(HPET_COUNTER);
-}
-
-static struct clocksource clocksource_hpet = {
-	.name		= "hpet",
-	.rating		= 250,
-	.read		= read_hpet,
-	.mask		= HPET_MASK,
-	.shift		= HPET_SHIFT,
-	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init init_hpet_clocksource(void)
-{
-	u64 tmp;
-
-	if (!hpet_virt_address)
-		return -ENODEV;
-
-	/*
-	 * hpet period is in femto seconds per cycle
-	 * so we need to convert this to ns/cyc units
-	 * aproximated by mult/2^shift
-	 *
-	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
-	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
-	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
-	 *  (fsec/cyc << shift)/1000000 = mult
-	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
-	 */
-	tmp = (u64)hpet_period << HPET_SHIFT;
-	do_div(tmp, FSEC_PER_NSEC);
-	clocksource_hpet.mult = (u32)tmp;
-
-	return clocksource_register(&clocksource_hpet);
-}
-
-module_init(init_hpet_clocksource);
 
 #ifdef CONFIG_HPET_EMULATE_RTC
 
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index a6bc7bb38834..5cbb776b3089 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -195,4 +195,4 @@ static int __init init_pit_clocksource(void)
 	clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
 	return clocksource_register(&clocksource_pit);
 }
-module_init(init_pit_clocksource);
+arch_initcall(init_pit_clocksource);
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 122623dcc6e1..698c24fe482e 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -657,5 +657,4 @@ void __init setup_arch(char **cmdline_p)
 	conswitchp = &dummy_con;
 #endif
 #endif
-	tsc_init();
 }
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index ccd3734edb8f..94e5cb091104 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -279,5 +279,6 @@ void __init hpet_time_init(void)
  */
 void __init time_init(void)
 {
+	tsc_init();
 	late_time_init = choose_time_init();
 }
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 59222a04234b..875d8a6ecc02 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -184,34 +184,6 @@ int recalibrate_cpu_khz(void)
 
 EXPORT_SYMBOL(recalibrate_cpu_khz);
 
-void __init tsc_init(void)
-{
-	if (!cpu_has_tsc || tsc_disable)
-		goto out_no_tsc;
-
-	cpu_khz = calculate_cpu_khz();
-	tsc_khz = cpu_khz;
-
-	if (!cpu_khz)
-		goto out_no_tsc;
-
-	printk("Detected %lu.%03lu MHz processor.\n",
-	       (unsigned long)cpu_khz / 1000,
-	       (unsigned long)cpu_khz % 1000);
-
-	set_cyc2ns_scale(cpu_khz);
-	use_tsc_delay();
-	return;
-
-out_no_tsc:
-	/*
-	 * Set the tsc_disable flag if there's no TSC support, this
-	 * makes it a fast flag for the kernel to see whether it
-	 * should be using the TSC.
-	 */
-	tsc_disable = 1;
-}
-
 #ifdef CONFIG_CPU_FREQ
 
 /*
@@ -381,28 +353,47 @@ static void __init check_geode_tsc_reliable(void)
 static inline void check_geode_tsc_reliable(void) { }
 #endif
 
-static int __init init_tsc_clocksource(void)
+
+void __init tsc_init(void)
 {
+	if (!cpu_has_tsc || tsc_disable)
+		goto out_no_tsc;
 
-	if (cpu_has_tsc && tsc_khz && !tsc_disable) {
-		/* check blacklist */
-		dmi_check_system(bad_tsc_dmi_table);
+	cpu_khz = calculate_cpu_khz();
+	tsc_khz = cpu_khz;
 
-		unsynchronized_tsc();
-		check_geode_tsc_reliable();
-		current_tsc_khz = tsc_khz;
-		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
+	if (!cpu_khz)
+		goto out_no_tsc;
+
+	printk("Detected %lu.%03lu MHz processor.\n",
+		(unsigned long)cpu_khz / 1000,
+		(unsigned long)cpu_khz % 1000);
+
+	set_cyc2ns_scale(cpu_khz);
+	use_tsc_delay();
+
+	/* Check and install the TSC clocksource */
+	dmi_check_system(bad_tsc_dmi_table);
+
+	unsynchronized_tsc();
+	check_geode_tsc_reliable();
+	current_tsc_khz = tsc_khz;
+	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
 						clocksource_tsc.shift);
-		/* lower the rating if we already know its unstable: */
-		if (check_tsc_unstable()) {
-			clocksource_tsc.rating = 0;
-			clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-		}
-
-		return clocksource_register(&clocksource_tsc);
+	/* lower the rating if we already know its unstable: */
+	if (check_tsc_unstable()) {
+		clocksource_tsc.rating = 0;
+		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
 	}
+	clocksource_register(&clocksource_tsc);
 
-	return 0;
+	return;
+
+out_no_tsc:
+	/*
+	 * Set the tsc_disable flag if there's no TSC support, this
+	 * makes it a fast flag for the kernel to see whether it
+	 * should be using the TSC.
+	 */
+	tsc_disable = 1;
 }
-
-module_init(init_tsc_clocksource);
diff --git a/arch/x86_64/kernel/hpet.c b/arch/x86_64/kernel/hpet.c
index 65a0edd71a17..8cf0b8a13778 100644
--- a/arch/x86_64/kernel/hpet.c
+++ b/arch/x86_64/kernel/hpet.c
@@ -12,6 +12,12 @@
 #include
 #include
 
+#define HPET_MASK	0xFFFFFFFF
+#define HPET_SHIFT	22
+
+/* FSEC = 10^-15 NSEC = 10^-9 */
+#define FSEC_PER_NSEC	1000000
+
 int nohpet __initdata;
 
 unsigned long hpet_address;
@@ -106,9 +112,31 @@ int hpet_timer_stop_set_go(unsigned long tick)
 	return 0;
 }
 
+static cycle_t read_hpet(void)
+{
+	return (cycle_t)hpet_readl(HPET_COUNTER);
+}
+
+static cycle_t __vsyscall_fn vread_hpet(void)
+{
+	return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+}
+
+struct clocksource clocksource_hpet = {
+	.name		= "hpet",
+	.rating		= 250,
+	.read		= read_hpet,
+	.mask		= (cycle_t)HPET_MASK,
+	.mult		= 0, /* set below */
+	.shift		= HPET_SHIFT,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+	.vread		= vread_hpet,
+};
+
 int hpet_arch_init(void)
 {
 	unsigned int id;
+	u64 tmp;
 
 	if (!hpet_address)
 		return -1;
@@ -132,6 +160,22 @@ int hpet_arch_init(void)
 
 	hpet_use_timer = (id & HPET_ID_LEGSUP);
 
+	/*
+	 * hpet period is in femto seconds per cycle
+	 * so we need to convert this to ns/cyc units
+	 * aproximated by mult/2^shift
+	 *
+	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+	 *  (fsec/cyc << shift)/1000000 = mult
+	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
+	 */
+	tmp = (u64)hpet_period << HPET_SHIFT;
+	do_div(tmp, FSEC_PER_NSEC);
+	clocksource_hpet.mult = (u32)tmp;
+	clocksource_register(&clocksource_hpet);
+
 	return hpet_timer_stop_set_go(hpet_tick);
 }
 
@@ -444,68 +488,3 @@ static int __init nohpet_setup(char *s)
 }
 
 __setup("nohpet", nohpet_setup);
-
-#define HPET_MASK	0xFFFFFFFF
-#define HPET_SHIFT	22
-
-/* FSEC = 10^-15 NSEC = 10^-9 */
-#define FSEC_PER_NSEC	1000000
-
-static void *hpet_ptr;
-
-static cycle_t read_hpet(void)
-{
-	return (cycle_t)readl(hpet_ptr);
-}
-
-static cycle_t __vsyscall_fn vread_hpet(void)
-{
-	return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
-}
-
-struct clocksource clocksource_hpet = {
-	.name		= "hpet",
-	.rating		= 250,
-	.read		= read_hpet,
-	.mask		= (cycle_t)HPET_MASK,
-	.mult		= 0, /* set below */
-	.shift		= HPET_SHIFT,
-	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-	.vread		= vread_hpet,
-};
-
-static int __init init_hpet_clocksource(void)
-{
-	unsigned long hpet_period;
-	void __iomem *hpet_base;
-	u64 tmp;
-
-	if (!hpet_address)
-		return -ENODEV;
-
-	/* calculate the hpet address: */
-	hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
-	hpet_ptr = hpet_base + HPET_COUNTER;
-
-	/* calculate the frequency: */
-	hpet_period = readl(hpet_base + HPET_PERIOD);
-
-	/*
-	 * hpet period is in femto seconds per cycle
-	 * so we need to convert this to ns/cyc units
-	 * aproximated by mult/2^shift
-	 *
-	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
-	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
-	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
-	 *  (fsec/cyc << shift)/1000000 = mult
-	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
-	 */
-	tmp = (u64)hpet_period << HPET_SHIFT;
-	do_div(tmp, FSEC_PER_NSEC);
-	clocksource_hpet.mult = (u32)tmp;
-
-	return clocksource_register(&clocksource_hpet);
-}
-
-module_init(init_hpet_clocksource);
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index c9addcfb96dc..75d73a9aa9ff 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -358,6 +358,8 @@ void __init time_init(void)
 	set_cyc2ns_scale(cpu_khz);
 	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
 		cpu_khz / 1000, cpu_khz % 1000);
+	init_tsc_clocksource();
+
 	setup_irq(0, &irq0);
 }
 
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
index 895831865019..1a0edbbffaa0 100644
--- a/arch/x86_64/kernel/tsc.c
+++ b/arch/x86_64/kernel/tsc.c
@@ -210,7 +210,7 @@ void mark_tsc_unstable(void)
 }
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
-static int __init init_tsc_clocksource(void)
+void __init init_tsc_clocksource(void)
 {
 	if (!notsc) {
 		clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
@@ -218,9 +218,6 @@ static int __init init_tsc_clocksource(void)
 			clocksource_tsc.shift);
 		if (check_tsc_unstable())
 			clocksource_tsc.rating = 0;
-		return clocksource_register(&clocksource_tsc);
+		clocksource_register(&clocksource_tsc);
 	}
-	return 0;
 }
-
-module_init(init_tsc_clocksource);
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index ccaa6a39cb4b..d42060ede930 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -214,4 +214,7 @@ pm_good:
 	return clocksource_register(&clocksource_acpi_pm);
 }
 
-module_init(init_acpi_pm_clocksource);
+/* We use fs_initcall because we want the PCI fixups to have run
+ * but we still need to load before device_initcall
+ */
+fs_initcall(init_acpi_pm_clocksource);
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
index 4f3925ceb360..1bde303b970b 100644
--- a/drivers/clocksource/cyclone.c
+++ b/drivers/clocksource/cyclone.c
@@ -116,4 +116,4 @@ static int __init init_cyclone_clocksource(void)
 	return clocksource_register(&clocksource_cyclone);
 }
 
-module_init(init_cyclone_clocksource);
+arch_initcall(init_cyclone_clocksource);
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h
index 9a0a368852c7..26c3e9828288 100644
--- a/include/asm-x86_64/tsc.h
+++ b/include/asm-x86_64/tsc.h
@@ -55,6 +55,7 @@ static __always_inline cycles_t get_cycles_sync(void)
 extern void tsc_init(void);
 extern void mark_tsc_unstable(void);
 extern int unsynchronized_tsc(void);
+extern void init_tsc_clocksource(void);
 
 /*
  * Boot-time check whether the TSCs are synchronized across
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 193a0793af95..5b0e46b56fd0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -55,16 +55,18 @@ static DEFINE_SPINLOCK(clocksource_lock);
 static char override_name[32];
 static int finished_booting;
 
-/* clocksource_done_booting - Called near the end of bootup
+/* clocksource_done_booting - Called near the end of core bootup
  *
- * Hack to avoid lots of clocksource churn at boot time
+ * Hack to avoid lots of clocksource churn at boot time.
+ * We use fs_initcall because we want this to start before
+ * device_initcall but after subsys_initcall.
  */
 static int __init clocksource_done_booting(void)
 {
 	finished_booting = 1;
 	return 0;
 }
-late_initcall(clocksource_done_booting);
+fs_initcall(clocksource_done_booting);
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static LIST_HEAD(watchdog_list);
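
For reference, a minimal standalone sketch of the fixed-point mult/shift conversion that the HPET clocksource registration above relies on. It is not kernel code; the 69841279 fs period (a 14.31818 MHz HPET) and the one-second cycle count are assumed example values, not taken from this patch.

/*
 * Standalone illustration of how the mult/shift pair computed in
 * hpet_enable()/hpet_arch_init() turns counter deltas into nanoseconds.
 * The HPET period and cycle count below are assumed example values.
 */
#include <stdint.h>
#include <stdio.h>

#define HPET_SHIFT	22
#define FSEC_PER_NSEC	1000000ULL

int main(void)
{
	uint64_t hpet_period = 69841279;	/* fs per cycle, ~14.318 MHz (example) */
	uint64_t cycles = 14318180;		/* roughly one second of counter ticks */

	/* (hpet_period << shift) / FSEC_PER_NSEC = mult, per the patch comment */
	uint32_t mult = (uint32_t)((hpet_period << HPET_SHIFT) / FSEC_PER_NSEC);

	/* the clocksource core then computes: ns = (cycles * mult) >> shift */
	uint64_t ns = (cycles * mult) >> HPET_SHIFT;

	printf("mult = %u, %llu cycles -> %llu ns\n",
	       mult, (unsigned long long)cycles, (unsigned long long)ns);
	return 0;
}

With these example numbers mult comes out to roughly 293 million, and one second's worth of HPET ticks converts back to approximately 10^9 ns, which is the sanity check the shift/mult approximation is meant to pass.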