diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 7d5c3b0ea8da..8b581d3905cb 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -526,15 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 
 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 {
-	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	/* Intel Xeon Processor 7100 Series Specification Update
+	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
 	 * AL30: A Machine Check Exception (MCE) Occurring during an
 	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
-	 * Both Processor Cores to Lock Up when HT is enabled*/
+	 * Both Processor Cores to Lock Up. */
 	if (c->x86_vendor == X86_VENDOR_INTEL) {
 		if ((c->x86 == 15) &&
 		    (c->x86_model == 6) &&
-		    (c->x86_mask == 8) && smt_capable())
+		    (c->x86_mask == 8)) {
+			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
+			    "Xeon(R) 7100 Errata AL30, processors may "
+			    "lock up on frequency changes: disabling "
+			    "acpi-cpufreq.\n");
 			return -ENODEV;
+		}
 	}
 	return 0;
 }
@@ -549,13 +555,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	unsigned int result = 0;
 	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	struct acpi_processor_performance *perf;
+#ifdef CONFIG_SMP
+	static int blacklisted;
+#endif
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
 #ifdef CONFIG_SMP
-	result = acpi_cpufreq_blacklist(c);
-	if (result)
-		return result;
+	if (blacklisted)
+		return blacklisted;
+	blacklisted = acpi_cpufreq_blacklist(c);
+	if (blacklisted)
+		return blacklisted;
 #endif
 
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index ce2ed3e4aad9..cabd2fa3fc93 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -813,7 +813,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
 		break;
 	case 1 ... 15:
-		longhaul_version = TYPE_LONGHAUL_V1;
+		longhaul_version = TYPE_LONGHAUL_V2;
 		if (c->x86_mask < 8) {
 			cpu_model = CPU_SAMUEL2;
 			cpuname = "C3 'Samuel 2' [C5B]";
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 6394aa5c7985..3f12dabeab52 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1022,7 +1022,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
 		 * set it to 1 to avoid problems in the future.
 		 * For all others it's a BIOS bug.
 		 */
-		if (!boot_cpu_data.x86 == 0x11)
+		if (boot_cpu_data.x86 != 0x11)
			printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
					"latency\n");
		max_latency = 1;
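The powernow-k8.c change above is a straight operator-precedence fix: '!' binds tighter than '==', so the old test parsed as '(!boot_cpu_data.x86) == 0x11', which is false for every non-zero family and therefore never printed the firmware warning. A minimal, standalone user-space sketch (illustration only, not part of the patch; the 'family' variable is made up) showing the two forms side by side:

/* Illustration only: '!' vs '!=' precedence, as fixed in powernow-k8.c. */
#include <stdio.h>

int main(void)
{
	unsigned int family = 0x10;	/* any family other than 0x11 */

	if (!family == 0x11)		/* parses as (!family) == 0x11, i.e. 0 == 0x11: never true */
		printf("old test fires\n");
	else
		printf("old test never fires\n");

	if (family != 0x11)		/* the corrected comparison */
		printf("new test fires as intended\n");

	return 0;
}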
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 6911e91fb4f6..3ae5a7a3a500 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -232,28 +232,23 @@ static unsigned int speedstep_detect_chipset(void)
 	return 0;
 }
 
-struct get_freq_data {
-	unsigned int speed;
-	unsigned int processor;
-};
-
-static void get_freq_data(void *_data)
+static void get_freq_data(void *_speed)
 {
-	struct get_freq_data *data = _data;
+	unsigned int *speed = _speed;
 
-	data->speed = speedstep_get_frequency(data->processor);
+	*speed = speedstep_get_frequency(speedstep_processor);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	struct get_freq_data data = { .processor = cpu };
+	unsigned int speed;
 
 	/* You're supposed to ensure CPU is online. */
-	if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+	if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
 		BUG();
 
-	dprintk("detected %u kHz as current frequency\n", data.speed);
-	return data.speed;
+	dprintk("detected %u kHz as current frequency\n", speed);
+	return speed;
 }
 
 /**
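The speedstep-ich.c simplification works because speedstep_processor is already a driver-wide variable, so the helper that runs on the target CPU only needs somewhere to write its result; the wrapper struct duplicated information. A hypothetical, minimal kernel-module sketch (illustration only, not part of the patch; module and function names are invented) of the same smp_call_function_single() pattern -- run a helper on a chosen CPU and pass one value back through the void * argument:

/* Hypothetical sketch of the smp_call_function_single() pattern used above. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>

static void read_cpu_id(void *_id)
{
	unsigned int *id = _id;

	*id = smp_processor_id();	/* executes on the CPU picked below */
}

static int __init ipi_sketch_init(void)
{
	unsigned int id = 0;

	/* wait=1: block until read_cpu_id() has run on CPU 0 */
	if (smp_call_function_single(0, read_cpu_id, &id, 1) == 0)
		printk(KERN_INFO "ipi_sketch: helper ran on cpu %u\n", id);
	return 0;
}

static void __exit ipi_sketch_exit(void)
{
}

module_init(ipi_sketch_init);
module_exit(ipi_sketch_exit);
MODULE_LICENSE("GPL");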
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3938c7817095..ff57c40e9b8b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,7 +41,7 @@ static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 #ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
+static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
@@ -774,10 +774,12 @@ int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
 #ifdef CONFIG_SMP
 	unsigned long flags;
 	unsigned int j;
-
 #ifdef CONFIG_HOTPLUG_CPU
-	if (per_cpu(cpufreq_cpu_governor, cpu)) {
-		policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
+	struct cpufreq_governor *gov;
+
+	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+	if (gov) {
+		policy->governor = gov;
 		dprintk("Restoring governor %s for cpu %d\n",
 		       policy->governor->name, cpu);
 	}
@@ -949,10 +951,13 @@ err_out_kobj_put:
 static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
-	int ret = 0;
+	int ret = 0, found = 0;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
 	unsigned int j;
+#ifdef CONFIG_HOTPLUG_CPU
+	int sibling;
+#endif
 
 	if (cpu_is_offline(cpu))
 		return 0;
@@ -999,7 +1004,19 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	INIT_WORK(&policy->update, handle_update);
 
 	/* Set governor before ->init, so that driver could check it */
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+#ifdef CONFIG_HOTPLUG_CPU
+	for_each_online_cpu(sibling) {
+		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+		if (cp && cp->governor &&
+		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
+			policy->governor = cp->governor;
+			found = 1;
+			break;
+		}
+	}
+#endif
+	if (!found)
+		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
 	 */
@@ -1111,7 +1128,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 #ifdef CONFIG_SMP
 
 #ifdef CONFIG_HOTPLUG_CPU
-	per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
+	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
+			CPUFREQ_NAME_LEN);
 #endif
 
 	/* if we have other CPUs still registered, we need to unlink them,
@@ -1135,7 +1153,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 			continue;
 		dprintk("removing link for cpu %u\n", j);
 #ifdef CONFIG_HOTPLUG_CPU
-		per_cpu(cpufreq_cpu_governor, j) = data->governor;
+		strncpy(per_cpu(cpufreq_cpu_governor, j),
+			data->governor->name, CPUFREQ_NAME_LEN);
 #endif
 		cpu_sys_dev = get_cpu_sysdev(j);
 		sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
@@ -1606,9 +1625,22 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
+#ifdef CONFIG_HOTPLUG_CPU
+	int cpu;
+#endif
+
 	if (!governor)
 		return;
 
+#ifdef CONFIG_HOTPLUG_CPU
+	for_each_present_cpu(cpu) {
+		if (cpu_online(cpu))
+			continue;
+		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
+			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+	}
+#endif
+
 	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
 	mutex_unlock(&cpufreq_governor_mutex);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bc33ddc9c97c..c7b081b839ff 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -116,9 +116,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 	idle_time = cputime64_sub(cur_wall_time, busy_time);
 
 	if (wall)
-		*wall = cur_wall_time;
+		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-	return idle_time;
+	return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 071699de50ee..4b34ade2332b 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -133,9 +133,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 	idle_time = cputime64_sub(cur_wall_time, busy_time);
 
 	if (wall)
-		*wall = cur_wall_time;
+		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-	return idle_time;
+	return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
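The conservative and ondemand hunks address a unit mismatch: get_cpu_idle_time_jiffy() used to hand back jiffies, while the tickless path (get_cpu_idle_time_us()) and the governors' sampling-rate tunables work in microseconds, so the load calculation could end up comparing incompatible units. A minimal user-space sketch (illustration only; HZ is assumed and jiffies_to_usecs_demo() merely stands in for the kernel's jiffies_to_usecs()) of the load computation the governors perform on these deltas:

/* Illustration only: why the wall and idle deltas must share one unit
 * before the governors compute load = (wall - idle) * 100 / wall. */
#include <stdio.h>

#define HZ 250	/* assumed tick rate for the example */

/* Stand-in for the kernel's jiffies_to_usecs() at this HZ. */
static unsigned int jiffies_to_usecs_demo(unsigned int j)
{
	return j * (1000000 / HZ);
}

int main(void)
{
	unsigned int wall_jiffies = 25;	/* 100 ms sampling window at HZ=250 */
	unsigned int idle_jiffies = 20;	/* 80 ms of it spent idle */

	unsigned int wall_us = jiffies_to_usecs_demo(wall_jiffies);
	unsigned int idle_us = jiffies_to_usecs_demo(idle_jiffies);

	/* Consistent units: reports 20% load, as expected. */
	printf("load = %u%%\n", (wall_us - idle_us) * 100 / wall_us);

	/* Mixed units (usecs wall, jiffies idle) wildly overstate load. */
	printf("bogus load = %u%%\n", (wall_us - idle_jiffies) * 100 / wall_us);

	return 0;
}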