Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Make cpufreq suspend code conditional on powerpc.
  [CPUFREQ] Fix a kobject reference bug related to managed CPUs
  [CPUFREQ] Do not set policy for offline cpus
  [CPUFREQ] Fix NULL pointer dereference regression in conservative governor
Linus Torvalds, commit 3f5760b90e, 2009-08-04 15:28:46 -07:00
2 changed files with 30 additions and 3 deletions

drivers/cpufreq/cpufreq.c

@@ -858,6 +858,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		/* Check for existing affected CPUs.
 		 * They may not be aware of it due to CPU Hotplug.
+		 * cpufreq_cpu_put is called when the device is removed
+		 * in __cpufreq_remove_dev()
 		 */
 		managed_policy = cpufreq_cpu_get(j);
 		if (unlikely(managed_policy)) {
@@ -884,7 +886,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			ret = sysfs_create_link(&sys_dev->kobj,
 						&managed_policy->kobj,
 						"cpufreq");
-			if (!ret)
+			if (ret)
 				cpufreq_cpu_put(managed_policy);
 			/*
 			 * Success. We only needed to be added to the mask.
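
Note: the one-character change above moves the put onto the error path. The reference taken with cpufreq_cpu_get() is now kept on success (and, per the new comment, dropped later in __cpufreq_remove_dev()) and released only when sysfs_create_link() fails. A standalone toy sketch of that get/put-on-failure pattern, using made-up helpers rather than the kernel API:

#include <stdio.h>

/* Toy stand-ins for cpufreq_cpu_get()/cpufreq_cpu_put(); not kernel code. */
static int refcount = 1;
static int get(void)  { refcount++; return 0; }
static void put(void) { refcount--; }

/* link_fails stands in for sysfs_create_link() returning an error. */
static int link_cpu(int link_fails)
{
	int ret;

	if (get())
		return -1;

	ret = link_fails ? -1 : 0;
	if (ret)
		put();	/* failure: drop the reference we just took */
			/* success: keep it until the remove path runs */
	return ret;
}

int main(void)
{
	link_cpu(0);	/* success keeps one extra reference */
	link_cpu(1);	/* failure releases its reference again */
	printf("refcount=%d (expect 2: base + one successful link)\n", refcount);
	return 0;
}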
@@ -924,6 +926,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
+		if (!cpu_online(j))
+			continue;
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
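
Note: the guard added above keeps the per-CPU policy pointers from being filled in for CPUs that sit in the policy mask but are currently offline. A minimal standalone sketch of the same skip-if-offline idiom, using a plain array instead of cpu_online() and the per-cpu machinery:

#include <stdio.h>

#define NCPUS 4

/* Toy stand-ins; not the kernel's cpumask/per-cpu machinery. */
static int online[NCPUS] = { 1, 1, 0, 1 };	/* cpu2 is offline */
static const void *cpu_data[NCPUS];

static void assign_policy(const void *policy, const int *mask)
{
	for (int j = 0; j < NCPUS; j++) {
		if (!mask[j])
			continue;
		if (!online[j])		/* the fix: skip offline CPUs */
			continue;
		cpu_data[j] = policy;
	}
}

int main(void)
{
	static const int policy_cpus[NCPUS] = { 1, 1, 1, 1 };
	static const char policy[] = "policy0";

	assign_policy(policy, policy_cpus);
	for (int j = 0; j < NCPUS; j++)
		printf("cpu%d: %s\n", j, cpu_data[j] ? "policy set" : "no policy");
	return 0;
}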
@@ -1244,13 +1248,22 @@ EXPORT_SYMBOL(cpufreq_get);
 static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
-	int cpu = sysdev->id;
 	int ret = 0;
+#ifdef __powerpc__
+	int cpu = sysdev->id;
 	unsigned int cur_freq = 0;
 	struct cpufreq_policy *cpu_policy;
 	dprintk("suspending cpu %u\n", cpu);
+	/*
+	 * This whole bogosity is here because Powerbooks are made of fail.
+	 * No sane platform should need any of the code below to be run.
+	 * (it's entirely the wrong thing to do, as driver->get may
+	 *  reenable interrupts on some architectures).
+	 */
 	if (!cpu_online(cpu))
 		return 0;
@@ -1309,6 +1322,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 out:
 	cpufreq_cpu_put(cpu_policy);
+#endif /* __powerpc__ */
 	return ret;
 }
@@ -1322,12 +1336,18 @@ out:
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
-	int cpu = sysdev->id;
 	int ret = 0;
+#ifdef __powerpc__
+	int cpu = sysdev->id;
 	struct cpufreq_policy *cpu_policy;
 	dprintk("resuming cpu %u\n", cpu);
+	/* As with the ->suspend method, all the code below is
+	 * only necessary because Powerbooks suck.
+	 * See commit 42d4dc3f4e1e for jokes. */
 	if (!cpu_online(cpu))
 		return 0;
@@ -1391,6 +1411,7 @@ out:
 	schedule_work(&cpu_policy->update);
 fail:
 	cpufreq_cpu_put(cpu_policy);
+#endif /* __powerpc__ */
 	return ret;
 }
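
Note: with the two #ifdef/#endif pairs above, cpufreq_suspend() and cpufreq_resume() compile down to a plain "return 0" on every architecture except powerpc. A standalone sketch (not the kernel functions; the frequency-restore work is just a printf) showing the same conditional-compilation shape:

#include <stdio.h>

/* Sketch only: models how the __powerpc__ guard turns the suspend hook
 * into a no-op everywhere else. */
static int suspend_sketch(int cpu)
{
	int ret = 0;
#ifdef __powerpc__
	printf("powerpc: would re-read and restore the frequency of cpu %d\n", cpu);
#else
	(void)cpu;	/* every other architecture: nothing to do */
#endif
	return ret;
}

int main(void)
{
	return suspend_sketch(0);
}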

drivers/cpufreq/cpufreq_conservative.c

@@ -63,6 +63,7 @@ struct cpu_dbs_info_s {
 	unsigned int down_skip;
 	unsigned int requested_freq;
 	int cpu;
+	unsigned int enable:1;
 	/*
 	 * percpu mutex that serializes governor limit change with
 	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
@@ -141,6 +142,9 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_policy *policy;
+	if (!this_dbs_info->enable)
+		return 0;
 	policy = this_dbs_info->cur_policy;
 	/*
@@ -497,6 +501,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
+	dbs_info->enable = 1;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
 			      delay);
@@ -504,6 +509,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
+	dbs_info->enable = 0;
 	cancel_delayed_work_sync(&dbs_info->work);
 }
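
Note: the enable bit added above closes the window in which the frequency-transition notifier can run after the governor has been stopped and this_dbs_info->cur_policy is no longer valid. A standalone sketch of the guard, using toy types rather than the governor's real ones:

#include <stdio.h>
#include <stddef.h>

/* Toy model of the race the enable bit closes; names are illustrative. */
struct policy { unsigned int cur; };

struct dbs_info {
	struct policy *cur_policy;
	unsigned int requested_freq;
	unsigned int enable:1;
};

/* Stands in for dbs_cpufreq_notifier(): bail out unless the governor runs. */
static int notifier_sketch(struct dbs_info *info, unsigned int new_freq)
{
	if (!info->enable)
		return 0;			/* governor stopped: ignore the event */

	info->requested_freq = new_freq;
	info->cur_policy->cur = new_freq;	/* safe: cur_policy still valid */
	return 0;
}

int main(void)
{
	struct policy p = { .cur = 1000 };
	struct dbs_info info = { .cur_policy = &p, .enable = 1 };

	notifier_sketch(&info, 1200);	/* governor active: applied */

	info.enable = 0;		/* dbs_timer_exit() path */
	info.cur_policy = NULL;		/* policy torn down afterwards */
	notifier_sketch(&info, 800);	/* late notification: safely ignored */

	printf("cur=%u requested=%u\n", p.cur, info.requested_freq);
	return 0;
}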