Merge branch 'pm-cpufreq'

* pm-cpufreq: (53 commits)
  cpufreq: speedstep-lib: Use monotonic clock
  cpufreq: powernv: Increase the verbosity of OCC console messages
  cpufreq: sfi: use kmemdup rather than duplicating its implementation
  cpufreq: drop !cpufreq_driver check from cpufreq_parse_governor()
  cpufreq: rename cpufreq_real_policy as cpufreq_user_policy
  cpufreq: remove redundant 'policy' field from user_policy
  cpufreq: remove redundant 'governor' field from user_policy
  cpufreq: update user_policy.* on success
  cpufreq: use memcpy() to copy policy
  cpufreq: remove redundant CPUFREQ_INCOMPATIBLE notifier event
  cpufreq: mediatek: Add MT8173 cpufreq driver
  dt-bindings: mediatek: Add MT8173 CPU DVFS clock bindings
  intel_pstate: append more Oracle OEM table id to vendor bypass list
  intel_pstate: Add SKY-S support
  intel_pstate: Fix possible overflow complained by Coverity
  cpufreq: Correct a freq check in cpufreq_set_policy()
  cpufreq: Lock CPU online/offline in cpufreq_register_driver()
  cpufreq: Replace recover_policy with new_policy in cpufreq_online()
  cpufreq: Separate CPU device registration from CPU online
  cpufreq: powernv: Restore cpu frequency to policy->cur on unthrottling
  ...
Author: Rafael J. Wysocki
Date:   2015-09-01 15:52:35 +02:00
Commit: 4ffe18c255

28 changed files with 1280 additions and 458 deletions


@@ -55,16 +55,13 @@ transition notifiers.
 ----------------------------
 
 These are notified when a new policy is intended to be set. Each
-CPUFreq policy notifier is called three times for a policy transition:
+CPUFreq policy notifier is called twice for a policy transition:
 
 1.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if
     they see a need for this - may it be thermal considerations or
     hardware limitations.
 
-2.) During CPUFREQ_INCOMPATIBLE only changes may be done in order to avoid
-    hardware failure.
-
-3.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
+2.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
     - if two hardware drivers failed to agree on a new policy before this
     stage, the incompatible hardware shall be shut down, and the user
     informed of this.
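
With CPUFREQ_INCOMPATIBLE gone, a policy notifier does all of its limit
adjustment at CPUFREQ_ADJUST, which is exactly the conversion applied to
acpi_processor_ppc_notifier further down. As a minimal sketch, a hypothetical
thermal-cap notifier (my_policy_notifier and my_thermal_max_khz are made-up
names; the callback signature, CPUFREQ_POLICY_NOTIFIER and
cpufreq_verify_within_limits() are the real kernel API) could look like:

	#include <linux/cpufreq.h>

	static unsigned int my_thermal_max_khz = 1200000; /* assumed cap */

	static int my_policy_notifier(struct notifier_block *nb,
				      unsigned long event, void *data)
	{
		struct cpufreq_policy *policy = data;

		/* Limits may only be adjusted during CPUFREQ_ADJUST now. */
		if (event != CPUFREQ_ADJUST)
			return 0;

		cpufreq_verify_within_limits(policy, 0, my_thermal_max_khz);
		return 0;
	}

	static struct notifier_block my_policy_nb = {
		.notifier_call = my_policy_notifier,
	};

	/* registered with:
	 *	cpufreq_register_notifier(&my_policy_nb, CPUFREQ_POLICY_NOTIFIER);
	 */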


@@ -0,0 +1,83 @@
Device Tree Clock bindings for CPU DVFS of Mediatek MT8173 SoC

Required properties:
- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock-names.
- clock-names: Should contain the following:
	"cpu"          - The multiplexer for clock input of CPU cluster.
	"intermediate" - A parent of "cpu" clock which is used as "intermediate" clock
			 source (usually MAINPLL) when the original CPU PLL is under
			 transition and not stable yet.
	Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for
	generic clock consumer properties.
- proc-supply: Regulator for Vproc of CPU cluster.

Optional properties:
- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver
	       needs to do "voltage tracking" to step by step scale up/down Vproc and
	       Vsram to fit SoC specific needs. When absent, the voltage scaling
	       flow is handled by hardware, hence no software "voltage tracking" is
	       needed.

Example:
--------
	cpu0: cpu@0 {
		device_type = "cpu";
		compatible = "arm,cortex-a53";
		reg = <0x000>;
		enable-method = "psci";
		cpu-idle-states = <&CPU_SLEEP_0>;
		clocks = <&infracfg CLK_INFRA_CA53SEL>,
			 <&apmixedsys CLK_APMIXED_MAINPLL>;
		clock-names = "cpu", "intermediate";
	};

	cpu1: cpu@1 {
		device_type = "cpu";
		compatible = "arm,cortex-a53";
		reg = <0x001>;
		enable-method = "psci";
		cpu-idle-states = <&CPU_SLEEP_0>;
		clocks = <&infracfg CLK_INFRA_CA53SEL>,
			 <&apmixedsys CLK_APMIXED_MAINPLL>;
		clock-names = "cpu", "intermediate";
	};

	cpu2: cpu@100 {
		device_type = "cpu";
		compatible = "arm,cortex-a57";
		reg = <0x100>;
		enable-method = "psci";
		cpu-idle-states = <&CPU_SLEEP_0>;
		clocks = <&infracfg CLK_INFRA_CA57SEL>,
			 <&apmixedsys CLK_APMIXED_MAINPLL>;
		clock-names = "cpu", "intermediate";
	};

	cpu3: cpu@101 {
		device_type = "cpu";
		compatible = "arm,cortex-a57";
		reg = <0x101>;
		enable-method = "psci";
		cpu-idle-states = <&CPU_SLEEP_0>;
		clocks = <&infracfg CLK_INFRA_CA57SEL>,
			 <&apmixedsys CLK_APMIXED_MAINPLL>;
		clock-names = "cpu", "intermediate";
	};

	&cpu0 {
		proc-supply = <&mt6397_vpca15_reg>;
	};

	&cpu1 {
		proc-supply = <&mt6397_vpca15_reg>;
	};

	&cpu2 {
		proc-supply = <&da9211_vcpu_reg>;
		sram-supply = <&mt6397_vsramca7_reg>;
	};

	&cpu3 {
		proc-supply = <&da9211_vcpu_reg>;
		sram-supply = <&mt6397_vsramca7_reg>;
	};
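
The "voltage tracking" described above can be pictured as alternately stepping
the two regulators so Vsram stays within a fixed window above Vproc. The sketch
below is illustrative only: the 100mV..200mV window and the helper name are
assumptions, not values taken from this binding; regulator_get_voltage() and
regulator_set_voltage() are the real regulator consumer API.

	#include <linux/kernel.h>
	#include <linux/regulator/consumer.h>

	#define VDIFF_MIN	100000	/* uV, assumed min Vsram - Vproc */
	#define VDIFF_MAX	200000	/* uV, assumed max Vsram - Vproc */

	/* Hypothetical scale-up path: raise Vsram first, then chase it with
	 * Vproc, never letting their difference leave the allowed window.
	 */
	static int voltage_tracking_scale_up(struct regulator *sram_reg,
					     struct regulator *proc_reg,
					     int target_vproc)
	{
		int vproc = regulator_get_voltage(proc_reg);
		int vsram;

		while (vproc < target_vproc) {
			vsram = min(vproc + VDIFF_MAX, target_vproc + VDIFF_MIN);
			regulator_set_voltage(sram_reg, vsram, vsram);

			vproc = min(vsram - VDIFF_MIN, target_vproc);
			regulator_set_voltage(proc_reg, vproc, vproc);
		}

		return 0;
	}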


@@ -361,6 +361,7 @@ enum opal_msg_type {
 	OPAL_MSG_HMI_EVT,
 	OPAL_MSG_DPO,
 	OPAL_MSG_PRD,
+	OPAL_MSG_OCC,
 	OPAL_MSG_TYPE_MAX,
 };
@@ -700,6 +701,17 @@ struct opal_prd_msg_header {
 
 struct opal_prd_msg;
 
+#define OCC_RESET                       0
+#define OCC_LOAD                        1
+#define OCC_THROTTLE                    2
+#define OCC_MAX_THROTTLE_STATUS         5
+
+struct opal_occ_msg {
+	__be64 type;
+	__be64 chip;
+	__be64 throttle_status;
+};
+
 /*
  * SG entries
  *
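
Because the opal_occ_msg fields are big-endian (__be64), a consumer of the new
OPAL_MSG_OCC notification has to byte-swap them before use. A minimal sketch
(handle_occ_msg is a made-up name; be64_to_cpu() and the OCC_* constants above
are real):

	static void handle_occ_msg(const struct opal_occ_msg *msg)
	{
		u64 type = be64_to_cpu(msg->type);
		u64 chip = be64_to_cpu(msg->chip);
		u64 throttled = be64_to_cpu(msg->throttle_status);

		if (type == OCC_THROTTLE && throttled <= OCC_MAX_THROTTLE_STATUS)
			pr_info("OCC: chip %llu throttle status %llu\n",
				chip, throttled);
	}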


@@ -83,7 +83,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
 	if (ignore_ppc)
 		return 0;
 
-	if (event != CPUFREQ_INCOMPATIBLE)
+	if (event != CPUFREQ_ADJUST)
 		return 0;
 
 	mutex_lock(&performance_mutex);
@@ -780,9 +780,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
 EXPORT_SYMBOL(acpi_processor_register_performance);
 
-void
-acpi_processor_unregister_performance(struct acpi_processor_performance
-				      *performance, unsigned int cpu)
+void acpi_processor_unregister_performance(unsigned int cpu)
 {
 	struct acpi_processor *pr;


@@ -130,6 +130,13 @@ config ARM_KIRKWOOD_CPUFREQ
 	  This adds the CPUFreq driver for Marvell Kirkwood
 	  SoCs.
 
+config ARM_MT8173_CPUFREQ
+	bool "Mediatek MT8173 CPUFreq support"
+	depends on ARCH_MEDIATEK && REGULATOR
+	select PM_OPP
+	help
+	  This adds the CPUFreq driver support for Mediatek MT8173 SoC.
+
 config ARM_OMAP2PLUS_CPUFREQ
 	bool "TI OMAP2+"
 	depends on ARCH_OMAP2PLUS


@@ -62,6 +62,7 @@ obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ)	+= hisi-acpu-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
 obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_MT8173_CPUFREQ)	+= mt8173-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)	+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o


@@ -65,18 +65,21 @@ enum {
 #define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
 struct acpi_cpufreq_data {
-	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int resume;
 	unsigned int cpu_feature;
+	unsigned int acpi_perf_cpu;
 	cpumask_var_t freqdomain_cpus;
 };
 
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance __percpu *acpi_perf_data;
 
+static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
+{
+	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
+}
+
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
@@ -144,7 +147,7 @@ static int _store_boost(int val)
 
 static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
 
 	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 }
@@ -202,7 +205,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 	struct acpi_processor_performance *perf;
 	int i;
 
-	perf = data->acpi_data;
+	perf = to_perf_data(data);
 	for (i = 0; i < perf->state_count; i++) {
 		if (value == perf->states[i].status)
@@ -221,7 +224,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 	else
 		msr &= INTEL_MSR_RANGE;
 
-	perf = data->acpi_data;
+	perf = to_perf_data(data);
 
 	cpufreq_for_each_entry(pos, data->freq_table)
 		if (msr == perf->states[pos->driver_data].status)
@@ -327,7 +330,8 @@ static void drv_write(struct drv_cmd *cmd)
 	put_cpu();
 }
 
-static u32 get_cur_val(const struct cpumask *mask)
+static u32
+get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
 {
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
@@ -335,7 +339,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 	if (unlikely(cpumask_empty(mask)))
 		return 0;
 
-	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
+	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -346,7 +350,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
+		perf = to_perf_data(data);
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -364,19 +368,24 @@ static u32 get_cur_val(const struct cpumask *mask)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
+	struct acpi_cpufreq_data *data;
+	struct cpufreq_policy *policy;
 	unsigned int freq;
 	unsigned int cached_freq;
 
 	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
 
-	if (unlikely(data == NULL ||
-		     data->acpi_data == NULL || data->freq_table == NULL)) {
+	policy = cpufreq_cpu_get(cpu);
+	if (unlikely(!policy))
 		return 0;
-	}
 
-	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+	data = policy->driver_data;
+	cpufreq_cpu_put(policy);
+	if (unlikely(!data || !data->freq_table))
+		return 0;
+
+	cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
+	freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
@@ -397,7 +406,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 	unsigned int i;
 
 	for (i = 0; i < 100; i++) {
-		cur_freq = extract_freq(get_cur_val(mask), data);
+		cur_freq = extract_freq(get_cur_val(mask, data), data);
 		if (cur_freq == freq)
 			return 1;
 		udelay(10);
@@ -408,18 +417,17 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 			       unsigned int index)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	int result = 0;
 
-	if (unlikely(data == NULL ||
-	     data->acpi_data == NULL || data->freq_table == NULL)) {
+	if (unlikely(data == NULL || data->freq_table == NULL)) {
 		return -ENODEV;
 	}
 
-	perf = data->acpi_data;
+	perf = to_perf_data(data);
 	next_perf_state = data->freq_table[index].driver_data;
 	if (perf->state == next_perf_state) {
 		if (unlikely(data->resume)) {
@@ -482,8 +490,9 @@ out:
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
-	struct acpi_processor_performance *perf = data->acpi_data;
+	struct acpi_processor_performance *perf;
 
+	perf = to_perf_data(data);
 	if (cpu_khz) {
 		/* search the closest match to cpu_khz */
 		unsigned int i;
@@ -672,17 +681,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		goto err_free;
 	}
 
-	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-	per_cpu(acfreq_data, cpu) = data;
+	perf = per_cpu_ptr(acpi_perf_data, cpu);
+	data->acpi_perf_cpu = cpu;
+	policy->driver_data = data;
 
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
 
-	result = acpi_processor_register_performance(data->acpi_data, cpu);
+	result = acpi_processor_register_performance(perf, cpu);
 	if (result)
 		goto err_free_mask;
 
-	perf = data->acpi_data;
 	policy->shared_type = perf->shared_type;
 
 	/*
@@ -838,26 +847,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 err_freqfree:
 	kfree(data->freq_table);
 err_unreg:
-	acpi_processor_unregister_performance(perf, cpu);
+	acpi_processor_unregister_performance(cpu);
 err_free_mask:
 	free_cpumask_var(data->freqdomain_cpus);
 err_free:
 	kfree(data);
-	per_cpu(acfreq_data, cpu) = NULL;
+	policy->driver_data = NULL;
 
 	return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
 
 	pr_debug("acpi_cpufreq_cpu_exit\n");
 
 	if (data) {
-		per_cpu(acfreq_data, policy->cpu) = NULL;
-		acpi_processor_unregister_performance(data->acpi_data,
-						      policy->cpu);
+		policy->driver_data = NULL;
+		acpi_processor_unregister_performance(data->acpi_perf_cpu);
 		free_cpumask_var(data->freqdomain_cpus);
 		kfree(data->freq_table);
 		kfree(data);
@@ -868,7 +876,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
 
 	pr_debug("acpi_cpufreq_resume\n");
@@ -880,7 +888,9 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 static struct freq_attr *acpi_cpufreq_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
 	&freqdomain_cpus,
-	NULL,	/* this is a placeholder for cpb, do not remove */
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+	&cpb,
+#endif
 	NULL,
 };
@@ -953,17 +963,16 @@ static int __init acpi_cpufreq_init(void)
 	 * only if configured. This is considered legacy code, which
 	 * will probably be removed at some point in the future.
	 */
-	if (check_amd_hwpstate_cpu(0)) {
-		struct freq_attr **iter;
+	if (!check_amd_hwpstate_cpu(0)) {
+		struct freq_attr **attr;
 
-		pr_debug("adding sysfs entry for cpb\n");
+		pr_debug("CPB unsupported, do not expose it\n");
 
-		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
-			;
-
-		/* make sure there is a terminator behind it */
-		if (iter[1] == NULL)
-			*iter = &cpb;
+		for (attr = acpi_cpufreq_attr; *attr; attr++)
+			if (*attr == &cpb) {
+				*attr = NULL;
+				break;
+			}
 	}
 #endif
 	acpi_cpufreq_boost_init();


@@ -112,12 +112,6 @@ static inline bool has_target(void)
 	return cpufreq_driver->target_index || cpufreq_driver->target;
 }
 
-/*
- * rwsem to guarantee that cpufreq driver module doesn't unload during critical
- * sections
- */
-static DECLARE_RWSEM(cpufreq_rwsem);
-
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy,
 		unsigned int event);
@@ -277,10 +271,6 @@ EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
 * freed as that depends on the kobj count.
 *
- * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
- * valid policy is found. This is done to make sure the driver doesn't get
- * unregistered while the policy is being used.
- *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
@@ -291,9 +281,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 	if (WARN_ON(cpu >= nr_cpu_ids))
 		return NULL;
 
-	if (!down_read_trylock(&cpufreq_rwsem))
-		return NULL;
-
 	/* get the cpufreq driver */
 	read_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -306,9 +293,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	if (!policy)
-		up_read(&cpufreq_rwsem);
-
 	return policy;
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -320,13 +304,10 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
- *
- * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
 void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
 	kobject_put(&policy->kobj);
-	up_read(&cpufreq_rwsem);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
@@ -539,9 +520,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
 {
 	int err = -EINVAL;
 
-	if (!cpufreq_driver)
-		goto out;
-
 	if (cpufreq_driver->setpolicy) {
 		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
 			*policy = CPUFREQ_POLICY_PERFORMANCE;
@@ -576,7 +554,6 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
 
 		mutex_unlock(&cpufreq_governor_mutex);
 	}
-out:
 	return err;
 }
@@ -625,9 +602,7 @@ static ssize_t store_##file_name					\
 	int ret, temp;							\
 	struct cpufreq_policy new_policy;				\
 									\
-	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
-	if (ret)							\
-		return -EINVAL;						\
+	memcpy(&new_policy, policy, sizeof(*policy));			\
 									\
 	ret = sscanf(buf, "%u", &new_policy.object);			\
 	if (ret != 1)							\
@@ -681,9 +656,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 	char str_governor[16];
 	struct cpufreq_policy new_policy;
 
-	ret = cpufreq_get_policy(&new_policy, policy->cpu);
-	if (ret)
-		return ret;
+	memcpy(&new_policy, policy, sizeof(*policy));
 
 	ret = sscanf(buf, "%15s", str_governor);
 	if (ret != 1)
@@ -694,14 +667,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 		return -EINVAL;
 
 	ret = cpufreq_set_policy(policy, &new_policy);
-
-	policy->user_policy.policy = policy->policy;
-	policy->user_policy.governor = policy->governor;
-
-	if (ret)
-		return ret;
-	else
-		return count;
+	return ret ? ret : count;
 }
 
 /**
@@ -851,9 +817,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 	struct freq_attr *fattr = to_attr(attr);
 	ssize_t ret;
 
-	if (!down_read_trylock(&cpufreq_rwsem))
-		return -EINVAL;
-
 	down_read(&policy->rwsem);
 
 	if (fattr->show)
@@ -862,7 +825,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 		ret = -EIO;
 
 	up_read(&policy->rwsem);
-	up_read(&cpufreq_rwsem);
 
 	return ret;
 }
@@ -879,9 +841,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	if (!cpu_online(policy->cpu))
 		goto unlock;
 
-	if (!down_read_trylock(&cpufreq_rwsem))
-		goto unlock;
-
 	down_write(&policy->rwsem);
 
 	/* Updating inactive policies is invalid, so avoid doing that. */
@@ -897,8 +856,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 unlock_policy_rwsem:
 	up_write(&policy->rwsem);
 
-	up_read(&cpufreq_rwsem);
-
 unlock:
 	put_online_cpus();
@@ -1027,8 +984,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
 	}
 }
 
-static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
-				     struct device *dev)
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
 {
 	struct freq_attr **drv_attr;
 	int ret = 0;
@@ -1060,11 +1016,10 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
 	return cpufreq_add_dev_symlink(policy);
 }
 
-static void cpufreq_init_policy(struct cpufreq_policy *policy)
+static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
 	struct cpufreq_governor *gov = NULL;
 	struct cpufreq_policy new_policy;
-	int ret = 0;
 
 	memcpy(&new_policy, policy, sizeof(*policy));
@@ -1083,16 +1038,10 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 
 	/* set default policy */
-	ret = cpufreq_set_policy(policy, &new_policy);
-	if (ret) {
-		pr_debug("setting policy failed\n");
-		if (cpufreq_driver->exit)
-			cpufreq_driver->exit(policy);
-	}
+	return cpufreq_set_policy(policy, &new_policy);
 }
 
-static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-				  unsigned int cpu, struct device *dev)
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
 	int ret = 0;
@@ -1126,33 +1075,15 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 	return 0;
 }
 
-static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
-{
-	struct cpufreq_policy *policy;
-	unsigned long flags;
-
-	read_lock_irqsave(&cpufreq_driver_lock, flags);
-	policy = per_cpu(cpufreq_cpu_data, cpu);
-	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-	if (likely(policy)) {
-		/* Policy should be inactive here */
-		WARN_ON(!policy_is_inactive(policy));
-
-		down_write(&policy->rwsem);
-		policy->cpu = cpu;
-		policy->governor = NULL;
-		up_write(&policy->rwsem);
-	}
-
-	return policy;
-}
-
-static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
+static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
+	struct device *dev = get_cpu_device(cpu);
 	struct cpufreq_policy *policy;
 	int ret;
 
+	if (WARN_ON(!dev))
+		return NULL;
+
 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
 	if (!policy)
 		return NULL;
@@ -1180,10 +1111,10 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
 
-	policy->cpu = dev->id;
+	policy->cpu = cpu;
 
 	/* Set this once on allocation */
-	policy->kobj_cpu = dev->id;
+	policy->kobj_cpu = cpu;
 
 	return policy;
@@ -1245,59 +1176,34 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
 	kfree(policy);
 }
 
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static int cpufreq_online(unsigned int cpu)
 {
-	unsigned int j, cpu = dev->id;
-	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
+	bool new_policy;
 	unsigned long flags;
-	bool recover_policy = !sif;
+	unsigned int j;
+	int ret;
 
-	pr_debug("adding CPU %u\n", cpu);
+	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
 
-	if (cpu_is_offline(cpu)) {
-		/*
-		 * Only possible if we are here from the subsys_interface add
-		 * callback. A hotplug notifier will follow and we will handle
-		 * it as CPU online then. For now, just create the sysfs link,
-		 * unless there is no policy or the link is already present.
-		 */
-		policy = per_cpu(cpufreq_cpu_data, cpu);
-		return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
-			? add_cpu_dev_symlink(policy, cpu) : 0;
-	}
-
-	if (!down_read_trylock(&cpufreq_rwsem))
-		return 0;
-
 	/* Check if this CPU already has a policy to manage it */
 	policy = per_cpu(cpufreq_cpu_data, cpu);
-	if (policy && !policy_is_inactive(policy)) {
+	if (policy) {
 		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
-		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-		up_read(&cpufreq_rwsem);
-		return ret;
-	}
+		if (!policy_is_inactive(policy))
+			return cpufreq_add_policy_cpu(policy, cpu);
 
-	/*
-	 * Restore the saved policy when doing light-weight init and fall back
-	 * to the full init if that fails.
-	 */
-	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
-	if (!policy) {
-		recover_policy = false;
-		policy = cpufreq_policy_alloc(dev);
+		/* This is the only online CPU for the policy.  Start over. */
+		new_policy = false;
+		down_write(&policy->rwsem);
+		policy->cpu = cpu;
+		policy->governor = NULL;
+		up_write(&policy->rwsem);
+	} else {
+		new_policy = true;
+		policy = cpufreq_policy_alloc(cpu);
 		if (!policy)
-			goto nomem_out;
+			return -ENOMEM;
 	}
 
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1308,17 +1214,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		pr_debug("initialization failed\n");
-		goto err_set_policy_cpu;
+		goto out_free_policy;
 	}
 
 	down_write(&policy->rwsem);
 
-	/* related cpus should atleast have policy->cpus */
-	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+	if (new_policy) {
+		/* related_cpus should at least include policy->cpus. */
+		cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
-	/* Remember which CPUs have been present at the policy creation time. */
-	if (!recover_policy)
-		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+		/* Remember CPUs present at the policy creation time. */
+		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+	}
 
 	/*
	 * affected cpus must always be the one, which are online. We aren't
@@ -1326,7 +1232,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
	 */
 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-	if (!recover_policy) {
+	if (new_policy) {
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;
@@ -1340,7 +1246,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		policy->cur = cpufreq_driver->get(policy->cpu);
 		if (!policy->cur) {
 			pr_err("%s: ->get() failed\n", __func__);
-			goto err_get_freq;
+			goto out_exit_policy;
 		}
 	}
@@ -1387,10 +1293,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-	if (!recover_policy) {
-		ret = cpufreq_add_dev_interface(policy, dev);
+	if (new_policy) {
+		ret = cpufreq_add_dev_interface(policy);
 		if (ret)
-			goto err_out_unregister;
+			goto out_exit_policy;
 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				CPUFREQ_CREATE_POLICY, policy);
@@ -1399,18 +1305,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	}
 
-	cpufreq_init_policy(policy);
-
-	if (!recover_policy) {
-		policy->user_policy.policy = policy->policy;
-		policy->user_policy.governor = policy->governor;
+	ret = cpufreq_init_policy(policy);
+	if (ret) {
+		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
+		       __func__, cpu, ret);
+		/* cpufreq_policy_free() will notify based on this */
+		new_policy = false;
+		goto out_exit_policy;
 	}
+
 	up_write(&policy->rwsem);
 
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
-	up_read(&cpufreq_rwsem);
 
 	/* Callback for handling stuff after policy is ready */
 	if (cpufreq_driver->ready)
 		cpufreq_driver->ready(policy);
@@ -1419,24 +1326,47 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
 	return 0;
 
-err_out_unregister:
-err_get_freq:
+out_exit_policy:
 	up_write(&policy->rwsem);
 
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
-err_set_policy_cpu:
-	cpufreq_policy_free(policy, recover_policy);
-nomem_out:
-	up_read(&cpufreq_rwsem);
+out_free_policy:
+	cpufreq_policy_free(policy, !new_policy);
+	return ret;
+}
+
+/**
+ * cpufreq_add_dev - the cpufreq interface for a CPU device.
+ * @dev: CPU device.
+ * @sif: Subsystem interface structure pointer (not used)
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+	unsigned cpu = dev->id;
+	int ret;
+
+	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
+
+	if (cpu_online(cpu)) {
+		ret = cpufreq_online(cpu);
+	} else {
+		/*
+		 * A hotplug notifier will follow and we will handle it as CPU
+		 * online then.  For now, just create the sysfs link, unless
+		 * there is no policy or the link is already present.
+		 */
+		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+		ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+			? add_cpu_dev_symlink(policy, cpu) : 0;
+	}
 
 	return ret;
 }
 
-static int __cpufreq_remove_dev_prepare(struct device *dev)
+static void cpufreq_offline_prepare(unsigned int cpu)
 {
-	unsigned int cpu = dev->id;
-	int ret = 0;
 	struct cpufreq_policy *policy;
 
 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1444,11 +1374,11 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
 	policy = cpufreq_cpu_get_raw(cpu);
 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
-		return -EINVAL;
+		return;
 	}
 
 	if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+
 		if (ret)
 			pr_err("%s: Failed to stop governor\n", __func__);
 	}
@@ -1469,7 +1399,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
 	/* Start governor again for active policy */
 	if (!policy_is_inactive(policy)) {
 		if (has_target()) {
-			ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+			int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+
 			if (!ret)
 				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
@@ -1479,28 +1409,24 @@ static int __cpufreq_remove_dev_prepare(struct device *dev)
 	} else if (cpufreq_driver->stop_cpu) {
 		cpufreq_driver->stop_cpu(policy);
 	}
-
-	return ret;
 }
 
-static int __cpufreq_remove_dev_finish(struct device *dev)
+static void cpufreq_offline_finish(unsigned int cpu)
 {
-	unsigned int cpu = dev->id;
-	int ret;
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
-		return -EINVAL;
+		return;
 	}
 
 	/* Only proceed for inactive policies */
 	if (!policy_is_inactive(policy))
-		return 0;
+		return;
 
 	/* If cpu is last user of policy, free policy */
 	if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+
 		if (ret)
 			pr_err("%s: Failed to exit governor\n", __func__);
 	}
@@ -1512,8 +1438,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev)
	 */
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
-
-	return 0;
 }
@@ -1530,8 +1454,8 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 		return 0;
 
 	if (cpu_online(cpu)) {
-		__cpufreq_remove_dev_prepare(dev);
-		__cpufreq_remove_dev_finish(dev);
+		cpufreq_offline_prepare(cpu);
+		cpufreq_offline_finish(cpu);
 	}
 
 	cpumask_clear_cpu(cpu, policy->real_cpus);
@@ -2247,7 +2171,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 
 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-	if (new_policy->min > policy->max || new_policy->max < policy->min)
+	/*
+	 * This check works well when we store new min/max freq attributes,
+	 * because new_policy is a copy of policy with one field updated.
+	 */
+	if (new_policy->min > new_policy->max)
 		return -EINVAL;
 
 	/* verify the cpu speed can be set within this limit */
@@ -2259,10 +2187,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_ADJUST, new_policy);
 
-	/* adjust if necessary - hardware incompatibility*/
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-			CPUFREQ_INCOMPATIBLE, new_policy);
-
 	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
@@ -2296,16 +2220,31 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	old_gov = policy->governor;
 	/* end old governor */
 	if (old_gov) {
-		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		if (ret) {
+			/* This can happen due to race with other operations */
+			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
+				 __func__, old_gov->name, ret);
+			return ret;
+		}
+
 		up_write(&policy->rwsem);
-		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 		down_write(&policy->rwsem);
+
+		if (ret) {
+			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
+			       __func__, old_gov->name, ret);
+			return ret;
+		}
 	}
 
 	/* start new governor */
 	policy->governor = new_policy->governor;
-	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+	if (!ret) {
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+		if (!ret)
 			goto out;
 
 		up_write(&policy->rwsem);
@@ -2317,11 +2256,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	pr_debug("starting governor %s failed\n", policy->governor->name);
 	if (old_gov) {
 		policy->governor = old_gov;
-		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
-		__cpufreq_governor(policy, CPUFREQ_GOV_START);
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+			policy->governor = NULL;
+		else
+			__cpufreq_governor(policy, CPUFREQ_GOV_START);
 	}
 
-	return -EINVAL;
+	return ret;
 
 out:
 	pr_debug("governor: change or update limits\n");
@@ -2350,8 +2291,6 @@ int cpufreq_update_policy(unsigned int cpu)
 	memcpy(&new_policy, policy, sizeof(*policy));
 	new_policy.min = policy->user_policy.min;
 	new_policy.max = policy->user_policy.max;
-	new_policy.policy = policy->user_policy.policy;
-	new_policy.governor = policy->user_policy.governor;
 
 	/*
	 * BIOS might change freq behind our back
@@ -2387,27 +2326,23 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct device *dev;
 
-	dev = get_cpu_device(cpu);
-	if (dev) {
-		switch (action & ~CPU_TASKS_FROZEN) {
-		case CPU_ONLINE:
-			cpufreq_add_dev(dev, NULL);
-			break;
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+		cpufreq_online(cpu);
+		break;
 
-		case CPU_DOWN_PREPARE:
-			__cpufreq_remove_dev_prepare(dev);
-			break;
+	case CPU_DOWN_PREPARE:
+		cpufreq_offline_prepare(cpu);
+		break;
 
-		case CPU_POST_DEAD:
-			__cpufreq_remove_dev_finish(dev);
-			break;
+	case CPU_POST_DEAD:
+		cpufreq_offline_finish(cpu);
+		break;
 
-		case CPU_DOWN_FAILED:
-			cpufreq_add_dev(dev, NULL);
-			break;
-		}
+	case CPU_DOWN_FAILED:
+		cpufreq_online(cpu);
+		break;
 	}
 	return NOTIFY_OK;
 }
@@ -2515,10 +2450,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
 
+	/* Protect against concurrent CPU online/offline. */
+	get_online_cpus();
+
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	if (cpufreq_driver) {
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		return -EEXIST;
+		ret = -EEXIST;
+		goto out;
 	}
 	cpufreq_driver = driver_data;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2557,7 +2496,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
 	pr_debug("driver %s up and running\n", driver_data->name);
 
-	return 0;
+out:
+	put_online_cpus();
+	return ret;
+
 err_if_unreg:
 	subsys_interface_unregister(&cpufreq_interface);
 err_boost_unreg:
@@ -2567,7 +2509,7 @@ err_null_driver:
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	return ret;
+	goto out;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
@@ -2588,19 +2530,20 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 
 	pr_debug("unregistering driver %s\n", driver->name);
 
+	/* Protect against concurrent cpu hotplug */
+	get_online_cpus();
 	subsys_interface_unregister(&cpufreq_interface);
 	if (cpufreq_boost_supported())
 		cpufreq_sysfs_remove_file(&boost.attr);
 
 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
-	down_write(&cpufreq_rwsem);
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	cpufreq_driver = NULL;
 
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	up_write(&cpufreq_rwsem);
+	put_online_cpus();
 
 	return 0;
 }


@@ -47,7 +47,7 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 static void cs_check_cpu(int cpu, unsigned int load)
 {
 	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
 	struct dbs_data *dbs_data = policy->governor_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
@@ -102,26 +102,15 @@ static void cs_check_cpu(int cpu, unsigned int load)
 	}
 }
 
-static void cs_dbs_timer(struct work_struct *work)
+static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
+				 struct dbs_data *dbs_data, bool modify_all)
 {
-	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
-			cpu);
-	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
-	bool modify_all = true;
 
-	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
-		modify_all = false;
-	else
-		dbs_check_cpu(dbs_data, cpu);
+	if (modify_all)
+		dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);
 
-	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+	return delay_for_sampling_rate(cs_tuners->sampling_rate);
 }
 
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -135,7 +124,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	if (!dbs_info->enable)
 		return 0;
 
-	policy = dbs_info->cdbs.cur_policy;
+	policy = dbs_info->cdbs.shared->policy;
 
 	/*
	 * we only care if our internally tracked freq moves outside the 'valid'


@@ -32,10 +32,10 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
 
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 {
-	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-	struct cpufreq_policy *policy;
+	struct cpufreq_policy *policy = cdbs->shared->policy;
 	unsigned int sampling_rate;
 	unsigned int max_load = 0;
 	unsigned int ignore_nice;
@@ -60,11 +60,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 		ignore_nice = cs_tuners->ignore_nice_load;
 	}
 
-	policy = cdbs->cur_policy;
-
 	/* Get Absolute Load */
 	for_each_cpu(j, policy->cpus) {
-		struct cpu_dbs_common_info *j_cdbs;
+		struct cpu_dbs_info *j_cdbs;
 		u64 cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
 		unsigned int load;
@@ -163,9 +161,9 @@ EXPORT_SYMBOL_GPL(dbs_check_cpu);
 static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
 		unsigned int delay)
 {
-	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
+	mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
 }
 
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
@@ -199,33 +197,63 @@ EXPORT_SYMBOL_GPL(gov_queue_work);
 static inline void gov_cancel_work(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy)
 {
-	struct cpu_dbs_common_info *cdbs;
+	struct cpu_dbs_info *cdbs;
 	int i;
 
 	for_each_cpu(i, policy->cpus) {
 		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
-		cancel_delayed_work_sync(&cdbs->work);
+		cancel_delayed_work_sync(&cdbs->dwork);
 	}
 }
 
 /* Will return if we need to evaluate cpu load again or not */
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-		unsigned int sampling_rate)
+static bool need_load_eval(struct cpu_common_dbs_info *shared,
+			   unsigned int sampling_rate)
 {
-	if (policy_is_shared(cdbs->cur_policy)) {
+	if (policy_is_shared(shared->policy)) {
 		ktime_t time_now = ktime_get();
-		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+		s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
 
 		/* Do nothing if we recently have sampled */
 		if (delta_us < (s64)(sampling_rate / 2))
 			return false;
 		else
-			cdbs->time_stamp = time_now;
+			shared->time_stamp = time_now;
 	}
 
 	return true;
 }
-EXPORT_SYMBOL_GPL(need_load_eval);
+
+static void dbs_timer(struct work_struct *work)
+{
+	struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
+						 dwork.work);
+	struct cpu_common_dbs_info *shared = cdbs->shared;
+	struct cpufreq_policy *policy = shared->policy;
+	struct dbs_data *dbs_data = policy->governor_data;
+	unsigned int sampling_rate, delay;
+	bool modify_all = true;
+
+	mutex_lock(&shared->timer_mutex);
+
+	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+
+		sampling_rate = cs_tuners->sampling_rate;
+	} else {
+		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+		sampling_rate = od_tuners->sampling_rate;
+	}
+
+	if (!need_load_eval(cdbs->shared, sampling_rate))
+		modify_all = false;
+
+	delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
+	gov_queue_work(dbs_data, policy, delay, modify_all);
+
+	mutex_unlock(&shared->timer_mutex);
+}
 
 static void set_sampling_rate(struct dbs_data *dbs_data,
 		unsigned int sampling_rate)
@@ -239,6 +267,37 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
 	}
 }
 
+static int alloc_common_dbs_info(struct cpufreq_policy *policy,
+				 struct common_dbs_data *cdata)
+{
+	struct cpu_common_dbs_info *shared;
+	int j;
+
+	/* Allocate memory for the common information for policy->cpus */
+	shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+	if (!shared)
+		return -ENOMEM;
+
+	/* Set shared for all CPUs, online+offline */
+	for_each_cpu(j, policy->related_cpus)
+		cdata->get_cpu_cdbs(j)->shared = shared;
+
+	return 0;
+}
+
+static void free_common_dbs_info(struct cpufreq_policy *policy,
+				 struct common_dbs_data *cdata)
+{
+	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+	struct cpu_common_dbs_info *shared = cdbs->shared;
+	int j;
+
+	for_each_cpu(j, policy->cpus)
+		cdata->get_cpu_cdbs(j)->shared = NULL;
+
+	kfree(shared);
+}
+
 static int cpufreq_governor_init(struct cpufreq_policy *policy,
 				 struct dbs_data *dbs_data,
 				 struct common_dbs_data *cdata)
@@ -246,9 +305,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
 	unsigned int latency;
 	int ret;
 
+	/* State should be equivalent to EXIT */
+	if (policy->governor_data)
+		return -EBUSY;
+
 	if (dbs_data) {
 		if (WARN_ON(have_governor_per_policy()))
 			return -EINVAL;
+
+		ret = alloc_common_dbs_info(policy, cdata);
+		if (ret)
+			return ret;
+
 		dbs_data->usage_count++;
 		policy->governor_data = dbs_data;
 		return 0;
@@ -258,12 +326,16 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
 	if (!dbs_data)
 		return -ENOMEM;
 
+	ret = alloc_common_dbs_info(policy, cdata);
+	if (ret)
+		goto free_dbs_data;
+
 	dbs_data->cdata = cdata;
 	dbs_data->usage_count = 1;
 
 	ret = cdata->init(dbs_data, !policy->governor->initialized);
 	if (ret)
-		goto free_dbs_data;
+		goto free_common_dbs_info;
 
 	/* policy latency is in ns. Convert it to us first */
 	latency = policy->cpuinfo.transition_latency / 1000;
@@ -300,15 +372,22 @@ put_kobj:
 	}
 cdata_exit:
 	cdata->exit(dbs_data, !policy->governor->initialized);
+free_common_dbs_info:
+	free_common_dbs_info(policy, cdata);
 free_dbs_data:
 	kfree(dbs_data);
 	return ret;
 }
 
-static void cpufreq_governor_exit(struct cpufreq_policy *policy,
-				  struct dbs_data *dbs_data)
+static int cpufreq_governor_exit(struct cpufreq_policy *policy,
+				 struct dbs_data *dbs_data)
 {
 	struct common_dbs_data *cdata = dbs_data->cdata;
+	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+
+	/* State should be equivalent to INIT */
+	if (!cdbs->shared || cdbs->shared->policy)
+		return -EBUSY;
 
 	policy->governor_data = NULL;
 	if (!--dbs_data->usage_count) {
@@ -323,6 +402,9 @@ static void cpufreq_governor_exit(struct cpufreq_policy *policy,
 		cdata->exit(dbs_data, policy->governor->initialized == 1);
 		kfree(dbs_data);
 	}
+
+	free_common_dbs_info(policy, cdata);
+	return 0;
 }
@@ -330,12 +412,17 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 {
 	struct common_dbs_data *cdata = dbs_data->cdata;
 	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
-	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_common_dbs_info *shared = cdbs->shared;
 	int io_busy = 0;
 
 	if (!policy->cur)
 		return -EINVAL;
 
+	/* State should be equivalent to INIT */
+	if (!shared || shared->policy)
+		return -EBUSY;
+
 	if (cdata->governor == GOV_CONSERVATIVE) {
 		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
@@ -349,12 +436,14 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 		io_busy = od_tuners->io_is_busy;
 	}
 
+	shared->policy = policy;
+	shared->time_stamp = ktime_get();
+	mutex_init(&shared->timer_mutex);
+
 	for_each_cpu(j, policy->cpus) {
-		struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j);
+		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
 		unsigned int prev_load;
 
-		j_cdbs->cpu = j;
-		j_cdbs->cur_policy = policy;
 		j_cdbs->prev_cpu_idle =
 			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
@@ -366,8 +455,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 		if (ignore_nice)
 			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
-		mutex_init(&j_cdbs->timer_mutex);
-		INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
+		INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer);
 	}
 
 	if (cdata->governor == GOV_CONSERVATIVE) {
@ -386,20 +474,24 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
od_ops->powersave_bias_init_cpu(cpu); od_ops->powersave_bias_init_cpu(cpu);
} }
/* Initiate timer time stamp */
cpu_cdbs->time_stamp = ktime_get();
gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate), gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
true); true);
return 0; return 0;
} }
static void cpufreq_governor_stop(struct cpufreq_policy *policy, static int cpufreq_governor_stop(struct cpufreq_policy *policy,
struct dbs_data *dbs_data) struct dbs_data *dbs_data)
{ {
struct common_dbs_data *cdata = dbs_data->cdata; struct common_dbs_data *cdata = dbs_data->cdata;
unsigned int cpu = policy->cpu; unsigned int cpu = policy->cpu;
struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
struct cpu_common_dbs_info *shared = cdbs->shared;
/* State should be equivalent to START */
if (!shared || !shared->policy)
return -EBUSY;
gov_cancel_work(dbs_data, policy);
if (cdata->governor == GOV_CONSERVATIVE) { if (cdata->governor == GOV_CONSERVATIVE) {
struct cs_cpu_dbs_info_s *cs_dbs_info = struct cs_cpu_dbs_info_s *cs_dbs_info =
@ -408,38 +500,40 @@ static void cpufreq_governor_stop(struct cpufreq_policy *policy,
cs_dbs_info->enable = 0; cs_dbs_info->enable = 0;
} }
gov_cancel_work(dbs_data, policy); shared->policy = NULL;
mutex_destroy(&shared->timer_mutex);
mutex_destroy(&cpu_cdbs->timer_mutex); return 0;
cpu_cdbs->cur_policy = NULL;
} }
static void cpufreq_governor_limits(struct cpufreq_policy *policy, static int cpufreq_governor_limits(struct cpufreq_policy *policy,
struct dbs_data *dbs_data) struct dbs_data *dbs_data)
{ {
struct common_dbs_data *cdata = dbs_data->cdata; struct common_dbs_data *cdata = dbs_data->cdata;
unsigned int cpu = policy->cpu; unsigned int cpu = policy->cpu;
struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
if (!cpu_cdbs->cur_policy) /* State should be equivalent to START */
return; if (!cdbs->shared || !cdbs->shared->policy)
return -EBUSY;
mutex_lock(&cpu_cdbs->timer_mutex); mutex_lock(&cdbs->shared->timer_mutex);
if (policy->max < cpu_cdbs->cur_policy->cur) if (policy->max < cdbs->shared->policy->cur)
__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max, __cpufreq_driver_target(cdbs->shared->policy, policy->max,
CPUFREQ_RELATION_H); CPUFREQ_RELATION_H);
else if (policy->min > cpu_cdbs->cur_policy->cur) else if (policy->min > cdbs->shared->policy->cur)
__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min, __cpufreq_driver_target(cdbs->shared->policy, policy->min,
CPUFREQ_RELATION_L); CPUFREQ_RELATION_L);
dbs_check_cpu(dbs_data, cpu); dbs_check_cpu(dbs_data, cpu);
mutex_unlock(&cpu_cdbs->timer_mutex); mutex_unlock(&cdbs->shared->timer_mutex);
return 0;
} }
int cpufreq_governor_dbs(struct cpufreq_policy *policy, int cpufreq_governor_dbs(struct cpufreq_policy *policy,
struct common_dbs_data *cdata, unsigned int event) struct common_dbs_data *cdata, unsigned int event)
{ {
struct dbs_data *dbs_data; struct dbs_data *dbs_data;
int ret = 0; int ret;
/* Lock governor to block concurrent initialization of governor */ /* Lock governor to block concurrent initialization of governor */
mutex_lock(&cdata->mutex); mutex_lock(&cdata->mutex);
@ -449,7 +543,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
else else
dbs_data = cdata->gdbs_data; dbs_data = cdata->gdbs_data;
if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) { if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
ret = -EINVAL; ret = -EINVAL;
goto unlock; goto unlock;
} }
@ -459,17 +553,19 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
ret = cpufreq_governor_init(policy, dbs_data, cdata); ret = cpufreq_governor_init(policy, dbs_data, cdata);
break; break;
case CPUFREQ_GOV_POLICY_EXIT: case CPUFREQ_GOV_POLICY_EXIT:
cpufreq_governor_exit(policy, dbs_data); ret = cpufreq_governor_exit(policy, dbs_data);
break; break;
case CPUFREQ_GOV_START: case CPUFREQ_GOV_START:
ret = cpufreq_governor_start(policy, dbs_data); ret = cpufreq_governor_start(policy, dbs_data);
break; break;
case CPUFREQ_GOV_STOP: case CPUFREQ_GOV_STOP:
cpufreq_governor_stop(policy, dbs_data); ret = cpufreq_governor_stop(policy, dbs_data);
break; break;
case CPUFREQ_GOV_LIMITS: case CPUFREQ_GOV_LIMITS:
cpufreq_governor_limits(policy, dbs_data); ret = cpufreq_governor_limits(policy, dbs_data);
break; break;
default:
ret = -EINVAL;
} }
unlock: unlock:
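With every helper above now returning a status, cpufreq_governor_dbs() can reject out-of-order events instead of silently corrupting governor state. A sketch of the resulting contract (hypothetical caller, not part of this merge):

    ret = cpufreq_governor_dbs(policy, cdata, CPUFREQ_GOV_POLICY_INIT); /* 0 on success */
    ret = cpufreq_governor_dbs(policy, cdata, CPUFREQ_GOV_STOP);        /* -EBUSY: not started yet */
    ret = cpufreq_governor_dbs(policy, cdata, CPUFREQ_GOV_START);       /* 0: INIT -> START */
    ret = cpufreq_governor_dbs(policy, cdata, CPUFREQ_GOV_START);       /* -EBUSY: already started */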
drivers/cpufreq/cpufreq_governor.h
@@ -109,7 +109,7 @@ store_one(_gov, file_name)
 /* create helper routines */
 #define define_get_cpu_dbs_routines(_dbs_info)				\
-static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu)		\
+static struct cpu_dbs_info *get_cpu_cdbs(int cpu)			\
 {									\
 	return &per_cpu(_dbs_info, cpu).cdbs;				\
 }									\

@@ -128,9 +128,20 @@ static void *get_cpu_dbs_info_s(int cpu)		\
  * cs_*: Conservative governor
  */

+/* Common to all CPUs of a policy */
+struct cpu_common_dbs_info {
+	struct cpufreq_policy *policy;
+	/*
+	 * percpu mutex that serializes governor limit change with dbs_timer
+	 * invocation. We do not want dbs_timer to run when user is changing
+	 * the governor or limits.
+	 */
+	struct mutex timer_mutex;
+	ktime_t time_stamp;
+};
+
 /* Per cpu structures */
-struct cpu_dbs_common_info {
-	int cpu;
+struct cpu_dbs_info {
 	u64 prev_cpu_idle;
 	u64 prev_cpu_wall;
 	u64 prev_cpu_nice;

@@ -141,19 +152,12 @@ struct cpu_dbs_common_info {
 	 * wake-up from idle.
 	 */
 	unsigned int prev_load;
-	struct cpufreq_policy *cur_policy;
-	struct delayed_work work;
-	/*
-	 * percpu mutex that serializes governor limit change with gov_dbs_timer
-	 * invocation. We do not want gov_dbs_timer to run when user is changing
-	 * the governor or limits.
-	 */
-	struct mutex timer_mutex;
-	ktime_t time_stamp;
+	struct delayed_work dwork;
+	struct cpu_common_dbs_info *shared;
 };

 struct od_cpu_dbs_info_s {
-	struct cpu_dbs_common_info cdbs;
+	struct cpu_dbs_info cdbs;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;

@@ -163,7 +167,7 @@ struct od_cpu_dbs_info_s {
 };

 struct cs_cpu_dbs_info_s {
-	struct cpu_dbs_common_info cdbs;
+	struct cpu_dbs_info cdbs;
 	unsigned int down_skip;
 	unsigned int requested_freq;
 	unsigned int enable:1;

@@ -204,9 +208,11 @@ struct common_dbs_data {
 	 */
 	struct dbs_data *gdbs_data;

-	struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
+	struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
 	void *(*get_cpu_dbs_info_s)(int cpu);
-	void (*gov_dbs_timer)(struct work_struct *work);
+	unsigned int (*gov_dbs_timer)(struct cpu_dbs_info *cdbs,
+				      struct dbs_data *dbs_data,
+				      bool modify_all);
 	void (*gov_check_cpu)(int cpu, unsigned int load);
 	int (*init)(struct dbs_data *dbs_data, bool notify);
 	void (*exit)(struct dbs_data *dbs_data, bool notify);

@@ -265,8 +271,6 @@ static ssize_t show_sampling_rate_min_gov_pol
 extern struct mutex cpufreq_governor_lock;

 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-		    unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		struct common_dbs_data *cdata, unsigned int event);
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
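The gov_dbs_timer() callback no longer receives a work_struct; the common code owns the per-CPU delayed work (dwork) and treats the returned value as the next sampling delay in jiffies. A simplified sketch of the common timer entry point this signature implies (load-evaluation and modify_all handling elided; the real dbs_timer lives in cpufreq_governor.c):

    static void dbs_timer(struct work_struct *work)
    {
        struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
                                                 dwork.work);
        struct cpu_common_dbs_info *shared = cdbs->shared;
        struct cpufreq_policy *policy = shared->policy;
        struct dbs_data *dbs_data = policy->governor_data;
        unsigned int delay;

        /* One policy-wide mutex now serializes sampling and limit changes. */
        mutex_lock(&shared->timer_mutex);
        delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, true);
        mutex_unlock(&shared->timer_mutex);

        gov_queue_work(dbs_data, policy, delay, true);
    }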
drivers/cpufreq/cpufreq_ondemand.c
@@ -155,7 +155,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
 static void od_check_cpu(int cpu, unsigned int load)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
 	struct dbs_data *dbs_data = policy->governor_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

@@ -191,46 +191,40 @@ static void od_check_cpu(int cpu, unsigned int load)
 	}
 }

-static void od_dbs_timer(struct work_struct *work)
+static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs,
+				 struct dbs_data *dbs_data, bool modify_all)
 {
-	struct od_cpu_dbs_info_s *dbs_info =
-		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
-							   cpu);
-	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+	struct cpufreq_policy *policy = cdbs->shared->policy;
+	unsigned int cpu = policy->cpu;
+	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+						      cpu);
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-	int delay = 0, sample_type = core_dbs_info->sample_type;
-	bool modify_all = true;
+	int delay = 0, sample_type = dbs_info->sample_type;

-	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
-		modify_all = false;
+	if (!modify_all)
 		goto max_delay;
-	}

 	/* Common NORMAL_SAMPLE setup */
-	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+	dbs_info->sample_type = OD_NORMAL_SAMPLE;
 	if (sample_type == OD_SUB_SAMPLE) {
-		delay = core_dbs_info->freq_lo_jiffies;
-		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
-				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
+		delay = dbs_info->freq_lo_jiffies;
+		__cpufreq_driver_target(policy, dbs_info->freq_lo,
+					CPUFREQ_RELATION_H);
 	} else {
 		dbs_check_cpu(dbs_data, cpu);
-		if (core_dbs_info->freq_lo) {
+		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			core_dbs_info->sample_type = OD_SUB_SAMPLE;
-			delay = core_dbs_info->freq_hi_jiffies;
+			dbs_info->sample_type = OD_SUB_SAMPLE;
+			delay = dbs_info->freq_hi_jiffies;
 		}
 	}

 max_delay:
 	if (!delay)
 		delay = delay_for_sampling_rate(od_tuners->sampling_rate
-				* core_dbs_info->rate_mult);
+				* dbs_info->rate_mult);

-	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+	return delay;
 }

 /************************** sysfs interface ************************/

@@ -273,27 +267,27 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 		cpufreq_cpu_put(policy);

-		mutex_lock(&dbs_info->cdbs.timer_mutex);
+		mutex_lock(&dbs_info->cdbs.shared->timer_mutex);

-		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
-			mutex_unlock(&dbs_info->cdbs.timer_mutex);
+		if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
+			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 			continue;
 		}

 		next_sampling = jiffies + usecs_to_jiffies(new_rate);
-		appointed_at = dbs_info->cdbs.work.timer.expires;
+		appointed_at = dbs_info->cdbs.dwork.timer.expires;

 		if (time_before(next_sampling, appointed_at)) {
-			mutex_unlock(&dbs_info->cdbs.timer_mutex);
-			cancel_delayed_work_sync(&dbs_info->cdbs.work);
-			mutex_lock(&dbs_info->cdbs.timer_mutex);
+			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
+			cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
+			mutex_lock(&dbs_info->cdbs.shared->timer_mutex);

-			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
+			gov_queue_work(dbs_data, policy,
 				       usecs_to_jiffies(new_rate), true);
 		}
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
+		mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 	}
 }

@@ -556,13 +550,16 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
+		struct cpu_common_dbs_info *shared;
+
 		if (cpumask_test_cpu(cpu, &done))
 			continue;

-		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
-		if (!policy)
+		shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
+		if (!shared)
 			continue;
+
+		policy = shared->policy;
 		cpumask_or(&done, &done, policy->cpus);
 		if (policy->governor != &cpufreq_gov_ondemand)
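The reworked od_dbs_timer() above only computes the next delay; locking and re-queuing are handled by the common governor code. The NORMAL/SUB sample pair implements powersave_bias: when the biased target falls between two table frequencies, the governor alternates, e.g. (illustrative numbers) serving a 10 ms period as roughly 6 ms at the next higher frequency followed by 4 ms at the lower one, so the time-weighted average approximates the request.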
drivers/cpufreq/e_powersaver.c
@@ -78,7 +78,7 @@ static int eps_acpi_init(void)
 static int eps_acpi_exit(struct cpufreq_policy *policy)
 {
 	if (eps_acpi_cpu_perf) {
-		acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0);
+		acpi_processor_unregister_performance(0);
 		free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
 		kfree(eps_acpi_cpu_perf);
 		eps_acpi_cpu_perf = NULL;
drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -29,7 +29,6 @@ MODULE_LICENSE("GPL");
 struct cpufreq_acpi_io {
 	struct acpi_processor_performance	acpi_data;
-	struct cpufreq_frequency_table		*freq_table;
 	unsigned int				resume;
 };

@@ -221,6 +220,7 @@ acpi_cpufreq_cpu_init (
 	unsigned int		cpu = policy->cpu;
 	struct cpufreq_acpi_io	*data;
 	unsigned int		result = 0;
+	struct cpufreq_frequency_table *freq_table;

 	pr_debug("acpi_cpufreq_cpu_init\n");

@@ -254,10 +254,10 @@ acpi_cpufreq_cpu_init (
 	}

 	/* alloc freq_table */
-	data->freq_table = kzalloc(sizeof(*data->freq_table) *
+	freq_table = kzalloc(sizeof(*freq_table) *
 	                           (data->acpi_data.state_count + 1),
 	                           GFP_KERNEL);
-	if (!data->freq_table) {
+	if (!freq_table) {
 		result = -ENOMEM;
 		goto err_unreg;
 	}

@@ -276,14 +276,14 @@ acpi_cpufreq_cpu_init (
 	for (i = 0; i <= data->acpi_data.state_count; i++)
 	{
 		if (i < data->acpi_data.state_count) {
-			data->freq_table[i].frequency =
+			freq_table[i].frequency =
 				data->acpi_data.states[i].core_frequency * 1000;
 		} else {
-			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+			freq_table[i].frequency = CPUFREQ_TABLE_END;
 		}
 	}

-	result = cpufreq_table_validate_and_show(policy, data->freq_table);
+	result = cpufreq_table_validate_and_show(policy, freq_table);
 	if (result) {
 		goto err_freqfree;
 	}

@@ -311,9 +311,9 @@ acpi_cpufreq_cpu_init (
 	return (result);

 err_freqfree:
-	kfree(data->freq_table);
+	kfree(freq_table);
 err_unreg:
-	acpi_processor_unregister_performance(&data->acpi_data, cpu);
+	acpi_processor_unregister_performance(cpu);
 err_free:
 	kfree(data);
 	acpi_io_data[cpu] = NULL;

@@ -332,8 +332,8 @@ acpi_cpufreq_cpu_exit (
 	if (data) {
 		acpi_io_data[policy->cpu] = NULL;
-		acpi_processor_unregister_performance(&data->acpi_data,
-						      policy->cpu);
+		acpi_processor_unregister_performance(policy->cpu);
+		kfree(policy->freq_table);
 		kfree(data);
 	}
drivers/cpufreq/integrator-cpufreq.c
@@ -98,11 +98,10 @@ static int integrator_set_target(struct cpufreq_policy *policy,
 	/* get current setting */
 	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);

-	if (machine_is_integrator()) {
+	if (machine_is_integrator())
 		vco.s = (cm_osc >> 8) & 7;
-	} else if (machine_is_cintegrator()) {
+	else if (machine_is_cintegrator())
 		vco.s = 1;
-	}
 	vco.v = cm_osc & 255;
 	vco.r = 22;
 	freqs.old = icst_hz(&cclk_params, vco) / 1000;

@@ -163,11 +162,10 @@ static unsigned int integrator_get(unsigned int cpu)
 	/* detect memory etc. */
 	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);

-	if (machine_is_integrator()) {
+	if (machine_is_integrator())
 		vco.s = (cm_osc >> 8) & 7;
-	} else {
+	else
 		vco.s = 1;
-	}

 	vco.v = cm_osc & 255;
 	vco.r = 22;

@@ -203,7 +201,7 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
 	struct resource *res;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -ENODEV;

 	cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));

@@ -234,6 +232,6 @@ static struct platform_driver integrator_cpufreq_driver = {
 module_platform_driver_probe(integrator_cpufreq_driver,
 			     integrator_cpufreq_probe);

-MODULE_AUTHOR ("Russell M. King");
-MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Russell M. King");
+MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
+MODULE_LICENSE("GPL");
drivers/cpufreq/intel_pstate.c
@@ -484,12 +484,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
 }
 /************************** sysfs end ************************/

-static void intel_pstate_hwp_enable(void)
+static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-	hwp_active++;
 	pr_info("intel_pstate: HWP enabled\n");

-	wrmsrl( MSR_PM_ENABLE, 0x1);
+	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }

 static int byt_get_min_pstate(void)

@@ -522,7 +521,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 	int32_t vid_fp;
 	u32 vid;

-	val = pstate << 8;
+	val = (u64)pstate << 8;
 	if (limits.no_turbo && !limits.turbo_disabled)
 		val |= (u64)1 << 32;

@@ -611,7 +610,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 {
 	u64 val;

-	val = pstate << 8;
+	val = (u64)pstate << 8;
 	if (limits.no_turbo && !limits.turbo_disabled)
 		val |= (u64)1 << 32;

@@ -909,6 +908,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x4c, byt_params),
 	ICPU(0x4e, core_params),
 	ICPU(0x4f, core_params),
+	ICPU(0x5e, core_params),
 	ICPU(0x56, core_params),
 	ICPU(0x57, knl_params),
 	{}

@@ -933,6 +933,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu = all_cpu_data[cpunum];
 	cpu->cpu = cpunum;

+	if (hwp_active)
+		intel_pstate_hwp_enable(cpu);
+
 	intel_pstate_get_cpu_pstates(cpu);

 	init_timer_deferrable(&cpu->timer);

@@ -1170,6 +1174,10 @@ static struct hw_vendor_info vendor_info[] = {
 	{1, "ORACLE", "X4270M3 ", PPC},
 	{1, "ORACLE", "X4270M2 ", PPC},
 	{1, "ORACLE", "X4170M2 ", PPC},
+	{1, "ORACLE", "X4170 M3", PPC},
+	{1, "ORACLE", "X4275 M3", PPC},
+	{1, "ORACLE", "X6-2 ", PPC},
+	{1, "ORACLE", "Sudbury ", PPC},
 	{0, "", ""},
 };

@@ -1246,7 +1254,7 @@ static int __init intel_pstate_init(void)
 		return -ENOMEM;

 	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
-		intel_pstate_hwp_enable();
+		hwp_active++;

 	if (!hwp_active && hwp_only)
 		goto out;
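The (u64) casts address the overflow Coverity complained about: the shift was previously evaluated in 32-bit int arithmetic and only then widened for the 64-bit MSR value. An illustrative fragment (not kernel code; the value is hypothetical, far beyond any real P-state):

    int pstate = 0x01000000;
    u64 bad  = pstate << 8;       /* shifted as 32-bit int: overflows before widening */
    u64 good = (u64)pstate << 8;  /* widened first: yields 0x100000000 as intended */

Real P-states are small, so the cast is defensive, but it keeps the expression consistent with the later 'val |= (u64)1 << 32'.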
drivers/cpufreq/mt8173-cpufreq.c (new file)
@@ -0,0 +1,527 @@
/*
* Copyright (c) 2015 Linaro Ltd.
* Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>
#define MIN_VOLT_SHIFT (100000)
#define MAX_VOLT_SHIFT (200000)
#define MAX_VOLT_LIMIT (1150000)
#define VOLT_TOL (10000)
/*
* The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
* on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
* Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
* voltage inputs need to be controlled under a hardware limitation:
* 100mV < Vsram - Vproc < 200mV
*
* When scaling the clock frequency of a CPU clock domain, the clock source
* needs to be switched to another stable PLL clock temporarily until
* the original PLL becomes stable at target frequency.
*/
struct mtk_cpu_dvfs_info {
struct device *cpu_dev;
struct regulator *proc_reg;
struct regulator *sram_reg;
struct clk *cpu_clk;
struct clk *inter_clk;
struct thermal_cooling_device *cdev;
int intermediate_voltage;
bool need_voltage_tracking;
};
static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
int new_vproc)
{
struct regulator *proc_reg = info->proc_reg;
struct regulator *sram_reg = info->sram_reg;
int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;
old_vproc = regulator_get_voltage(proc_reg);
old_vsram = regulator_get_voltage(sram_reg);
/* Vsram should not exceed the maximum allowed voltage of SoC. */
new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);
if (old_vproc < new_vproc) {
/*
* When scaling up voltages, Vsram and Vproc scale up step
* by step. At each step, set Vsram to (Vproc + 200mV) first,
* then set Vproc to (Vsram - 100mV).
* Keep doing it until Vsram and Vproc hit target voltages.
*/
do {
old_vsram = regulator_get_voltage(sram_reg);
old_vproc = regulator_get_voltage(proc_reg);
vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);
if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
vsram = MAX_VOLT_LIMIT;
/*
* If the target Vsram hits the maximum voltage,
* try to set the exact voltage value first.
*/
ret = regulator_set_voltage(sram_reg, vsram,
vsram);
if (ret)
ret = regulator_set_voltage(sram_reg,
vsram - VOLT_TOL,
vsram);
vproc = new_vproc;
} else {
ret = regulator_set_voltage(sram_reg, vsram,
vsram + VOLT_TOL);
vproc = vsram - MIN_VOLT_SHIFT;
}
if (ret)
return ret;
ret = regulator_set_voltage(proc_reg, vproc,
vproc + VOLT_TOL);
if (ret) {
regulator_set_voltage(sram_reg, old_vsram,
old_vsram);
return ret;
}
} while (vproc < new_vproc || vsram < new_vsram);
} else if (old_vproc > new_vproc) {
/*
* When scaling down voltages, Vsram and Vproc scale down step
* by step. At each step, set Vproc to (Vsram - 200mV) first,
		 * then set Vsram to (Vproc + 100mV).
* Keep doing it until Vsram and Vproc hit target voltages.
*/
do {
old_vproc = regulator_get_voltage(proc_reg);
old_vsram = regulator_get_voltage(sram_reg);
vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
ret = regulator_set_voltage(proc_reg, vproc,
vproc + VOLT_TOL);
if (ret)
return ret;
if (vproc == new_vproc)
vsram = new_vsram;
else
vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);
if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
vsram = MAX_VOLT_LIMIT;
/*
* If the target Vsram hits the maximum voltage,
* try to set the exact voltage value first.
*/
ret = regulator_set_voltage(sram_reg, vsram,
vsram);
if (ret)
ret = regulator_set_voltage(sram_reg,
vsram - VOLT_TOL,
vsram);
} else {
ret = regulator_set_voltage(sram_reg, vsram,
vsram + VOLT_TOL);
}
if (ret) {
regulator_set_voltage(proc_reg, old_vproc,
old_vproc);
return ret;
}
} while (vproc > new_vproc + VOLT_TOL ||
vsram > new_vsram + VOLT_TOL);
}
return 0;
}
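/*
 * Worked example of the tracking loop above, with illustrative values
 * (not taken from an MT8173 OPP table): scaling up from Vproc = 850 mV
 * to a 1000 mV target gives new_vsram = min(1000 + 100, 1150) = 1100 mV.
 *   step 1: Vsram -> min(1100, 850 + 200) = 1050 mV, Vproc -> 1050 - 100 = 950 mV
 *   step 2: Vsram -> min(1100, 950 + 200) = 1100 mV, Vproc -> 1100 - 100 = 1000 mV
 * Both rails reach their targets in two steps, and every intermediate
 * pair keeps Vsram - Vproc inside the 100-200 mV hardware window.
 */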
static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
{
if (info->need_voltage_tracking)
return mtk_cpufreq_voltage_tracking(info, vproc);
else
return regulator_set_voltage(info->proc_reg, vproc,
vproc + VOLT_TOL);
}
static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_frequency_table *freq_table = policy->freq_table;
struct clk *cpu_clk = policy->clk;
struct clk *armpll = clk_get_parent(cpu_clk);
struct mtk_cpu_dvfs_info *info = policy->driver_data;
struct device *cpu_dev = info->cpu_dev;
struct dev_pm_opp *opp;
long freq_hz, old_freq_hz;
int vproc, old_vproc, inter_vproc, target_vproc, ret;
inter_vproc = info->intermediate_voltage;
old_freq_hz = clk_get_rate(cpu_clk);
old_vproc = regulator_get_voltage(info->proc_reg);
freq_hz = freq_table[index].frequency * 1000;
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("cpu%d: failed to find OPP for %ld\n",
policy->cpu, freq_hz);
return PTR_ERR(opp);
}
vproc = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
/*
* If the new voltage or the intermediate voltage is higher than the
* current voltage, scale up voltage first.
*/
target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
if (old_vproc < target_vproc) {
ret = mtk_cpufreq_set_voltage(info, target_vproc);
if (ret) {
pr_err("cpu%d: failed to scale up voltage!\n",
policy->cpu);
mtk_cpufreq_set_voltage(info, old_vproc);
return ret;
}
}
/* Reparent the CPU clock to intermediate clock. */
ret = clk_set_parent(cpu_clk, info->inter_clk);
if (ret) {
pr_err("cpu%d: failed to re-parent cpu clock!\n",
policy->cpu);
mtk_cpufreq_set_voltage(info, old_vproc);
WARN_ON(1);
return ret;
}
/* Set the original PLL to target rate. */
ret = clk_set_rate(armpll, freq_hz);
if (ret) {
pr_err("cpu%d: failed to scale cpu clock rate!\n",
policy->cpu);
clk_set_parent(cpu_clk, armpll);
mtk_cpufreq_set_voltage(info, old_vproc);
return ret;
}
/* Set parent of CPU clock back to the original PLL. */
ret = clk_set_parent(cpu_clk, armpll);
if (ret) {
pr_err("cpu%d: failed to re-parent cpu clock!\n",
policy->cpu);
mtk_cpufreq_set_voltage(info, inter_vproc);
WARN_ON(1);
return ret;
}
/*
* If the new voltage is lower than the intermediate voltage or the
* original voltage, scale down to the new voltage.
*/
if (vproc < inter_vproc || vproc < old_vproc) {
ret = mtk_cpufreq_set_voltage(info, vproc);
if (ret) {
pr_err("cpu%d: failed to scale down voltage!\n",
policy->cpu);
clk_set_parent(cpu_clk, info->inter_clk);
clk_set_rate(armpll, old_freq_hz);
clk_set_parent(cpu_clk, armpll);
return ret;
}
}
return 0;
}
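/*
 * Summary of the transition sequence implemented above:
 *   1. raise Vproc to max(target, intermediate) when scaling up;
 *   2. reparent the "cpu" mux to the stable intermediate PLL;
 *   3. relock ARMPLL at the target rate;
 *   4. reparent the mux back to ARMPLL;
 *   5. lower Vproc if the target voltage is below the current one.
 * Each error path unwinds whichever of these steps already succeeded.
 */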
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
struct device_node *np = of_node_get(info->cpu_dev->of_node);
if (WARN_ON(!np))
return;
if (of_find_property(np, "#cooling-cells", NULL)) {
info->cdev = of_cpufreq_cooling_register(np,
policy->related_cpus);
if (IS_ERR(info->cdev)) {
dev_err(info->cpu_dev,
"running cpufreq without cooling device: %ld\n",
PTR_ERR(info->cdev));
info->cdev = NULL;
}
}
of_node_put(np);
}
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
{
struct device *cpu_dev;
struct regulator *proc_reg = ERR_PTR(-ENODEV);
struct regulator *sram_reg = ERR_PTR(-ENODEV);
struct clk *cpu_clk = ERR_PTR(-ENODEV);
struct clk *inter_clk = ERR_PTR(-ENODEV);
struct dev_pm_opp *opp;
unsigned long rate;
int ret;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("failed to get cpu%d device\n", cpu);
return -ENODEV;
}
cpu_clk = clk_get(cpu_dev, "cpu");
if (IS_ERR(cpu_clk)) {
if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
else
pr_err("failed to get cpu clk for cpu%d\n", cpu);
ret = PTR_ERR(cpu_clk);
return ret;
}
inter_clk = clk_get(cpu_dev, "intermediate");
if (IS_ERR(inter_clk)) {
if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
pr_warn("intermediate clk for cpu%d not ready, retry.\n",
cpu);
else
pr_err("failed to get intermediate clk for cpu%d\n",
cpu);
ret = PTR_ERR(inter_clk);
goto out_free_resources;
}
proc_reg = regulator_get_exclusive(cpu_dev, "proc");
if (IS_ERR(proc_reg)) {
if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
pr_warn("proc regulator for cpu%d not ready, retry.\n",
cpu);
else
pr_err("failed to get proc regulator for cpu%d\n",
cpu);
ret = PTR_ERR(proc_reg);
goto out_free_resources;
}
/* Both presence and absence of sram regulator are valid cases. */
sram_reg = regulator_get_exclusive(cpu_dev, "sram");
ret = of_init_opp_table(cpu_dev);
if (ret) {
pr_warn("no OPP table for cpu%d\n", cpu);
goto out_free_resources;
}
/* Search a safe voltage for intermediate frequency. */
rate = clk_get_rate(inter_clk);
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("failed to get intermediate opp for cpu%d\n", cpu);
ret = PTR_ERR(opp);
goto out_free_opp_table;
}
info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
info->cpu_dev = cpu_dev;
info->proc_reg = proc_reg;
info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
info->cpu_clk = cpu_clk;
info->inter_clk = inter_clk;
/*
* If SRAM regulator is present, software "voltage tracking" is needed
* for this CPU power domain.
*/
info->need_voltage_tracking = !IS_ERR(sram_reg);
return 0;
out_free_opp_table:
of_free_opp_table(cpu_dev);
out_free_resources:
if (!IS_ERR(proc_reg))
regulator_put(proc_reg);
if (!IS_ERR(sram_reg))
regulator_put(sram_reg);
if (!IS_ERR(cpu_clk))
clk_put(cpu_clk);
if (!IS_ERR(inter_clk))
clk_put(inter_clk);
return ret;
}
static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
{
if (!IS_ERR(info->proc_reg))
regulator_put(info->proc_reg);
if (!IS_ERR(info->sram_reg))
regulator_put(info->sram_reg);
if (!IS_ERR(info->cpu_clk))
clk_put(info->cpu_clk);
if (!IS_ERR(info->inter_clk))
clk_put(info->inter_clk);
of_free_opp_table(info->cpu_dev);
}
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info;
struct cpufreq_frequency_table *freq_table;
int ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
ret = mtk_cpu_dvfs_info_init(info, policy->cpu);
if (ret) {
pr_err("%s failed to initialize dvfs info for cpu%d\n",
__func__, policy->cpu);
goto out_free_dvfs_info;
}
ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table for cpu%d: %d\n",
policy->cpu, ret);
goto out_release_dvfs_info;
}
ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
pr_err("%s: invalid frequency table: %d\n", __func__, ret);
goto out_free_cpufreq_table;
}
/* CPUs in the same cluster share a clock and power domain. */
cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling);
policy->driver_data = info;
policy->clk = info->cpu_clk;
return 0;
out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
out_release_dvfs_info:
mtk_cpu_dvfs_info_release(info);
out_free_dvfs_info:
kfree(info);
return ret;
}
static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
cpufreq_cooling_unregister(info->cdev);
dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
mtk_cpu_dvfs_info_release(info);
kfree(info);
return 0;
}
static struct cpufreq_driver mt8173_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = mtk_cpufreq_set_target,
.get = cpufreq_generic_get,
.init = mtk_cpufreq_init,
.exit = mtk_cpufreq_exit,
.ready = mtk_cpufreq_ready,
.name = "mtk-cpufreq",
.attr = cpufreq_generic_attr,
};
static int mt8173_cpufreq_probe(struct platform_device *pdev)
{
int ret;
ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
if (ret)
pr_err("failed to register mtk cpufreq driver\n");
return ret;
}
static struct platform_driver mt8173_cpufreq_platdrv = {
.driver = {
.name = "mt8173-cpufreq",
},
.probe = mt8173_cpufreq_probe,
};
static int mt8173_cpufreq_driver_init(void)
{
struct platform_device *pdev;
int err;
if (!of_machine_is_compatible("mediatek,mt8173"))
return -ENODEV;
err = platform_driver_register(&mt8173_cpufreq_platdrv);
if (err)
return err;
/*
* Since there's no place to hold device registration code and no
* device tree based way to match cpufreq driver yet, both the driver
* and the device registration codes are put here to handle defer
* probing.
*/
pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
if (IS_ERR(pdev)) {
pr_err("failed to register mtk-cpufreq platform device\n");
return PTR_ERR(pdev);
}
return 0;
}
device_initcall(mt8173_cpufreq_driver_init);
drivers/cpufreq/powernow-k7.c
@@ -421,7 +421,7 @@ static int powernow_acpi_init(void)
 	return 0;

 err2:
-	acpi_processor_unregister_performance(acpi_processor_perf, 0);
+	acpi_processor_unregister_performance(0);
 err1:
 	free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 err05:

@@ -661,7 +661,7 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
 {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
 	if (acpi_processor_perf) {
-		acpi_processor_unregister_performance(acpi_processor_perf, 0);
+		acpi_processor_unregister_performance(0);
 		free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 		kfree(acpi_processor_perf);
 	}
drivers/cpufreq/powernow-k8.c
@@ -795,7 +795,7 @@ err_out_mem:
 	kfree(powernow_table);

 err_out:
-	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+	acpi_processor_unregister_performance(data->cpu);

 	/* data->acpi_data.state_count informs us at ->exit()
 	 * whether ACPI was used */

@@ -863,8 +863,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
 	if (data->acpi_data.state_count)
-		acpi_processor_unregister_performance(&data->acpi_data,
-						      data->cpu);
+		acpi_processor_unregister_performance(data->cpu);

 	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
drivers/cpufreq/powernv-cpufreq.c
@@ -27,20 +27,31 @@
 #include <linux/smp.h>
 #include <linux/of.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>

 #include <asm/cputhreads.h>
 #include <asm/firmware.h>
 #include <asm/reg.h>
 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
+#include <asm/opal.h>

 #define POWERNV_MAX_PSTATES	256
 #define PMSR_PSAFE_ENABLE	(1UL << 30)
 #define PMSR_SPR_EM_DISABLE	(1UL << 31)
 #define PMSR_MAX(x)		((x >> 32) & 0xFF)
-#define PMSR_LP(x)		((x >> 48) & 0xFF)

 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
-static bool rebooting, throttled;
+static bool rebooting, throttled, occ_reset;
+
+static struct chip {
+	unsigned int id;
+	bool throttled;
+	cpumask_t mask;
+	struct work_struct throttle;
+	bool restore;
+} *chips;
+
+static int nr_chips;

 /*
  * Note: The set of pstates consists of contiguous integers, the

@@ -298,28 +309,35 @@ static inline unsigned int get_nominal_index(void)
 	return powernv_pstate_info.max - powernv_pstate_info.nominal;
 }

-static void powernv_cpufreq_throttle_check(unsigned int cpu)
+static void powernv_cpufreq_throttle_check(void *data)
 {
+	unsigned int cpu = smp_processor_id();
 	unsigned long pmsr;
-	int pmsr_pmax, pmsr_lp;
+	int pmsr_pmax, i;

 	pmsr = get_pmspr(SPRN_PMSR);

+	for (i = 0; i < nr_chips; i++)
+		if (chips[i].id == cpu_to_chip_id(cpu))
+			break;
+
 	/* Check for Pmax Capping */
 	pmsr_pmax = (s8)PMSR_MAX(pmsr);
 	if (pmsr_pmax != powernv_pstate_info.max) {
-		throttled = true;
-		pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax);
-		pr_info("Max allowed Pstate is capped\n");
+		if (chips[i].throttled)
+			goto next;
+		chips[i].throttled = true;
+		pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
+			chips[i].id, pmsr_pmax);
+	} else if (chips[i].throttled) {
+		chips[i].throttled = false;
+		pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
+			chips[i].id, pmsr_pmax);
 	}

-	/*
-	 * Check for Psafe by reading LocalPstate
-	 * or check if Psafe_mode_active is set in PMSR.
-	 */
-	pmsr_lp = (s8)PMSR_LP(pmsr);
-	if ((pmsr_lp < powernv_pstate_info.min) ||
-	    (pmsr & PMSR_PSAFE_ENABLE)) {
+	/* Check if Psafe_mode_active is set in PMSR. */
+next:
+	if (pmsr & PMSR_PSAFE_ENABLE) {
 		throttled = true;
 		pr_info("Pstate set to safe frequency\n");
 	}

@@ -350,7 +368,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
 		return 0;

 	if (!throttled)
-		powernv_cpufreq_throttle_check(smp_processor_id());
+		powernv_cpufreq_throttle_check(NULL);

 	freq_data.pstate_id = powernv_freqs[new_index].driver_data;

@@ -395,6 +413,119 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
 	.notifier_call	= powernv_cpufreq_reboot_notifier,
 };
+void powernv_cpufreq_work_fn(struct work_struct *work)
+{
+	struct chip *chip = container_of(work, struct chip, throttle);
+	unsigned int cpu;
+	cpumask_var_t mask;
+
+	/* Re-check throttling from one online CPU of this chip. */
+	smp_call_function_any(&chip->mask,
+			      powernv_cpufreq_throttle_check, NULL, 0);
+
+	if (!chip->restore)
+		return;
+
+	/* The mask must be allocated before use with CONFIG_CPUMASK_OFFSTACK. */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return;
+
+	chip->restore = false;
+	cpumask_copy(mask, &chip->mask);
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
+		int index, tcpu;
+		struct cpufreq_policy policy;
+
+		cpufreq_get_policy(&policy, cpu);
+		/* Re-apply the table entry closest to the current frequency. */
+		cpufreq_frequency_table_target(&policy, policy.freq_table,
+					       policy.cur,
+					       CPUFREQ_RELATION_C, &index);
+		powernv_cpufreq_target_index(&policy, index);
+		/* Every CPU of this policy is now handled; drop them all. */
+		for_each_cpu(tcpu, policy.cpus)
+			cpumask_clear_cpu(tcpu, mask);
+	}
+	free_cpumask_var(mask);
+}
+static char throttle_reason[][30] = {
+	"No throttling",
+	"Power Cap",
+	"Processor Over Temperature",
+	"Power Supply Failure",
+	"Over Current",
+	"OCC Reset"
+};
+
+static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
+				   unsigned long msg_type, void *_msg)
+{
+	struct opal_msg *msg = _msg;
+	struct opal_occ_msg omsg;
+	int i;
+
+	if (msg_type != OPAL_MSG_OCC)
+		return 0;
+
+	omsg.type = be64_to_cpu(msg->params[0]);
+
+	switch (omsg.type) {
+	case OCC_RESET:
+		occ_reset = true;
+		pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
+		/*
+		 * powernv_cpufreq_throttle_check() is called in
+		 * target() callback which can detect the throttle state
+		 * for governors like ondemand.
+		 * But static governors will not call target() often thus
+		 * report throttling here.
+		 */
+		if (!throttled) {
+			throttled = true;
+			pr_crit("CPU frequency is throttled for duration\n");
+		}
+		break;
+	case OCC_LOAD:
+		pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
+		break;
+	case OCC_THROTTLE:
+		omsg.chip = be64_to_cpu(msg->params[1]);
+		omsg.throttle_status = be64_to_cpu(msg->params[2]);
+
+		if (occ_reset) {
+			occ_reset = false;
+			throttled = false;
+			pr_info("OCC Active, CPU frequency is no longer throttled\n");
+
+			for (i = 0; i < nr_chips; i++) {
+				chips[i].restore = true;
+				schedule_work(&chips[i].throttle);
+			}
+
+			return 0;
+		}
+
+		if (omsg.throttle_status &&
+		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
+			pr_info("OCC: Chip %u Pmax reduced due to %s\n",
+				(unsigned int)omsg.chip,
+				throttle_reason[omsg.throttle_status]);
+		else if (!omsg.throttle_status)
+			pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
+				throttle_reason[omsg.throttle_status]);
+		else
+			return 0;
+
+		for (i = 0; i < nr_chips; i++)
+			if (chips[i].id == omsg.chip) {
+				if (!omsg.throttle_status)
+					chips[i].restore = true;
+				schedule_work(&chips[i].throttle);
+			}
+	}
+	return 0;
+}
+
+static struct notifier_block powernv_cpufreq_opal_nb = {
+	.notifier_call	= powernv_cpufreq_occ_msg,
+	.next		= NULL,
+	.priority	= 0,
+};
+
 static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
 	struct powernv_smp_call_data freq_data;

@@ -414,6 +545,36 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
 	.attr		= powernv_cpu_freq_attr,
 };

+static int init_chip_info(void)
+{
+	unsigned int chip[256];
+	unsigned int cpu, i;
+	unsigned int prev_chip_id = UINT_MAX;
+
+	for_each_possible_cpu(cpu) {
+		unsigned int id = cpu_to_chip_id(cpu);
+
+		if (prev_chip_id != id) {
+			prev_chip_id = id;
+			chip[nr_chips++] = id;
+		}
+	}
+
+	chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
+	if (!chips)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_chips; i++) {
+		chips[i].id = chip[i];
+		chips[i].throttled = false;
+		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
+		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+		chips[i].restore = false;
+	}
+
+	return 0;
+}
+
 static int __init powernv_cpufreq_init(void)
 {
 	int rc = 0;

@@ -429,7 +590,13 @@ static int __init powernv_cpufreq_init(void)
 		return rc;
 	}

+	/* Populate chip info */
+	rc = init_chip_info();
+	if (rc)
+		return rc;
+
 	register_reboot_notifier(&powernv_cpufreq_reboot_nb);
+	opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
+
 	return cpufreq_register_driver(&powernv_cpufreq_driver);
 }
 module_init(powernv_cpufreq_init);

@@ -437,6 +604,8 @@ module_init(powernv_cpufreq_init);
 static void __exit powernv_cpufreq_exit(void)
 {
 	unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
+	opal_message_notifier_unregister(OPAL_MSG_OCC,
+					 &powernv_cpufreq_opal_nb);
 	cpufreq_unregister_driver(&powernv_cpufreq_driver);
 }
 module_exit(powernv_cpufreq_exit);
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -97,8 +97,8 @@ static int pmi_notifier(struct notifier_block *nb,
 	struct cpufreq_frequency_table *cbe_freqs;
 	u8 node;

-	/* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
-	 * and CPUFREQ_NOTIFY policy events?)
+	/* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
+	 * policy events?)
 	 */
 	if (event == CPUFREQ_START)
 		return 0;
drivers/cpufreq/sfi-cpufreq.c
@@ -45,12 +45,10 @@ static int sfi_parse_freq(struct sfi_table_header *table)
 	pentry = (struct sfi_freq_table_entry *)sb->pentry;
 	totallen = num_freq_table_entries * sizeof(*pentry);

-	sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
+	sfi_cpufreq_array = kmemdup(pentry, totallen, GFP_KERNEL);
 	if (!sfi_cpufreq_array)
 		return -ENOMEM;

-	memcpy(sfi_cpufreq_array, pentry, totallen);
-
 	return 0;
 }
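kmemdup() allocates and copies in one call, which removes the window for a forgotten memcpy() after a bare allocation. The general pattern (generic sketch; dst, src and len stand for any buffer):

    dst = kmemdup(src, len, GFP_KERNEL);  /* kmalloc(len, GFP_KERNEL) + memcpy() in one step */
    if (!dst)
        return -ENOMEM;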
drivers/cpufreq/speedstep-lib.c
@@ -386,7 +386,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 	unsigned int prev_speed;
 	unsigned int ret = 0;
 	unsigned long flags;
-	struct timeval tv1, tv2;
+	ktime_t tv1, tv2;

 	if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
 		return -EINVAL;

@@ -415,14 +415,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 	/* start latency measurement */
 	if (transition_latency)
-		do_gettimeofday(&tv1);
+		tv1 = ktime_get();

 	/* switch to high state */
 	set_state(SPEEDSTEP_HIGH);

 	/* end latency measurement */
 	if (transition_latency)
-		do_gettimeofday(&tv2);
+		tv2 = ktime_get();

 	*high_speed = speedstep_get_frequency(processor);
 	if (!*high_speed) {

@@ -442,8 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 	set_state(SPEEDSTEP_LOW);

 	if (transition_latency) {
-		*transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
-			tv2.tv_usec - tv1.tv_usec;
+		*transition_latency = ktime_to_us(ktime_sub(tv2, tv1));
 		pr_debug("transition latency is %u uSec\n", *transition_latency);

 		/* convert uSec to nSec and add 20% for safety reasons */
drivers/video/fbdev/pxafb.c
@@ -1668,7 +1668,6 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
 	switch (val) {
 	case CPUFREQ_ADJUST:
-	case CPUFREQ_INCOMPATIBLE:
 		pr_debug("min dma period: %d ps, "
 			"new clock %d kHz\n", pxafb_display_dma_period(var),
 			policy->max);
drivers/video/fbdev/sa1100fb.c
@@ -1042,7 +1042,6 @@ sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,
 	switch (val) {
 	case CPUFREQ_ADJUST:
-	case CPUFREQ_INCOMPATIBLE:
 		dev_dbg(fbi->dev, "min dma period: %d ps, "
 			"new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
 			policy->max);
drivers/xen/xen-acpi-processor.c
@@ -560,11 +560,9 @@ static int __init xen_acpi_processor_init(void)
 	return 0;

 err_unregister:
-	for_each_possible_cpu(i) {
-		struct acpi_processor_performance *perf;
-		perf = per_cpu_ptr(acpi_perf_data, i);
-		acpi_processor_unregister_performance(perf, i);
-	}
+	for_each_possible_cpu(i)
+		acpi_processor_unregister_performance(i);

 err_out:
 	/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
 	free_acpi_perf_data();

@@ -579,11 +577,9 @@ static void __exit xen_acpi_processor_exit(void)
 	kfree(acpi_ids_done);
 	kfree(acpi_id_present);
 	kfree(acpi_id_cst_present);
-	for_each_possible_cpu(i) {
-		struct acpi_processor_performance *perf;
-		perf = per_cpu_ptr(acpi_perf_data, i);
-		acpi_processor_unregister_performance(perf, i);
-	}
+	for_each_possible_cpu(i)
+		acpi_processor_unregister_performance(i);
 	free_acpi_perf_data();
 }
include/acpi/processor.h
@@ -228,10 +228,7 @@ extern int acpi_processor_preregister_performance(struct
 extern int acpi_processor_register_performance(struct acpi_processor_performance
 					       *performance, unsigned int cpu);
-extern void acpi_processor_unregister_performance(struct
-						  acpi_processor_performance
-						  *performance,
-						  unsigned int cpu);
+extern void acpi_processor_unregister_performance(unsigned int cpu);

 /* note: this locks both the calling module and the processor module
  * if a _PPC object exists, rmmod is disallowed then */
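Callers now hand back only the CPU number; the core looks up the previously registered performance object itself. Typical pairing after this change (sketch):

    ret = acpi_processor_register_performance(perf, cpu);
    if (ret)
        return ret;
    /* ... use perf ... */
    acpi_processor_unregister_performance(cpu);  /* pointer no longer passed back */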
include/linux/cpufreq.h
@@ -51,11 +51,9 @@ struct cpufreq_cpuinfo {
 	unsigned int		transition_latency;
 };

-struct cpufreq_real_policy {
+struct cpufreq_user_policy {
 	unsigned int		min;    /* in kHz */
 	unsigned int		max;    /* in kHz */
-	unsigned int		policy; /* see above */
-	struct cpufreq_governor	*governor; /* see below */
 };

 struct cpufreq_policy {

@@ -88,7 +86,7 @@ struct cpufreq_policy {
 	struct work_struct	update; /* if update_policy() needs to be
 					 * called, but you're in IRQ context */

-	struct cpufreq_real_policy	user_policy;
+	struct cpufreq_user_policy	user_policy;
 	struct cpufreq_frequency_table	*freq_table;

 	struct list_head        policy_list;

@@ -369,11 +367,10 @@ static inline void cpufreq_resume(void) {}
 /* Policy Notifiers  */
 #define CPUFREQ_ADJUST			(0)
-#define CPUFREQ_INCOMPATIBLE		(1)
-#define CPUFREQ_NOTIFY			(2)
-#define CPUFREQ_START			(3)
-#define CPUFREQ_CREATE_POLICY		(4)
-#define CPUFREQ_REMOVE_POLICY		(5)
+#define CPUFREQ_NOTIFY			(1)
+#define CPUFREQ_START			(2)
+#define CPUFREQ_CREATE_POLICY		(3)
+#define CPUFREQ_REMOVE_POLICY		(4)

 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
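With CPUFREQ_INCOMPATIBLE gone, a policy notifier does all of its clamping during CPUFREQ_ADJUST. A minimal sketch (hypothetical module; my_max_khz stands for whatever cap the notifier enforces):

    static int my_policy_notifier(struct notifier_block *nb,
                                  unsigned long event, void *data)
    {
        struct cpufreq_policy *policy = data;

        if (event != CPUFREQ_ADJUST)
            return NOTIFY_DONE;

        /* Narrow the allowed range in place, e.g. for a thermal cap. */
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     my_max_khz);
        return NOTIFY_OK;
    }

    static struct notifier_block my_nb = {
        .notifier_call = my_policy_notifier,
    };

    /* Registered with: cpufreq_register_notifier(&my_nb, CPUFREQ_POLICY_NOTIFIER); */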