Merge back cpufreq material for v3.18.

Rafael J. Wysocki 2014-10-03 15:41:16 +02:00
commit 6f1293ff74
24 changed files with 442 additions and 280 deletions

View File

@ -1,8 +1,8 @@
Generic CPU0 cpufreq driver
Generic cpufreq driver
It is a generic cpufreq driver for CPU0 frequency management. It
supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
systems which share clock and voltage across all CPUs.
It is a generic DT based cpufreq driver for frequency management. It supports
both uniprocessor (UP) and symmetric multiprocessor (SMP) systems which share
clock and voltage across all CPUs.
Both required and optional properties listed below must be defined
under node /cpus/cpu@0.

View File

@ -32,7 +32,7 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_MVEBU_V7_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPUFREQ_GENERIC=y
CONFIG_CPUFREQ_DT=y
CONFIG_VFP=y
CONFIG_NET=y
CONFIG_INET=y

View File

@ -20,7 +20,7 @@
static void __init imx27_dt_init(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
struct platform_device_info devinfo = { .name = "cpufreq-dt", };
mxc_arch_reset_init_dt();

View File

@ -51,7 +51,7 @@ static void __init imx51_ipu_mipi_setup(void)
static void __init imx51_dt_init(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
struct platform_device_info devinfo = { .name = "cpufreq-dt", };
mxc_arch_reset_init_dt();
imx51_ipu_mipi_setup();

View File

@ -644,7 +644,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
}
}
platform_device_register_simple("cpufreq-generic", -1, NULL, 0);
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
}

View File

@ -282,7 +282,7 @@ static inline void omap_init_cpufreq(void)
if (!of_have_populated_dt())
devinfo.name = "omap-cpufreq";
else
devinfo.name = "cpufreq-cpu0";
devinfo.name = "cpufreq-dt";
platform_device_register_full(&devinfo);
}

View File

@ -50,7 +50,7 @@ static void __init ape6evm_add_standard_devices(void)
r8a73a4_add_dt_devices();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
platform_device_register_simple("cpufreq-cpu0", -1, NULL, 0);
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
}
static const char *ape6evm_boards_compat_dt[] __initdata = {

View File

@ -12,6 +12,6 @@
int __init shmobile_cpufreq_init(void)
{
platform_device_register_simple("cpufreq-cpu0", -1, NULL, 0);
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
}

View File

@ -775,7 +775,7 @@ void __init sh73a0_add_early_devices(void)
void __init sh73a0_add_standard_devices_dt(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-cpu0", .id = -1, };
struct platform_device_info devinfo = { .name = "cpufreq-dt", .id = -1, };
/* clocks are setup late during boot in the case of DT */
sh73a0_clock_init();
@ -784,7 +784,7 @@ void __init sh73a0_add_standard_devices_dt(void)
ARRAY_SIZE(sh73a0_devices_dt));
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
/* Instantiate cpufreq-cpu0 */
/* Instantiate cpufreq-dt */
platform_device_register_full(&devinfo);
}

View File

@ -104,7 +104,7 @@ static int __init zynq_get_revision(void)
*/
static void __init zynq_init_machine(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
struct platform_device_info devinfo = { .name = "cpufreq-dt", };
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
struct device *parent = NULL;

View File

@ -183,14 +183,14 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
config GENERIC_CPUFREQ_CPU0
tristate "Generic CPU0 cpufreq driver"
config CPUFREQ_DT
tristate "Generic DT based cpufreq driver"
depends on HAVE_CLK && OF
# if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y:
# if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This adds a generic cpufreq driver for CPU0 frequency management.
This adds a generic DT based cpufreq driver for frequency management.
It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
systems which share clock and voltage across all CPUs.

View File

@ -92,7 +92,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR
depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
default m
help
This adds the CPUFreq driver for Calxeda Highbank SoC

View File

@ -13,7 +13,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
##################################################################################
# x86 drivers.

View File

@ -1,248 +0,0 @@
/*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
*
* The OPP code in function cpu0_set_target() is reused from
* drivers/cpufreq/omap-cpufreq.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>
static unsigned int transition_latency;
static unsigned int voltage_tolerance; /* in percentage */
static struct device *cpu_dev;
static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;
static struct thermal_cooling_device *cdev;
static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct dev_pm_opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
unsigned int old_freq, new_freq;
long freq_Hz, freq_exact;
int ret;
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz <= 0)
freq_Hz = freq_table[index].frequency * 1000;
freq_exact = freq_Hz;
new_freq = freq_Hz / 1000;
old_freq = clk_get_rate(cpu_clk) / 1000;
if (!IS_ERR(cpu_reg)) {
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
tol = volt * voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
}
pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
old_freq / 1000, volt_old ? volt_old / 1000 : -1,
new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage up: %d\n", ret);
return ret;
}
}
ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
pr_err("failed to set clock rate: %d\n", ret);
if (!IS_ERR(cpu_reg))
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
return ret;
}
/* scaling down? scale voltage after frequency */
if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage down: %d\n", ret);
clk_set_rate(cpu_clk, old_freq * 1000);
}
}
return ret;
}
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
policy->clk = cpu_clk;
return cpufreq_generic_init(policy, freq_table, transition_latency);
}
static struct cpufreq_driver cpu0_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = cpu0_set_target,
.get = cpufreq_generic_get,
.init = cpu0_cpufreq_init,
.name = "generic_cpu0",
.attr = cpufreq_generic_attr,
};
static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
int ret;
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
pr_err("failed to get cpu0 device\n");
return -ENODEV;
}
np = of_node_get(cpu_dev->of_node);
if (!np) {
pr_err("failed to find cpu0 node\n");
return -ENOENT;
}
cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
/*
* If cpu0 regulator supply node is present, but regulator is
* not yet registered, we should try deferring probe.
*/
if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
ret = -EPROBE_DEFER;
goto out_put_node;
}
pr_warn("failed to get cpu0 regulator: %ld\n",
PTR_ERR(cpu_reg));
}
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
goto out_put_reg;
}
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
goto out_put_clk;
}
of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
if (of_property_read_u32(np, "clock-latency", &transition_latency))
transition_latency = CPUFREQ_ETERNAL;
if (!IS_ERR(cpu_reg)) {
struct dev_pm_opp *opp;
unsigned long min_uV, max_uV;
int i;
/*
* OPP is maintained in order of increasing frequency, and
* freq_table initialised from OPP is therefore sorted in the
* same order.
*/
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
;
rcu_read_lock();
opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
min_uV = dev_pm_opp_get_voltage(opp);
opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[i-1].frequency * 1000, true);
max_uV = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
transition_latency += ret * 1000;
}
ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
if (ret) {
pr_err("failed register driver: %d\n", ret);
goto out_free_table;
}
/*
* For now, just loading the cooling device;
* thermal DT code takes care of matching them.
*/
if (of_find_property(np, "#cooling-cells", NULL)) {
cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
if (IS_ERR(cdev))
pr_err("running cpufreq without cooling device: %ld\n",
PTR_ERR(cdev));
}
of_node_put(np);
return 0;
out_free_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_clk:
if (!IS_ERR(cpu_clk))
clk_put(cpu_clk);
out_put_reg:
if (!IS_ERR(cpu_reg))
regulator_put(cpu_reg);
out_put_node:
of_node_put(np);
return ret;
}
static int cpu0_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_cooling_unregister(cdev);
cpufreq_unregister_driver(&cpu0_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
static struct platform_driver cpu0_cpufreq_platdrv = {
.driver = {
.name = "cpufreq-cpu0",
.owner = THIS_MODULE,
},
.probe = cpu0_cpufreq_probe,
.remove = cpu0_cpufreq_remove,
};
module_platform_driver(cpu0_cpufreq_platdrv);
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,364 @@
/*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
*
* Copyright (C) 2014 Linaro.
* Viresh Kumar <viresh.kumar@linaro.org>
*
* The OPP code in function set_target() is reused from
* drivers/cpufreq/omap-cpufreq.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>
struct private_data {
struct device *cpu_dev;
struct regulator *cpu_reg;
struct thermal_cooling_device *cdev;
unsigned int voltage_tolerance; /* in percentage */
};
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table = policy->freq_table;
struct clk *cpu_clk = policy->clk;
struct private_data *priv = policy->driver_data;
struct device *cpu_dev = priv->cpu_dev;
struct regulator *cpu_reg = priv->cpu_reg;
unsigned long volt = 0, volt_old = 0, tol = 0;
unsigned int old_freq, new_freq;
long freq_Hz, freq_exact;
int ret;
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz <= 0)
freq_Hz = freq_table[index].frequency * 1000;
freq_exact = freq_Hz;
new_freq = freq_Hz / 1000;
old_freq = clk_get_rate(cpu_clk) / 1000;
if (!IS_ERR(cpu_reg)) {
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(cpu_dev, "failed to find OPP for %ld\n",
freq_Hz);
return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
tol = volt * priv->voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
}
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
old_freq / 1000, volt_old ? volt_old / 1000 : -1,
new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
dev_err(cpu_dev, "failed to scale voltage up: %d\n",
ret);
return ret;
}
}
ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
if (!IS_ERR(cpu_reg))
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
return ret;
}
/* scaling down? scale voltage after frequency */
if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
dev_err(cpu_dev, "failed to scale voltage down: %d\n",
ret);
clk_set_rate(cpu_clk, old_freq * 1000);
}
}
return ret;
}
static int allocate_resources(int cpu, struct device **cdev,
struct regulator **creg, struct clk **cclk)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
struct clk *cpu_clk;
int ret = 0;
char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("failed to get cpu%d device\n", cpu);
return -ENODEV;
}
/* Try "cpu0" for older DTs */
if (!cpu)
reg = reg_cpu0;
else
reg = reg_cpu;
try_again:
cpu_reg = regulator_get_optional(cpu_dev, reg);
if (IS_ERR(cpu_reg)) {
/*
* If cpu's regulator supply node is present, but regulator is
* not yet registered, we should try deferring probe.
*/
if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
cpu);
return -EPROBE_DEFER;
}
/* Try with "cpu-supply" */
if (reg == reg_cpu0) {
reg = reg_cpu;
goto try_again;
}
dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n",
cpu, PTR_ERR(cpu_reg));
}
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
/* put regulator */
if (!IS_ERR(cpu_reg))
regulator_put(cpu_reg);
ret = PTR_ERR(cpu_clk);
/*
* If cpu's clk node is present, but clock is not yet
* registered, we should try deferring probe.
*/
if (ret == -EPROBE_DEFER)
dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
else
dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret,
cpu);
} else {
*cdev = cpu_dev;
*creg = cpu_reg;
*cclk = cpu_clk;
}
return ret;
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
struct thermal_cooling_device *cdev;
struct device_node *np;
struct private_data *priv;
struct device *cpu_dev;
struct regulator *cpu_reg;
struct clk *cpu_clk;
unsigned int transition_latency;
int ret;
ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
if (ret) {
pr_err("%s: Failed to allocate resources\n: %d", __func__, ret);
return ret;
}
np = of_node_get(cpu_dev->of_node);
if (!np) {
dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
ret = -ENOENT;
goto out_put_reg_clk;
}
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_put_node;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
goto out_free_table;
}
of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
if (of_property_read_u32(np, "clock-latency", &transition_latency))
transition_latency = CPUFREQ_ETERNAL;
if (!IS_ERR(cpu_reg)) {
struct dev_pm_opp *opp;
unsigned long min_uV, max_uV;
int i;
/*
* OPP is maintained in order of increasing frequency, and
* freq_table initialised from OPP is therefore sorted in the
* same order.
*/
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
;
rcu_read_lock();
opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
min_uV = dev_pm_opp_get_voltage(opp);
opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[i-1].frequency * 1000, true);
max_uV = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
transition_latency += ret * 1000;
}
/*
* For now, just loading the cooling device;
* thermal DT code takes care of matching them.
*/
if (of_find_property(np, "#cooling-cells", NULL)) {
cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
if (IS_ERR(cdev))
dev_err(cpu_dev,
"running cpufreq without cooling device: %ld\n",
PTR_ERR(cdev));
else
priv->cdev = cdev;
}
priv->cpu_dev = cpu_dev;
priv->cpu_reg = cpu_reg;
policy->driver_data = priv;
policy->clk = cpu_clk;
ret = cpufreq_generic_init(policy, freq_table, transition_latency);
if (ret)
goto out_cooling_unregister;
of_node_put(np);
return 0;
out_cooling_unregister:
cpufreq_cooling_unregister(priv->cdev);
kfree(priv);
out_free_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
of_node_put(np);
out_put_reg_clk:
clk_put(cpu_clk);
if (!IS_ERR(cpu_reg))
regulator_put(cpu_reg);
return ret;
}
static int cpufreq_exit(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
clk_put(policy->clk);
if (!IS_ERR(priv->cpu_reg))
regulator_put(priv->cpu_reg);
kfree(priv);
return 0;
}
static struct cpufreq_driver dt_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = set_target,
.get = cpufreq_generic_get,
.init = cpufreq_init,
.exit = cpufreq_exit,
.name = "cpufreq-dt",
.attr = cpufreq_generic_attr,
};
static int dt_cpufreq_probe(struct platform_device *pdev)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
struct clk *cpu_clk;
int ret;
/*
* All per-cluster (CPUs sharing clock/voltages) initialization is done
* from ->init(). In probe(), we just need to make sure that clk and
* regulators are available. Else defer probe and retry.
*
* FIXME: Is checking this only for CPU0 sufficient ?
*/
ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
if (ret)
return ret;
clk_put(cpu_clk);
if (!IS_ERR(cpu_reg))
regulator_put(cpu_reg);
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
dev_err(cpu_dev, "failed register driver: %d\n", ret);
return ret;
}
static int dt_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&dt_cpufreq_driver);
return 0;
}
static struct platform_driver dt_cpufreq_platdrv = {
.driver = {
.name = "cpufreq-dt",
.owner = THIS_MODULE,
},
.probe = dt_cpufreq_probe,
.remove = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");

View File

@ -437,7 +437,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor)
struct cpufreq_governor *t;
list_for_each_entry(t, &cpufreq_governor_list, governor_list)
if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
return t;
return NULL;
@ -455,10 +455,10 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
goto out;
if (cpufreq_driver->setpolicy) {
if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
*policy = CPUFREQ_POLICY_PERFORMANCE;
err = 0;
} else if (!strnicmp(str_governor, "powersave",
} else if (!strncasecmp(str_governor, "powersave",
CPUFREQ_NAME_LEN)) {
*policy = CPUFREQ_POLICY_POWERSAVE;
err = 0;
@ -1382,7 +1382,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (!cpufreq_suspended)
pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
__func__, new_cpu, cpu);
} else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
} else if (cpufreq_driver->stop_cpu) {
cpufreq_driver->stop_cpu(policy);
}
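With the setpolicy requirement dropped above, ->stop_cpu is now invoked for any driver that provides it, including ordinary ->target_index drivers; the powernv hunk later in this commit is the in-tree user. A minimal sketch of that pairing, with purely illustrative names:

#include <linux/cpufreq.h>

/* hypothetical driver: only the callbacks relevant to the change are shown */
static int demo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* program the frequency picked from policy->freq_table here */
	return 0;
}

static void demo_stop_cpu(struct cpufreq_policy *policy)
{
	/* drop to a safe operating point before the CPU goes away */
}

static struct cpufreq_driver demo_cpufreq_driver = {
	.name		= "demo-cpufreq",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= demo_target_index,
	.stop_cpu	= demo_stop_cpu,	/* now called even without ->setpolicy */
	.attr		= cpufreq_generic_attr,
};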

View File

@ -127,7 +127,7 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
* dependencies on platform headers. It is necessary to enable
* Exynos multi-platform support and will be removed together with
* this whole driver as soon as Exynos gets migrated to use
* cpufreq-cpu0 driver.
* cpufreq-dt driver.
*/
np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock");
if (!np) {

View File

@ -174,7 +174,7 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
* dependencies on platform headers. It is necessary to enable
* Exynos multi-platform support and will be removed together with
* this whole driver as soon as Exynos gets migrated to use
* cpufreq-cpu0 driver.
* cpufreq-dt driver.
*/
np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
if (!np) {

View File

@ -153,7 +153,7 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
* dependencies on platform headers. It is necessary to enable
* Exynos multi-platform support and will be removed together with
* this whole driver as soon as Exynos gets migrated to use
* cpufreq-cpu0 driver.
* cpufreq-dt driver.
*/
np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
if (!np) {

View File

@ -6,7 +6,7 @@
* published by the Free Software Foundation.
*
* This driver provides the clk notifier callbacks that are used when
* the cpufreq-cpu0 driver changes to frequency to alert the highbank
* the cpufreq-dt driver changes to frequency to alert the highbank
* EnergyCore Management Engine (ECME) about the need to change
* voltage. The ECME interfaces with the actual voltage regulators.
*/
@ -60,7 +60,7 @@ static struct notifier_block hb_cpufreq_clk_nb = {
static int hb_cpufreq_driver_init(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
struct platform_device_info devinfo = { .name = "cpufreq-dt", };
struct device *cpu_dev;
struct clk *cpu_clk;
struct device_node *np;
@ -95,7 +95,7 @@ static int hb_cpufreq_driver_init(void)
goto out_put_node;
}
/* Instantiate cpufreq-cpu0 */
/* Instantiate cpufreq-dt */
platform_device_register_full(&devinfo);
out_put_node:
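The header comment above describes the mechanism: highbank does not scale voltage itself but watches the CPU clock through a clk notifier and asks the ECME to adjust voltage around rate changes. A rough sketch of that notifier pattern with illustrative names (the real callback lives in highbank-cpufreq.c and messages the ECME where the placeholder comments are):

#include <linux/clk.h>
#include <linux/notifier.h>

static int demo_clk_notify(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	struct clk_notifier_data *cnd = data;

	if (action == PRE_RATE_CHANGE && cnd->new_rate > cnd->old_rate) {
		/* raise the voltage before the frequency goes up */
	} else if (action == POST_RATE_CHANGE && cnd->new_rate < cnd->old_rate) {
		/* lower the voltage after the frequency has gone down */
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_clk_nb = {
	.notifier_call = demo_clk_notify,
};

/* registered against the CPU clock, e.g. clk_notifier_register(cpu_clk, &demo_clk_nb); */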

View File

@ -26,6 +26,7 @@
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/reboot.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
@ -35,6 +36,7 @@
#define POWERNV_MAX_PSTATES 256
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static bool rebooting;
/*
* Note: The set of pstates consists of contiguous integers, the
@ -283,6 +285,15 @@ static void set_pstate(void *freq_data)
set_pmspr(SPRN_PMCR, val);
}
/*
* get_nominal_index: Returns the index corresponding to the nominal
* pstate in the cpufreq table
*/
static inline unsigned int get_nominal_index(void)
{
return powernv_pstate_info.max - powernv_pstate_info.nominal;
}
/*
* powernv_cpufreq_target_index: Sets the frequency corresponding to
* the cpufreq table entry indexed by new_index on the cpus in the
@ -293,6 +304,9 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
{
struct powernv_smp_call_data freq_data;
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;
freq_data.pstate_id = powernv_freqs[new_index].driver_data;
/*
@ -317,6 +331,33 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
return cpufreq_table_validate_and_show(policy, powernv_freqs);
}
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
int cpu;
struct cpufreq_policy cpu_policy;
rebooting = true;
for_each_online_cpu(cpu) {
cpufreq_get_policy(&cpu_policy, cpu);
powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
}
return NOTIFY_DONE;
}
static struct notifier_block powernv_cpufreq_reboot_nb = {
.notifier_call = powernv_cpufreq_reboot_notifier,
};
static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
struct powernv_smp_call_data freq_data;
freq_data.pstate_id = powernv_pstate_info.min;
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
}
static struct cpufreq_driver powernv_cpufreq_driver = {
.name = "powernv-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
@ -324,6 +365,7 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernv_cpufreq_target_index,
.get = powernv_cpufreq_get,
.stop_cpu = powernv_cpufreq_stop_cpu,
.attr = powernv_cpu_freq_attr,
};
@ -342,12 +384,14 @@ static int __init powernv_cpufreq_init(void)
return rc;
}
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
return cpufreq_register_driver(&powernv_cpufreq_driver);
}
module_init(powernv_cpufreq_init);
static void __exit powernv_cpufreq_exit(void)
{
unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
cpufreq_unregister_driver(&powernv_cpufreq_driver);
}
module_exit(powernv_cpufreq_exit);

View File

@ -199,7 +199,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
data->table = table;
per_cpu(cpu_data, cpu) = data;
/* update ->cpus if we have cluster, no harm if not */
cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));

View File

@ -597,7 +597,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
* and dependencies on platform headers. It is necessary to enable
* S5PV210 multi-platform support and will be removed together with
* this whole driver as soon as S5PV210 gets migrated to use
* cpufreq-cpu0 driver.
* cpufreq-dt driver.
*/
np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
if (!np) {

View File

@ -112,6 +112,9 @@ struct cpufreq_policy {
spinlock_t transition_lock;
wait_queue_head_t transition_wait;
struct task_struct *transition_task; /* Task which is doing the transition */
/* For cpufreq driver's internal use */
void *driver_data;
};
/* Only for ACPI */
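The new driver_data pointer gives each policy a per-driver stash that core code never interprets; the cpufreq-dt driver above keeps its private_data there. A minimal sketch with hypothetical names:

#include <linux/cpufreq.h>
#include <linux/slab.h>

struct demo_priv {
	unsigned int voltage_tolerance;	/* per-policy driver state */
};

static int demo_init(struct cpufreq_policy *policy)
{
	struct demo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	priv->voltage_tolerance = 2;	/* illustrative value */
	policy->driver_data = priv;	/* the core leaves this field untouched */
	return 0;
}

static int demo_exit(struct cpufreq_policy *policy)
{
	kfree(policy->driver_data);	/* the driver owns the lifetime */
	return 0;
}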