android_kernel_lge_bullhead/drivers/cpufreq/qcom-cpufreq.c


/* drivers/cpufreq/qcom-cpufreq.c
*
* MSM architecture cpufreq driver
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2007-2015, The Linux Foundation. All rights reserved.
* Author: Mike A. Chan <mikechan@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/suspend.h>
#include <linux/clk.h>
#include <linux/clk/msm-clk-provider.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <trace/events/power.h>
static DEFINE_MUTEX(l2bw_lock);
static struct clk *cpu_clk[NR_CPUS];
static struct clk *l2_clk;
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
static bool hotplug_ready;
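/*
* Per-CPU suspend state. device_suspended is set under suspend_mutex by the
* PM notifier below; msm_cpufreq_target() checks it under the same mutex and
* rejects frequency requests while the system is suspending.
*/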
struct cpufreq_suspend_t {
struct mutex suspend_mutex;
int device_suspended;
};
static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend);
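/*
* Issue the PRECHANGE notification, round the requested kHz value to a rate
* the CPU clock supports, program the clock, and issue POSTCHANGE only if
* the rate was set successfully.
*/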
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
unsigned int index)
{
int ret = 0;
struct cpufreq_freqs freqs;
unsigned long rate;
freqs.old = policy->cur;
freqs.new = new_freq;
freqs.cpu = policy->cpu;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu);
rate = new_freq * 1000;
rate = clk_round_rate(cpu_clk[policy->cpu], rate);
ret = clk_set_rate(cpu_clk[policy->cpu], rate);
if (!ret) {
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
trace_cpu_frequency_switch_end(policy->cpu);
}
return ret;
}
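/*
* cpufreq ->target hook: serialize against suspend via the per-CPU mutex,
* refuse requests while suspended, map target_freq to a frequency table
* entry and hand it to set_cpu_freq().
*/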
static int msm_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
int ret = 0;
int index;
struct cpufreq_frequency_table *table;
mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
if (target_freq == policy->cur)
goto done;
if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
pr_debug("cpufreq: cpu%d scheduling frequency change "
"in suspend.\n", policy->cpu);
ret = -EFAULT;
goto done;
}
table = cpufreq_frequency_get_table(policy->cpu);
if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
&index)) {
pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
ret = -EINVAL;
goto done;
}
pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
policy->cpu, target_freq, relation,
policy->min, policy->max, table[index].frequency);
ret = set_cpu_freq(policy, table[index].frequency,
table[index].driver_data);
done:
mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
return ret;
}
static int msm_cpufreq_verify(struct cpufreq_policy *policy)
{
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
return 0;
}
static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
{
return clk_get_rate(cpu_clk[cpu]) / 1000;
}
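/*
* cpufreq ->init hook: group all CPUs that share this CPU's clock into one
* policy, derive the cpuinfo limits from the frequency table, and snap the
* current clock rate to a valid table entry.
*/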
static int msm_cpufreq_init(struct cpufreq_policy *policy)
{
int cur_freq;
int index;
int ret = 0;
struct cpufreq_frequency_table *table =
per_cpu(freq_table, policy->cpu);
int cpu;
/*
* On some SoCs, several cores are clocked by the same source, so their
* frequencies cannot be changed independently. Find all other CPUs that
* share this CPU's clock and mark them as governed by the same policy.
*/
for_each_possible_cpu(cpu)
if (cpu_clk[cpu] == cpu_clk[policy->cpu])
cpumask_set_cpu(cpu, policy->cpus);
if (cpufreq_frequency_table_cpuinfo(policy, table))
pr_err("cpufreq: failed to get policy min/max\n");
cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000;
if (cpufreq_frequency_table_target(policy, table, cur_freq,
CPUFREQ_RELATION_H, &index) &&
cpufreq_frequency_table_target(policy, table, cur_freq,
CPUFREQ_RELATION_L, &index)) {
pr_info("cpufreq: cpu%d at invalid freq: %d\n",
policy->cpu, cur_freq);
return -EINVAL;
}
/*
* Call set_cpu_freq unconditionally so that the frequency limit is
* always updated when the CPU is brought online.
*/
ret = set_cpu_freq(policy, table[index].frequency,
table[index].driver_data);
if (ret)
return ret;
pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
policy->cpu, cur_freq, table[index].frequency);
policy->cur = table[index].frequency;
cpufreq_frequency_table_get_attr(table, policy->cpu);
return 0;
}
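/*
* CPU hotplug notifier: prepare the CPU and L2 clocks in CPU_UP_PREPARE,
* enable them in CPU_STARTING on the incoming CPU, and disable/unprepare
* them again as the CPU goes down. Hotplug is refused (NOTIFY_BAD) until
* probe has looked up the clocks and set hotplug_ready.
*/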
static int msm_cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
int rc;
/* Fail hotplug until this driver can get CPU clocks */
if (!hotplug_ready)
return NOTIFY_BAD;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DYING:
clk_disable(cpu_clk[cpu]);
clk_disable(l2_clk);
break;
/*
* Scale down clock/power of CPU that is dead and scale it back up
* before the CPU is brought up.
*/
case CPU_DEAD:
clk_unprepare(cpu_clk[cpu]);
clk_unprepare(l2_clk);
break;
case CPU_UP_CANCELED:
clk_unprepare(cpu_clk[cpu]);
clk_unprepare(l2_clk);
break;
case CPU_UP_PREPARE:
rc = clk_prepare(l2_clk);
if (rc < 0)
return NOTIFY_BAD;
rc = clk_prepare(cpu_clk[cpu]);
if (rc < 0) {
clk_unprepare(l2_clk);
return NOTIFY_BAD;
}
break;
/*
* Enable the clocks in CPU_STARTING, which runs on the CPU being brought
* up and is guaranteed to run after CPU_UP_PREPARE. This ensures the PM
* code's do-not-notify-RPM check has already executed before the HFPLL is
* enabled, so the RPM cannot drop the PLL supplies while the PLL is
* running (see CRs-Fixed: 622738).
*/
case CPU_STARTING:
rc = clk_enable(l2_clk);
if (rc < 0)
return NOTIFY_BAD;
rc = clk_enable(cpu_clk[cpu]);
if (rc) {
clk_disable(l2_clk);
return NOTIFY_BAD;
}
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block __refdata msm_cpufreq_cpu_notifier = {
.notifier_call = msm_cpufreq_cpu_callback,
};
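/*
* Called on PM_SUSPEND_PREPARE/PM_HIBERNATION_PREPARE: mark every CPU as
* suspended so msm_cpufreq_target() rejects new frequency requests.
*/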
static int msm_cpufreq_suspend(void)
{
int cpu;
for_each_possible_cpu(cpu) {
mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
}
return NOTIFY_DONE;
}
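/*
* Called on PM_POST_SUSPEND/PM_POST_HIBERNATION: clear the per-CPU suspended
* flags, then re-evaluate the policy of any online CPU whose current
* frequency ended up outside min/max while requests were being rejected.
*/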
static int msm_cpufreq_resume(void)
{
int cpu, ret;
struct cpufreq_policy policy;
for_each_possible_cpu(cpu) {
per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
}
/*
* Freq request might be rejected during suspend, resulting
* in policy->cur violating min/max constraint.
* Correct the frequency as soon as possible.
*/
get_online_cpus();
for_each_online_cpu(cpu) {
ret = cpufreq_get_policy(&policy, cpu);
if (ret)
continue;
if (policy.cur <= policy.max && policy.cur >= policy.min)
continue;
ret = cpufreq_update_policy(cpu);
if (ret)
pr_info("cpufreq: Current frequency violates policy min/max for CPU%d\n",
cpu);
else
pr_info("cpufreq: Frequency violation fixed for CPU%d\n",
cpu);
}
put_online_cpus();
return NOTIFY_DONE;
}
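/* Map PM suspend/hibernation notifications onto the helpers above. */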
static int msm_cpufreq_pm_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
switch (event) {
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
return msm_cpufreq_resume();
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
return msm_cpufreq_suspend();
default:
return NOTIFY_DONE;
}
}
static struct notifier_block msm_cpufreq_pm_notifier = {
.notifier_call = msm_cpufreq_pm_event,
};
static struct freq_attr *msm_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver msm_cpufreq_driver = {
/* lps calculations are handled here. */
.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
.init = msm_cpufreq_init,
.verify = msm_cpufreq_verify,
.target = msm_cpufreq_target,
.get = msm_cpufreq_get_freq,
.name = "msm",
.attr = msm_freq_attr,
};
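/*
* Build a cpufreq frequency table from a device-tree property containing a
* list of kHz values. Each value is rounded through the CPU clock; parsing
* stops at the first entry the clock can no longer distinguish from the
* previous one, and the table is terminated with CPUFREQ_TABLE_END.
*/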
static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
char *tbl_name, int cpu)
{
int ret, nf, i;
u32 *data;
struct cpufreq_frequency_table *ftbl;
/* Parse list of usable CPU frequencies. */
if (!of_find_property(dev->of_node, tbl_name, &nf))
return ERR_PTR(-EINVAL);
nf /= sizeof(*data);
if (nf == 0)
return ERR_PTR(-EINVAL);
data = devm_kzalloc(dev, nf * sizeof(*data), GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf);
if (ret)
return ERR_PTR(ret);
ftbl = devm_kzalloc(dev, (nf + 1) * sizeof(*ftbl), GFP_KERNEL);
if (!ftbl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < nf; i++) {
unsigned long f;
f = clk_round_rate(cpu_clk[cpu], data[i] * 1000);
if (IS_ERR_VALUE(f))
break;
f /= 1000;
/*
* Check if this is the last feasible frequency in the table.
*
* A table that lists frequencies higher than what the HW can
* support is not an error, since the table might be shared
* across CPUs in different speed bins. It's also not
* sufficient to check whether the rounded rate is lower than
* the requested rate, as that doesn't cover the following
* example:
*
* Table lists: 2.2 GHz and 2.5 GHz.
* Rounded rate returns: 2.2 GHz and 2.3 GHz.
*
* In this case, we want CPUfreq to use 2.2 GHz and 2.3 GHz
* instead of rejecting the 2.5 GHz table entry.
*/
if (i > 0 && f <= ftbl[i-1].frequency)
break;
ftbl[i].driver_data = i;
ftbl[i].frequency = f;
}
ftbl[i].driver_data = i;
ftbl[i].frequency = CPUFREQ_TABLE_END;
devm_kfree(dev, data);
return ftbl;
}
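/*
* For illustration only (not part of this file): a device-tree node of the
* rough shape the probe below expects. The property names match the strings
* used in the code; the frequency values (in kHz) are made-up placeholders,
* and how the cpuN_clk/l2_clk clocks are wired up is platform-specific.
*
*	qcom,msm-cpufreq {
*		compatible = "qcom,msm-cpufreq";
*		qcom,governor-per-policy;
*		qcom,cpufreq-table = <384000 960000 1497600>;
*	};
*/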
static int __init msm_cpufreq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
char clk_name[] = "cpu??_clk";
char tbl_name[] = "qcom,cpufreq-table-??";
struct clk *c;
int cpu;
struct cpufreq_frequency_table *ftbl;
l2_clk = devm_clk_get(dev, "l2_clk");
if (IS_ERR(l2_clk))
l2_clk = NULL;
for_each_possible_cpu(cpu) {
snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
c = devm_clk_get(dev, clk_name);
if (IS_ERR(c))
return PTR_ERR(c);
c->flags |= CLKFLAG_NO_RATE_CACHE;
cpu_clk[cpu] = c;
}
hotplug_ready = true;
/* Use per-policy governor tunable for some targets */
if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy"))
msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
/* Parse common cpufreq table shared by all CPUs */
ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0);
if (!IS_ERR(ftbl)) {
for_each_possible_cpu(cpu)
per_cpu(freq_table, cpu) = ftbl;
return 0;
}
/*
* No common table. Parse individual tables for each unique
* CPU clock.
*/
for_each_possible_cpu(cpu) {
snprintf(tbl_name, sizeof(tbl_name),
"qcom,cpufreq-table-%d", cpu);
ftbl = cpufreq_parse_dt(dev, tbl_name, cpu);
/* CPU0 must have a frequency table */
if (cpu == 0 && IS_ERR(ftbl)) {
dev_err(dev, "Failed to parse CPU0's freq table\n");
return PTR_ERR(ftbl);
}
if (cpu == 0) {
per_cpu(freq_table, cpu) = ftbl;
continue;
}
if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) {
dev_err(dev, "Failed to parse CPU%d's freq table\n",
cpu);
return PTR_ERR(ftbl);
}
/* Use previous CPU's table if it shares same clock */
if (cpu_clk[cpu] == cpu_clk[cpu - 1]) {
if (!IS_ERR(ftbl)) {
dev_warn(dev, "Conflicting tables for CPU%d\n",
cpu);
kfree(ftbl);
}
ftbl = per_cpu(freq_table, cpu - 1);
}
per_cpu(freq_table, cpu) = ftbl;
}
return 0;
}
static struct of_device_id match_table[] = {
{ .compatible = "qcom,msm-cpufreq" },
{}
};
static struct platform_driver msm_cpufreq_plat_driver = {
.driver = {
.name = "msm-cpufreq",
.of_match_table = match_table,
.owner = THIS_MODULE,
},
};
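/*
* Init ordering: msm_cpufreq_early_register() (core_initcall) registers the
* hotplug notifier early so hotplug is blocked until the clocks are found;
* msm_cpufreq_register() (subsys_initcall) then probes the platform device,
* registers the PM notifier and finally the cpufreq driver. If probing
* fails, the hotplug notifier is unregistered so hotplug works again.
*/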
static int __init msm_cpufreq_register(void)
{
int cpu, rc;
for_each_possible_cpu(cpu) {
mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex));
per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
}
rc = platform_driver_probe(&msm_cpufreq_plat_driver,
msm_cpufreq_probe);
if (rc < 0) {
/* Unblock hotplug if msm-cpufreq probe fails */
unregister_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
for_each_possible_cpu(cpu)
mutex_destroy(&(per_cpu(cpufreq_suspend, cpu).
suspend_mutex));
return rc;
}
register_pm_notifier(&msm_cpufreq_pm_notifier);
return cpufreq_register_driver(&msm_cpufreq_driver);
}
subsys_initcall(msm_cpufreq_register);
static int __init msm_cpufreq_early_register(void)
{
return register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
}
core_initcall(msm_cpufreq_early_register);