/*
 * arch/arm/mach-tegra/cpu-tegra.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@google.com>
 *	Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
 *
 * Copyright (C) 2010-2011 NVIDIA Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/types.h>
26 #include <linux/sched.h>
27 #include <linux/cpufreq.h>
28 #include <linux/delay.h>
29 #include <linux/init.h>
30 #include <linux/err.h>
31 #include <linux/clk.h>
33 #include <linux/suspend.h>
34 #include <linux/debugfs.h>
35 #include <linux/cpu.h>
37 #include <asm/system.h>
45 /* tegra throttling and edp governors require frequencies in the table
46 to be in ascending order */
47 static struct cpufreq_frequency_table *freq_table;
49 static struct clk *cpu_clk;
50 static struct clk *emc_clk;
52 static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
53 static DEFINE_MUTEX(tegra_cpu_lock);
54 static bool is_suspended;
55 static int suspend_index;
57 unsigned int tegra_getspeed(unsigned int cpu);
58 static int tegra_update_cpu_speed(unsigned long rate);
60 #ifdef CONFIG_TEGRA_THERMAL_THROTTLE
61 /* CPU frequency is gradually lowered when throttling is enabled */
62 #define THROTTLE_DELAY msecs_to_jiffies(2000)
64 static bool is_throttling;
65 static int throttle_lowest_index;
66 static int throttle_highest_index;
67 static int throttle_index;
68 static int throttle_next_index;
69 static struct delayed_work throttle_work;
70 static struct workqueue_struct *workqueue;
72 #define tegra_cpu_is_throttling() (is_throttling)
74 static void tegra_throttle_work_func(struct work_struct *work)
76 unsigned int current_freq;
78 mutex_lock(&tegra_cpu_lock);
79 current_freq = tegra_getspeed(0);
80 throttle_index = throttle_next_index;
82 if (freq_table[throttle_index].frequency < current_freq)
83 tegra_update_cpu_speed(freq_table[throttle_index].frequency);
85 if (throttle_index > throttle_lowest_index) {
86 throttle_next_index = throttle_index - 1;
87 queue_delayed_work(workqueue, &throttle_work, THROTTLE_DELAY);
90 mutex_unlock(&tegra_cpu_lock);
94 * tegra_throttling_enable
95 * This function may sleep
97 void tegra_throttling_enable(bool enable)
99 mutex_lock(&tegra_cpu_lock);
101 if (enable && !is_throttling) {
102 unsigned int current_freq = tegra_getspeed(0);
104 is_throttling = true;
106 for (throttle_index = throttle_highest_index;
107 throttle_index >= throttle_lowest_index;
109 if (freq_table[throttle_index].frequency
113 throttle_index = max(throttle_index, throttle_lowest_index);
114 throttle_next_index = throttle_index;
115 queue_delayed_work(workqueue, &throttle_work, 0);
116 } else if (!enable && is_throttling) {
117 cancel_delayed_work_sync(&throttle_work);
118 is_throttling = false;
119 /* restore speed requested by governor */
120 tegra_cpu_cap_highest_speed(NULL);
123 mutex_unlock(&tegra_cpu_lock);
125 EXPORT_SYMBOL_GPL(tegra_throttling_enable);
127 static unsigned int throttle_governor_speed(unsigned int requested_speed)
129 return tegra_cpu_is_throttling() ?
130 min(requested_speed, freq_table[throttle_index].frequency) :
134 static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
136 return sprintf(buf, "%u\n", is_throttling);
139 cpufreq_freq_attr_ro(throttle);
#ifdef CONFIG_DEBUG_FS

/* debugfs write hook: any non-zero value enables throttling */
static int throttle_debug_set(void *data, u64 val)
{
	tegra_throttling_enable(val);
	return 0;
}

/* debugfs read hook: current throttling state */
static int throttle_debug_get(void *data, u64 *val)
{
	*val = (u64) is_throttling;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get,
			throttle_debug_set, "%llu\n");

static struct dentry *cpu_tegra_debugfs_root;

/* Create /sys/kernel/debug/cpu-tegra/throttle; clean up on failure */
static int __init tegra_cpu_debug_init(void)
{
	cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", 0);

	if (!cpu_tegra_debugfs_root)
		return -ENOMEM;

	if (!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root,
				 NULL, &throttle_fops))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(cpu_tegra_debugfs_root);
	return -ENOMEM;
}

static void __exit tegra_cpu_debug_exit(void)
{
	debugfs_remove_recursive(cpu_tegra_debugfs_root);
}

late_initcall(tegra_cpu_debug_init);
module_exit(tegra_cpu_debug_exit);
#endif /* CONFIG_DEBUG_FS */
184 #else /* CONFIG_TEGRA_THERMAL_THROTTLE */
185 #define tegra_cpu_is_throttling() (0)
186 #define throttle_governor_speed(requested_speed) (requested_speed)
187 #endif /* CONFIG_TEGRA_THERMAL_THROTTLE */
189 #ifdef CONFIG_TEGRA_EDP_LIMITS
191 static const struct tegra_edp_limits *cpu_edp_limits;
192 static int cpu_edp_limits_size;
193 static int edp_thermal_index;
194 static cpumask_t edp_cpumask;
195 static unsigned int edp_limit;
197 static void edp_update_limit(void)
200 unsigned int limit = cpumask_weight(&edp_cpumask);
205 BUG_ON((edp_thermal_index >= cpu_edp_limits_size) || (limit == 0));
206 limit = cpu_edp_limits[edp_thermal_index].freq_limits[limit - 1];
208 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
209 if (freq_table[i].frequency > limit) {
213 BUG_ON(i == 0); /* min freq above the limit or table empty */
214 edp_limit = freq_table[i-1].frequency;
217 static unsigned int edp_governor_speed(unsigned int requested_speed)
219 if ((!cpu_edp_limits) || (requested_speed <= edp_limit))
220 return requested_speed;
225 int tegra_edp_update_thermal_zone(int temperature)
229 int nlimits = cpu_edp_limits_size;
237 if (temperature < cpu_edp_limits[0].temperature) {
240 for (i = 0; i < (nlimits - 1); i++) {
241 if (temperature >= cpu_edp_limits[i].temperature &&
242 temperature < cpu_edp_limits[i + 1].temperature) {
249 mutex_lock(&tegra_cpu_lock);
250 edp_thermal_index = index;
252 /* Update cpu rate if cpufreq (at least on cpu0) is already started */
253 if (target_cpu_speed[0]) {
255 tegra_cpu_cap_highest_speed(NULL);
257 mutex_unlock(&tegra_cpu_lock);
261 EXPORT_SYMBOL_GPL(tegra_edp_update_thermal_zone);
263 static int tegra_cpu_edp_notify(
264 struct notifier_block *nb, unsigned long event, void *hcpu)
267 unsigned int cpu_speed, new_speed;
268 int cpu = (long)hcpu;
272 mutex_lock(&tegra_cpu_lock);
273 cpu_set(cpu, edp_cpumask);
276 cpu_speed = tegra_getspeed(0);
277 new_speed = edp_governor_speed(cpu_speed);
278 if (new_speed < cpu_speed) {
279 ret = tegra_update_cpu_speed(new_speed);
281 cpu_clear(cpu, edp_cpumask);
285 printk(KERN_DEBUG "tegra CPU:%sforce EDP limit %u kHz"
286 "\n", ret ? " failed to " : " ", new_speed);
288 mutex_unlock(&tegra_cpu_lock);
291 mutex_lock(&tegra_cpu_lock);
292 cpu_clear(cpu, edp_cpumask);
294 tegra_cpu_cap_highest_speed(NULL);
295 mutex_unlock(&tegra_cpu_lock);
298 return notifier_from_errno(ret);
301 static struct notifier_block tegra_cpu_edp_notifier = {
302 .notifier_call = tegra_cpu_edp_notify,
305 static void tegra_cpu_edp_init(bool resume)
307 if (!cpu_edp_limits) {
309 pr_info("cpu-tegra: no EDP table is provided\n");
313 /* FIXME: use the highest temperature limits if sensor is not on-line?
314 * If thermal zone is not set yet by the sensor, edp_thermal_index = 0.
315 * Boot frequency allowed SoC to get here, should work till sensor is
318 edp_cpumask = *cpu_online_mask;
322 register_hotcpu_notifier(&tegra_cpu_edp_notifier);
323 pr_info("cpu-tegra: init EDP limit: %u MHz\n", edp_limit/1000);
327 static void tegra_cpu_edp_exit(void)
332 unregister_hotcpu_notifier(&tegra_cpu_edp_notifier);
335 void tegra_init_cpu_edp_limits(const struct tegra_edp_limits *limits, int size)
337 cpu_edp_limits = limits;
338 cpu_edp_limits_size = size;
341 #else /* CONFIG_TEGRA_EDP_LIMITS */
343 #define edp_governor_speed(requested_speed) (requested_speed)
344 #define tegra_cpu_edp_init(resume)
345 #define tegra_cpu_edp_exit()
346 #endif /* CONFIG_TEGRA_EDP_LIMITS */
348 int tegra_verify_speed(struct cpufreq_policy *policy)
350 return cpufreq_frequency_table_verify(policy, freq_table);
353 unsigned int tegra_getspeed(unsigned int cpu)
357 if (cpu >= CONFIG_NR_CPUS)
360 rate = clk_get_rate(cpu_clk) / 1000;
364 static int tegra_update_cpu_speed(unsigned long rate)
367 struct cpufreq_freqs freqs;
369 freqs.old = tegra_getspeed(0);
372 rate = clk_round_rate(cpu_clk, rate * 1000);
373 if (!IS_ERR_VALUE(rate))
374 freqs.new = rate / 1000;
376 if (freqs.old == freqs.new)
380 * Vote on memory bus frequency based on cpu frequency
381 * This sets the minimum frequency, display or avp may request higher
383 clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(freqs.new));
385 for_each_online_cpu(freqs.cpu)
386 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
388 #ifdef CONFIG_CPU_FREQ_DEBUG
389 printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
390 freqs.old, freqs.new);
393 ret = clk_set_rate(cpu_clk, freqs.new * 1000);
395 pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
400 for_each_online_cpu(freqs.cpu)
401 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
406 unsigned int tegra_count_slow_cpus(unsigned long speed_limit)
408 unsigned int cnt = 0;
411 for_each_online_cpu(i)
412 if (target_cpu_speed[i] <= speed_limit)
417 unsigned int tegra_get_slowest_cpu_n(void) {
418 unsigned int cpu = nr_cpu_ids;
419 unsigned long rate = ULONG_MAX;
422 for_each_online_cpu(i)
423 if ((i > 0) && (rate > target_cpu_speed[i])) {
425 rate = target_cpu_speed[i];
430 unsigned long tegra_cpu_lowest_speed(void) {
431 unsigned long rate = ULONG_MAX;
434 for_each_online_cpu(i)
435 rate = min(rate, target_cpu_speed[i]);
439 unsigned long tegra_cpu_highest_speed(void) {
440 unsigned long rate = 0;
443 for_each_online_cpu(i)
444 rate = max(rate, target_cpu_speed[i]);
/*
 * Apply the throttle and EDP governors on top of the highest per-CPU
 * request and program the resulting rate.  Caller must hold
 * tegra_cpu_lock.  @speed_cap may be NULL when the caller does not
 * need the capped value back — several call sites in this file pass
 * NULL, so the store must be guarded to avoid a NULL dereference.
 */
int tegra_cpu_cap_highest_speed(unsigned int *speed_cap)
{
	unsigned int new_speed = tegra_cpu_highest_speed();

	new_speed = throttle_governor_speed(new_speed);
	new_speed = edp_governor_speed(new_speed);
	if (speed_cap)
		*speed_cap = new_speed;
	return tegra_update_cpu_speed(new_speed);
}
459 static int tegra_target(struct cpufreq_policy *policy,
460 unsigned int target_freq,
461 unsigned int relation)
465 unsigned int new_speed;
468 mutex_lock(&tegra_cpu_lock);
475 cpufreq_frequency_table_target(policy, freq_table, target_freq,
478 freq = freq_table[idx].frequency;
480 target_cpu_speed[policy->cpu] = freq;
481 ret = tegra_cpu_cap_highest_speed(&new_speed);
483 tegra_auto_hotplug_governor(new_speed, false);
485 mutex_unlock(&tegra_cpu_lock);
491 static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
494 mutex_lock(&tegra_cpu_lock);
495 if (event == PM_SUSPEND_PREPARE) {
497 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
498 freq_table[suspend_index].frequency);
499 tegra_update_cpu_speed(freq_table[suspend_index].frequency);
500 tegra_auto_hotplug_governor(
501 freq_table[suspend_index].frequency, true);
502 } else if (event == PM_POST_SUSPEND) {
503 is_suspended = false;
504 tegra_cpu_edp_init(true);
506 mutex_unlock(&tegra_cpu_lock);
511 static struct notifier_block tegra_cpu_pm_notifier = {
512 .notifier_call = tegra_pm_notify,
515 static int tegra_cpu_init(struct cpufreq_policy *policy)
517 if (policy->cpu >= CONFIG_NR_CPUS)
520 cpu_clk = clk_get_sys(NULL, "cpu");
522 return PTR_ERR(cpu_clk);
524 emc_clk = clk_get_sys("cpu", "emc");
525 if (IS_ERR(emc_clk)) {
527 return PTR_ERR(emc_clk);
533 cpufreq_frequency_table_cpuinfo(policy, freq_table);
534 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
535 policy->cur = tegra_getspeed(policy->cpu);
536 target_cpu_speed[policy->cpu] = policy->cur;
538 /* FIXME: what's the actual transition time? */
539 policy->cpuinfo.transition_latency = 300 * 1000;
541 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
542 cpumask_copy(policy->related_cpus, cpu_possible_mask);
544 if (policy->cpu == 0) {
545 register_pm_notifier(&tegra_cpu_pm_notifier);
551 static int tegra_cpu_exit(struct cpufreq_policy *policy)
553 cpufreq_frequency_table_cpuinfo(policy, freq_table);
554 clk_disable(emc_clk);
560 static struct freq_attr *tegra_cpufreq_attr[] = {
561 &cpufreq_freq_attr_scaling_available_freqs,
562 #ifdef CONFIG_TEGRA_THERMAL_THROTTLE
568 static struct cpufreq_driver tegra_cpufreq_driver = {
569 .verify = tegra_verify_speed,
570 .target = tegra_target,
571 .get = tegra_getspeed,
572 .init = tegra_cpu_init,
573 .exit = tegra_cpu_exit,
575 .attr = tegra_cpufreq_attr,
578 static int __init tegra_cpufreq_init(void)
582 struct tegra_cpufreq_table_data *table_data =
583 tegra_cpufreq_table_get();
585 suspend_index = table_data->suspend_index;
587 #ifdef CONFIG_TEGRA_THERMAL_THROTTLE
589 * High-priority, others flags default: not bound to a specific
590 * CPU, has rescue worker task (in case of allocation deadlock,
591 * etc.). Single-threaded.
593 workqueue = alloc_workqueue("cpu-tegra",
594 WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
597 INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func);
599 throttle_lowest_index = table_data->throttle_lowest_index;
600 throttle_highest_index = table_data->throttle_highest_index;
602 ret = tegra_auto_hotplug_init(&tegra_cpu_lock);
606 freq_table = table_data->freq_table;
607 tegra_cpu_edp_init(false);
608 return cpufreq_register_driver(&tegra_cpufreq_driver);
611 static void __exit tegra_cpufreq_exit(void)
613 #ifdef CONFIG_TEGRA_THERMAL_THROTTLE
614 destroy_workqueue(workqueue);
616 tegra_cpu_edp_exit();
617 tegra_auto_hotplug_exit();
618 cpufreq_unregister_driver(&tegra_cpufreq_driver);
622 MODULE_AUTHOR("Colin Cross <ccross@android.com>");
623 MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
624 MODULE_LICENSE("GPL");
625 module_init(tegra_cpufreq_init);
626 module_exit(tegra_cpufreq_exit);