/*
 * arch/arm/mach-tegra/cpu-tegra.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@google.com>
 *	Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
 *
 * Copyright (C) 2010-2012 NVIDIA CORPORATION. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/cpu.h>

#include <mach/thermal.h>

#include "cpu-tegra.h"
46 /* tegra throttling and edp governors require frequencies in the table
47 to be in ascending order */
48 static struct cpufreq_frequency_table *freq_table;
50 static struct clk *cpu_clk;
51 static struct clk *emc_clk;
53 static unsigned long policy_max_speed[CONFIG_NR_CPUS];
54 static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
55 static DEFINE_MUTEX(tegra_cpu_lock);
56 static bool is_suspended;
57 static int suspend_index;
59 static bool force_policy_max;
61 static int force_policy_max_set(const char *arg, const struct kernel_param *kp)
64 bool old_policy = force_policy_max;
66 mutex_lock(&tegra_cpu_lock);
68 ret = param_set_bool(arg, kp);
69 if ((ret == 0) && (old_policy != force_policy_max))
70 tegra_cpu_set_speed_cap(NULL);
72 mutex_unlock(&tegra_cpu_lock);
/* module_param getter for force_policy_max */
static int force_policy_max_get(char *buffer, const struct kernel_param *kp)
{
	return param_get_bool(buffer, kp);
}
81 static struct kernel_param_ops policy_ops = {
82 .set = force_policy_max_set,
83 .get = force_policy_max_get,
85 module_param_cb(force_policy_max, &policy_ops, &force_policy_max, 0644);
88 static unsigned int cpu_user_cap;
90 static inline void _cpu_user_cap_set_locked(void)
92 #ifndef CONFIG_TEGRA_CPU_CAP_EXACT_FREQ
93 if (cpu_user_cap != 0) {
95 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
96 if (freq_table[i].frequency > cpu_user_cap)
99 i = (i == 0) ? 0 : i - 1;
100 cpu_user_cap = freq_table[i].frequency;
103 tegra_cpu_set_speed_cap(NULL);
106 void tegra_cpu_user_cap_set(unsigned int speed_khz)
108 mutex_lock(&tegra_cpu_lock);
110 cpu_user_cap = speed_khz;
111 _cpu_user_cap_set_locked();
113 mutex_unlock(&tegra_cpu_lock);
116 static int cpu_user_cap_set(const char *arg, const struct kernel_param *kp)
120 mutex_lock(&tegra_cpu_lock);
122 ret = param_set_uint(arg, kp);
124 _cpu_user_cap_set_locked();
126 mutex_unlock(&tegra_cpu_lock);
/* module_param getter for cpu_user_cap */
static int cpu_user_cap_get(char *buffer, const struct kernel_param *kp)
{
	return param_get_uint(buffer, kp);
}
135 static struct kernel_param_ops cap_ops = {
136 .set = cpu_user_cap_set,
137 .get = cpu_user_cap_get,
139 module_param_cb(cpu_user_cap, &cap_ops, &cpu_user_cap, 0644);
141 static unsigned int user_cap_speed(unsigned int requested_speed)
143 if ((cpu_user_cap) && (requested_speed > cpu_user_cap))
145 return requested_speed;
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
/* sysfs read-only attribute: report whether thermal throttling is active */
static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", tegra_is_throttling());
}

cpufreq_freq_attr_ro(throttle);
#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */
158 #ifdef CONFIG_TEGRA_EDP_LIMITS
160 static const struct tegra_edp_limits *cpu_edp_limits;
161 static int cpu_edp_limits_size;
163 static const unsigned int *system_edp_limits;
164 static bool system_edp_alarm;
166 static int edp_thermal_index;
167 static cpumask_t edp_cpumask;
168 static unsigned int edp_limit;
170 unsigned int tegra_get_edp_limit(void)
175 static unsigned int edp_predict_limit(unsigned int cpus)
177 unsigned int limit = 0;
180 if (cpu_edp_limits) {
181 BUG_ON(edp_thermal_index >= cpu_edp_limits_size);
182 limit = cpu_edp_limits[edp_thermal_index].freq_limits[cpus - 1];
184 if (system_edp_limits && system_edp_alarm)
185 limit = min(limit, system_edp_limits[cpus - 1]);
190 static void edp_update_limit(void)
192 unsigned int limit = edp_predict_limit(cpumask_weight(&edp_cpumask));
194 #ifdef CONFIG_TEGRA_EDP_EXACT_FREQ
198 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
199 if (freq_table[i].frequency > limit) {
203 BUG_ON(i == 0); /* min freq above the limit or table empty */
204 edp_limit = freq_table[i-1].frequency;
208 static unsigned int edp_governor_speed(unsigned int requested_speed)
210 if ((!edp_limit) || (requested_speed <= edp_limit))
211 return requested_speed;
216 int tegra_edp_get_trip_temp(void *data, long trip)
218 tegra_get_cpu_edp_limits(&cpu_edp_limits, &cpu_edp_limits_size);
219 return cpu_edp_limits[trip].temperature * 1000;
222 int tegra_edp_get_trip_size(void)
224 tegra_get_cpu_edp_limits(&cpu_edp_limits, &cpu_edp_limits_size);
225 return cpu_edp_limits_size-1;
/*
 * thermal_cooling_device_ops.get_max_state: each EDP cooling device is
 * binary (breached / not breached).
 * NOTE(review): body restored from truncated source as *max_state = 1 —
 * verify against upstream.
 */
int tegra_edp_get_max_state(struct thermal_cooling_device *cdev,
				unsigned long *max_state)
{
	*max_state = 1;
	return 0;
}

/* Bitmask for which edp trip points have been breached */
static int edp_state_mask;
238 int tegra_edp_get_cur_state(struct thermal_cooling_device *cdev,
239 unsigned long *cur_state)
241 struct tegra_cooling_device *tegra_cdev = cdev->devdata;
242 int index = tegra_cdev->id && 0xffff;
243 *cur_state = !!((1 << index) & edp_state_mask);
247 int tegra_edp_set_cur_state(struct thermal_cooling_device *cdev,
248 unsigned long cur_state)
250 struct tegra_cooling_device *tegra_cdev = cdev->devdata;
257 mutex_lock(&tegra_cpu_lock);
258 index = tegra_cdev->id & 0xffff;
261 edp_state_mask |= 1 << index;
263 edp_state_mask &= ~(1 << index);
265 for (i=31; i>=0; i--)
266 if (edp_state_mask & (1 << i))
269 edp_thermal_index = i + 1;
271 /* Update cpu rate if cpufreq (at least on cpu0) is already started;
272 alter cpu dvfs table for this thermal zone if necessary */
273 tegra_cpu_dvfs_alter(edp_thermal_index, &edp_cpumask, true, 0);
274 if (target_cpu_speed[0]) {
276 tegra_cpu_set_speed_cap(NULL);
278 tegra_cpu_dvfs_alter(edp_thermal_index, &edp_cpumask, false, 0);
279 mutex_unlock(&tegra_cpu_lock);
284 static struct thermal_cooling_device_ops tegra_edp_cooling_ops = {
285 .get_max_state = tegra_edp_get_max_state,
286 .get_cur_state = tegra_edp_get_cur_state,
287 .set_cur_state = tegra_edp_set_cur_state,
290 int tegra_system_edp_alarm(bool alarm)
294 mutex_lock(&tegra_cpu_lock);
295 system_edp_alarm = alarm;
297 /* Update cpu rate if cpufreq (at least on cpu0) is already started
298 and cancel emergency throttling after either edp limit is applied
299 or alarm is canceled */
300 if (target_cpu_speed[0]) {
302 ret = tegra_cpu_set_speed_cap(NULL);
305 tegra_edp_throttle_cpu_now(0);
307 mutex_unlock(&tegra_cpu_lock);
312 bool tegra_cpu_edp_favor_up(unsigned int n, int mp_overhead)
314 unsigned int current_limit, next_limit;
319 if (n >= ARRAY_SIZE(cpu_edp_limits->freq_limits))
322 current_limit = edp_predict_limit(n);
323 next_limit = edp_predict_limit(n + 1);
325 return ((next_limit * (n + 1)) >=
326 (current_limit * n * (100 + mp_overhead) / 100));
329 bool tegra_cpu_edp_favor_down(unsigned int n, int mp_overhead)
331 unsigned int current_limit, next_limit;
336 if (n > ARRAY_SIZE(cpu_edp_limits->freq_limits))
339 current_limit = edp_predict_limit(n);
340 next_limit = edp_predict_limit(n - 1);
342 return ((next_limit * (n - 1) * (100 + mp_overhead) / 100)) >
346 static int tegra_cpu_edp_notify(
347 struct notifier_block *nb, unsigned long event, void *hcpu)
350 unsigned int cpu_speed, new_speed;
351 int cpu = (long)hcpu;
355 mutex_lock(&tegra_cpu_lock);
356 cpu_set(cpu, edp_cpumask);
359 cpu_speed = tegra_getspeed(0);
360 new_speed = edp_governor_speed(cpu_speed);
361 if (new_speed < cpu_speed) {
362 ret = tegra_cpu_set_speed_cap(NULL);
363 printk(KERN_DEBUG "cpu-tegra:%sforce EDP limit %u kHz"
364 "\n", ret ? " failed to " : " ", new_speed);
367 ret = tegra_cpu_dvfs_alter(
368 edp_thermal_index, &edp_cpumask, false, event);
370 cpu_clear(cpu, edp_cpumask);
373 mutex_unlock(&tegra_cpu_lock);
376 mutex_lock(&tegra_cpu_lock);
377 cpu_clear(cpu, edp_cpumask);
378 tegra_cpu_dvfs_alter(
379 edp_thermal_index, &edp_cpumask, true, event);
381 tegra_cpu_set_speed_cap(NULL);
382 mutex_unlock(&tegra_cpu_lock);
385 return notifier_from_errno(ret);
388 static struct notifier_block tegra_cpu_edp_notifier = {
389 .notifier_call = tegra_cpu_edp_notify,
392 static struct thermal_cooling_device *edp_cdev;
393 static struct tegra_cooling_device edp_cooling_devices[] = {
394 { .id = CDEV_EDPTABLE_ID_EDP_0 },
395 { .id = CDEV_EDPTABLE_ID_EDP_1 },
396 { .id = CDEV_EDPTABLE_ID_EDP_2 },
397 { .id = CDEV_EDPTABLE_ID_EDP_3 },
398 { .id = CDEV_EDPTABLE_ID_EDP_4 },
402 static void tegra_cpu_edp_init(bool resume)
404 tegra_get_system_edp_limits(&system_edp_limits);
405 tegra_get_cpu_edp_limits(&cpu_edp_limits, &cpu_edp_limits_size);
407 if (!(cpu_edp_limits || system_edp_limits)) {
409 pr_info("cpu-tegra: no EDP table is provided\n");
413 /* FIXME: use the highest temperature limits if sensor is not on-line?
414 * If thermal zone is not set yet by the sensor, edp_thermal_index = 0.
415 * Boot frequency allowed SoC to get here, should work till sensor is
418 edp_cpumask = *cpu_online_mask;
422 register_hotcpu_notifier(&tegra_cpu_edp_notifier);
423 pr_info("cpu-tegra: init EDP limit: %u MHz\n", edp_limit/1000);
428 for (i=0; i<cpu_edp_limits_size-1; i++) {
429 edp_cdev = thermal_cooling_device_register(
431 &edp_cooling_devices[i],
432 &tegra_edp_cooling_ops);
438 static void tegra_cpu_edp_exit(void)
440 if (!(cpu_edp_limits || system_edp_limits))
443 unregister_hotcpu_notifier(&tegra_cpu_edp_notifier);
446 #ifdef CONFIG_DEBUG_FS
448 static int system_edp_alarm_get(void *data, u64 *val)
450 *val = (u64)system_edp_alarm;
453 static int system_edp_alarm_set(void *data, u64 val)
455 if (val > 1) { /* emulate emergency throttling */
456 tegra_edp_throttle_cpu_now(val);
459 return tegra_system_edp_alarm((bool)val);
461 DEFINE_SIMPLE_ATTRIBUTE(system_edp_alarm_fops,
462 system_edp_alarm_get, system_edp_alarm_set, "%llu\n");
464 static int __init tegra_edp_debug_init(struct dentry *cpu_tegra_debugfs_root)
466 if (!debugfs_create_file("edp_alarm", 0644, cpu_tegra_debugfs_root,
467 NULL, &system_edp_alarm_fops))
474 #else /* CONFIG_TEGRA_EDP_LIMITS */
475 #define edp_governor_speed(requested_speed) (requested_speed)
476 #define tegra_cpu_edp_init(resume)
477 #define tegra_cpu_edp_exit()
478 #define tegra_edp_debug_init(cpu_tegra_debugfs_root) (0)
479 #endif /* CONFIG_TEGRA_EDP_LIMITS */
#ifdef CONFIG_DEBUG_FS

static struct dentry *cpu_tegra_debugfs_root;

/*
 * Create the cpu-tegra debugfs directory and its entries; removes the
 * whole directory again on partial failure.
 * (goto-cleanup structure restored from truncated source.)
 */
static int __init tegra_cpu_debug_init(void)
{
	cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", NULL);

	if (!cpu_tegra_debugfs_root)
		return -ENOMEM;

	if (tegra_edp_debug_init(cpu_tegra_debugfs_root))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(cpu_tegra_debugfs_root);
	return -ENOMEM;
}

static void __exit tegra_cpu_debug_exit(void)
{
	debugfs_remove_recursive(cpu_tegra_debugfs_root);
}

late_initcall(tegra_cpu_debug_init);
module_exit(tegra_cpu_debug_exit);
#endif /* CONFIG_DEBUG_FS */
511 static int tegra_verify_speed(struct cpufreq_policy *policy)
513 return cpufreq_frequency_table_verify(policy, freq_table);
516 unsigned int tegra_getspeed(unsigned int cpu)
520 if (cpu >= CONFIG_NR_CPUS)
523 rate = clk_get_rate(cpu_clk) / 1000;
527 int tegra_update_cpu_speed(unsigned long rate)
530 struct cpufreq_freqs freqs;
532 freqs.old = tegra_getspeed(0);
535 rate = clk_round_rate(cpu_clk, rate * 1000);
536 if (!IS_ERR_VALUE(rate))
537 freqs.new = rate / 1000;
539 if (freqs.old == freqs.new)
543 * Vote on memory bus frequency based on cpu frequency
544 * This sets the minimum frequency, display or avp may request higher
546 if (freqs.old < freqs.new) {
547 ret = tegra_update_mselect_rate(freqs.new);
549 pr_err("cpu-tegra: Failed to scale mselect for cpu"
550 " frequency %u kHz\n", freqs.new);
553 ret = clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(freqs.new));
555 pr_err("cpu-tegra: Failed to scale emc for cpu"
556 " frequency %u kHz\n", freqs.new);
561 for_each_online_cpu(freqs.cpu)
562 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
564 #ifdef CONFIG_CPU_FREQ_DEBUG
565 printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
566 freqs.old, freqs.new);
569 ret = clk_set_rate(cpu_clk, freqs.new * 1000);
571 pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
576 for_each_online_cpu(freqs.cpu)
577 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
579 if (freqs.old > freqs.new) {
580 clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(freqs.new));
581 tegra_update_mselect_rate(freqs.new);
587 unsigned int tegra_count_slow_cpus(unsigned long speed_limit)
589 unsigned int cnt = 0;
592 for_each_online_cpu(i)
593 if (target_cpu_speed[i] <= speed_limit)
598 unsigned int tegra_get_slowest_cpu_n(void) {
599 unsigned int cpu = nr_cpu_ids;
600 unsigned long rate = ULONG_MAX;
603 for_each_online_cpu(i)
604 if ((i > 0) && (rate > target_cpu_speed[i])) {
606 rate = target_cpu_speed[i];
611 unsigned long tegra_cpu_lowest_speed(void) {
612 unsigned long rate = ULONG_MAX;
615 for_each_online_cpu(i)
616 rate = min(rate, target_cpu_speed[i]);
620 unsigned long tegra_cpu_highest_speed(void) {
621 unsigned long policy_max = ULONG_MAX;
622 unsigned long rate = 0;
625 for_each_online_cpu(i) {
626 if (force_policy_max)
627 policy_max = min(policy_max, policy_max_speed[i]);
628 rate = max(rate, target_cpu_speed[i]);
630 rate = min(rate, policy_max);
634 int tegra_cpu_set_speed_cap(unsigned int *speed_cap)
637 unsigned int new_speed = tegra_cpu_highest_speed();
639 #ifdef CONFIG_TEGRA_EDP_LIMITS
646 new_speed = tegra_throttle_governor_speed(new_speed);
647 new_speed = edp_governor_speed(new_speed);
648 new_speed = user_cap_speed(new_speed);
650 *speed_cap = new_speed;
652 ret = tegra_update_cpu_speed(new_speed);
654 tegra_auto_hotplug_governor(new_speed, false);
658 int tegra_suspended_target(unsigned int target_freq)
660 unsigned int new_speed = target_freq;
665 /* apply only "hard" caps */
666 new_speed = tegra_throttle_governor_speed(new_speed);
667 new_speed = edp_governor_speed(new_speed);
669 return tegra_update_cpu_speed(new_speed);
672 static int tegra_target(struct cpufreq_policy *policy,
673 unsigned int target_freq,
674 unsigned int relation)
678 unsigned int new_speed;
681 mutex_lock(&tegra_cpu_lock);
683 ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
688 freq = freq_table[idx].frequency;
690 target_cpu_speed[policy->cpu] = freq;
691 ret = tegra_cpu_set_speed_cap(&new_speed);
693 mutex_unlock(&tegra_cpu_lock);
699 static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
702 mutex_lock(&tegra_cpu_lock);
703 if (event == PM_SUSPEND_PREPARE) {
705 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
706 freq_table[suspend_index].frequency);
707 tegra_update_cpu_speed(freq_table[suspend_index].frequency);
708 tegra_auto_hotplug_governor(
709 freq_table[suspend_index].frequency, true);
710 } else if (event == PM_POST_SUSPEND) {
712 is_suspended = false;
713 tegra_cpu_edp_init(true);
714 tegra_cpu_set_speed_cap(&freq);
715 pr_info("Tegra cpufreq resume: restoring frequency to %d kHz\n",
718 mutex_unlock(&tegra_cpu_lock);
723 static struct notifier_block tegra_cpu_pm_notifier = {
724 .notifier_call = tegra_pm_notify,
727 static int tegra_cpu_init(struct cpufreq_policy *policy)
729 if (policy->cpu >= CONFIG_NR_CPUS)
732 cpu_clk = clk_get_sys(NULL, "cpu");
734 return PTR_ERR(cpu_clk);
736 emc_clk = clk_get_sys("cpu", "emc");
737 if (IS_ERR(emc_clk)) {
739 return PTR_ERR(emc_clk);
745 cpufreq_frequency_table_cpuinfo(policy, freq_table);
746 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
747 policy->cur = tegra_getspeed(policy->cpu);
748 target_cpu_speed[policy->cpu] = policy->cur;
750 /* FIXME: what's the actual transition time? */
751 policy->cpuinfo.transition_latency = 300 * 1000;
753 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
754 cpumask_copy(policy->related_cpus, cpu_possible_mask);
756 if (policy->cpu == 0) {
757 register_pm_notifier(&tegra_cpu_pm_notifier);
763 static int tegra_cpu_exit(struct cpufreq_policy *policy)
765 cpufreq_frequency_table_cpuinfo(policy, freq_table);
766 clk_disable(emc_clk);
772 static int tegra_cpufreq_policy_notifier(
773 struct notifier_block *nb, unsigned long event, void *data)
776 struct cpufreq_policy *policy = data;
778 if (event == CPUFREQ_NOTIFY) {
779 ret = cpufreq_frequency_table_target(policy, freq_table,
780 policy->max, CPUFREQ_RELATION_H, &i);
781 policy_max_speed[policy->cpu] =
782 ret ? policy->max : freq_table[i].frequency;
787 static struct notifier_block tegra_cpufreq_policy_nb = {
788 .notifier_call = tegra_cpufreq_policy_notifier,
791 static struct freq_attr *tegra_cpufreq_attr[] = {
792 &cpufreq_freq_attr_scaling_available_freqs,
793 #ifdef CONFIG_TEGRA_THERMAL_THROTTLE
799 static struct cpufreq_driver tegra_cpufreq_driver = {
800 .verify = tegra_verify_speed,
801 .target = tegra_target,
802 .get = tegra_getspeed,
803 .init = tegra_cpu_init,
804 .exit = tegra_cpu_exit,
806 .attr = tegra_cpufreq_attr,
809 static int __init tegra_cpufreq_init(void)
813 struct tegra_cpufreq_table_data *table_data =
814 tegra_cpufreq_table_get();
815 if (IS_ERR_OR_NULL(table_data))
818 suspend_index = table_data->suspend_index;
820 ret = tegra_throttle_init(&tegra_cpu_lock);
824 ret = tegra_auto_hotplug_init(&tegra_cpu_lock);
828 freq_table = table_data->freq_table;
829 tegra_cpu_edp_init(false);
831 ret = cpufreq_register_notifier(
832 &tegra_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
836 return cpufreq_register_driver(&tegra_cpufreq_driver);
839 static void __exit tegra_cpufreq_exit(void)
841 tegra_throttle_exit();
842 tegra_cpu_edp_exit();
843 tegra_auto_hotplug_exit();
844 cpufreq_unregister_driver(&tegra_cpufreq_driver);
845 cpufreq_unregister_notifier(
846 &tegra_cpufreq_policy_nb, CPUFREQ_POLICY_NOTIFIER);
850 MODULE_AUTHOR("Colin Cross <ccross@android.com>");
851 MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
852 MODULE_LICENSE("GPL");
853 module_init(tegra_cpufreq_init);
854 module_exit(tegra_cpufreq_exit);