/*
 * arch/arm/mach-tegra/cpu-tegra.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *      Colin Cross <ccross@google.com>
 *      Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>

#include <asm/system.h>

#include <mach/clk.h>

#include "clock.h"
#include "pm.h"

static struct cpufreq_frequency_table *freq_table;

static struct clk *cpu_clk;
static struct clk *emc_clk;

static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;

unsigned int tegra_getspeed(unsigned int cpu);
static int tegra_update_cpu_speed(unsigned long rate);
static unsigned long tegra_cpu_highest_speed(void);

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
/* CPU frequency is gradually lowered when throttling is enabled */
#define THROTTLE_DELAY          msecs_to_jiffies(2000)

static bool is_throttling;
static int throttle_lowest_index;
static int throttle_highest_index;
static int throttle_index;
static int throttle_next_index;
static struct delayed_work throttle_work;
static struct workqueue_struct *workqueue;

#define tegra_cpu_is_throttling() (is_throttling)

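/*
 * Worker for thermal throttling: each invocation steps the CPU one
 * entry down the frequency table (towards throttle_lowest_index) and
 * re-queues itself every THROTTLE_DELAY until that floor is reached.
 */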
static void tegra_throttle_work_func(struct work_struct *work)
{
        unsigned int current_freq;

        mutex_lock(&tegra_cpu_lock);
        current_freq = tegra_getspeed(0);
        throttle_index = throttle_next_index;

        if (freq_table[throttle_index].frequency < current_freq)
                tegra_update_cpu_speed(freq_table[throttle_index].frequency);

        if (throttle_index > throttle_lowest_index) {
                throttle_next_index = throttle_index - 1;
                queue_delayed_work(workqueue, &throttle_work, THROTTLE_DELAY);
        }

        mutex_unlock(&tegra_cpu_lock);
}

/*
 * tegra_throttling_enable - start or stop thermal throttling
 *
 * Enabling kicks off the step-down worker from the first table entry
 * below the current speed; disabling cancels the worker and restores
 * the speed requested by the governor.  This function may sleep.
 */
void tegra_throttling_enable(bool enable)
{
        mutex_lock(&tegra_cpu_lock);

        if (enable && !is_throttling) {
                unsigned int current_freq = tegra_getspeed(0);

                is_throttling = true;

                for (throttle_index = throttle_highest_index;
                     throttle_index >= throttle_lowest_index;
                     throttle_index--)
                        if (freq_table[throttle_index].frequency
                            < current_freq)
                                break;

                throttle_index = max(throttle_index, throttle_lowest_index);
                throttle_next_index = throttle_index;
                queue_delayed_work(workqueue, &throttle_work, 0);
        } else if (!enable && is_throttling) {
                cancel_delayed_work_sync(&throttle_work);
                is_throttling = false;
                /* restore speed requested by governor */
                tegra_update_cpu_speed(tegra_cpu_highest_speed());
        }

        mutex_unlock(&tegra_cpu_lock);
}
EXPORT_SYMBOL_GPL(tegra_throttling_enable);

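/*
 * A hypothetical thermal driver would pair calls to the export above:
 * tegra_throttling_enable(true) when an over-temperature trip fires,
 * and tegra_throttling_enable(false) once the die has cooled down.
 */

/*
 * Clamp the governor's requested speed to the current throttle ceiling
 * while throttling is active; otherwise pass it through unchanged.
 */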
static unsigned int throttle_governor_speed(unsigned int requested_speed)
{
        return tegra_cpu_is_throttling() ?
                min(requested_speed, freq_table[throttle_index].frequency) :
                requested_speed;
}

static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", is_throttling);
}

cpufreq_freq_attr_ro(throttle);

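/*
 * Debugfs hook: writing a non-zero value to cpu-tegra/throttle (under
 * the debugfs mount point) enables throttling by hand, zero disables
 * it; reading reports the current state.
 */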
#ifdef CONFIG_DEBUG_FS
static int throttle_debug_set(void *data, u64 val)
{
        tegra_throttling_enable(val);
        return 0;
}

static int throttle_debug_get(void *data, u64 *val)
{
        *val = (u64) is_throttling;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get,
                        throttle_debug_set, "%llu\n");

static struct dentry *cpu_tegra_debugfs_root;

static int __init tegra_cpu_debug_init(void)
{
        cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", NULL);

        if (!cpu_tegra_debugfs_root)
                return -ENOMEM;

        if (!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root,
                                 NULL, &throttle_fops))
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(cpu_tegra_debugfs_root);
        return -ENOMEM;
}

static void __exit tegra_cpu_debug_exit(void)
{
        debugfs_remove_recursive(cpu_tegra_debugfs_root);
}

late_initcall(tegra_cpu_debug_init);
module_exit(tegra_cpu_debug_exit);
#endif /* CONFIG_DEBUG_FS */

#else /* CONFIG_TEGRA_THERMAL_THROTTLE */
#define tegra_cpu_is_throttling() (0)
#define throttle_governor_speed(requested_speed) (requested_speed)

void tegra_throttling_enable(bool enable)
{
}
#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */

int tegra_verify_speed(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy, freq_table);
}

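/*
 * Both Tegra2 cores run from the shared cpu_clk, so the same rate (in
 * kHz) is reported for any valid cpu index.
 */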
unsigned int tegra_getspeed(unsigned int cpu)
{
        unsigned long rate;

        if (cpu >= CONFIG_NR_CPUS)
                return 0;

        rate = clk_get_rate(cpu_clk) / 1000;
        return rate;
}

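/*
 * Apply a new CPU rate (in kHz): raise the EMC bus vote first, send
 * CPUFREQ_PRECHANGE notifications, change cpu_clk, then send
 * CPUFREQ_POSTCHANGE.  All callers hold tegra_cpu_lock.
 */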
static int tegra_update_cpu_speed(unsigned long rate)
{
        int ret = 0;
        struct cpufreq_freqs freqs;

        freqs.old = tegra_getspeed(0);
        freqs.new = rate;

        if (freqs.old == freqs.new)
                return ret;

        /*
         * Vote on memory bus frequency based on cpu frequency
         * This sets the minimum frequency, display or avp may request higher
         */
        clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(rate));

        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

#ifdef CONFIG_CPU_FREQ_DEBUG
        printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
               freqs.old, freqs.new);
#endif

        ret = clk_set_rate(cpu_clk, freqs.new * 1000);
        if (ret) {
                pr_err("cpu-tegra: Failed to set cpu frequency to %u kHz\n",
                        freqs.new);
                return ret;
        }

        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

        return 0;
}

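/*
 * With a single shared CPU clock, the effective target is the highest
 * speed requested by any online CPU.
 */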
static unsigned long tegra_cpu_highest_speed(void)
{
        unsigned long rate = 0;
        int i;

        for_each_online_cpu(i)
                rate = max(rate, target_cpu_speed[i]);
        return rate;
}

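/*
 * cpufreq .target callback: map the request onto a table entry, record
 * it for this CPU, program the highest (possibly throttled) request
 * across online CPUs, and let the auto-hotplug governor react to the
 * new speed on success.
 */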
static int tegra_target(struct cpufreq_policy *policy,
                        unsigned int target_freq,
                        unsigned int relation)
{
        int idx;
        unsigned int freq;
        unsigned int new_speed;
        int ret = 0;

        mutex_lock(&tegra_cpu_lock);

        if (is_suspended) {
                ret = -EBUSY;
                goto out;
        }

        cpufreq_frequency_table_target(policy, freq_table, target_freq,
                relation, &idx);

        freq = freq_table[idx].frequency;

        target_cpu_speed[policy->cpu] = freq;
        new_speed = throttle_governor_speed(tegra_cpu_highest_speed());
        ret = tegra_update_cpu_speed(new_speed);
out:
        mutex_unlock(&tegra_cpu_lock);

        if (ret == 0)
                tegra_auto_hotplug_governor(new_speed);

        return ret;
}

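/*
 * On PM_SUSPEND_PREPARE drop to freq_table[0] and block further
 * scaling (tegra_target() returns -EBUSY while is_suspended is set);
 * PM_POST_SUSPEND lifts the block.
 */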
static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
        void *dummy)
{
        mutex_lock(&tegra_cpu_lock);
        if (event == PM_SUSPEND_PREPARE) {
                is_suspended = true;
                pr_info("Tegra cpufreq suspend: setting frequency to %u kHz\n",
                        freq_table[0].frequency);
                tegra_update_cpu_speed(freq_table[0].frequency);
        } else if (event == PM_POST_SUSPEND) {
                is_suspended = false;
        }
        mutex_unlock(&tegra_cpu_lock);

        return NOTIFY_OK;
}

static struct notifier_block tegra_cpu_pm_notifier = {
        .notifier_call = tegra_pm_notify,
};

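/*
 * Per-policy setup: take and enable the cpu and emc clocks, publish
 * the frequency table, and register the PM notifier once, on CPU0.
 */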
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
        if (policy->cpu >= CONFIG_NR_CPUS)
                return -EINVAL;

        cpu_clk = clk_get_sys(NULL, "cpu");
        if (IS_ERR(cpu_clk))
                return PTR_ERR(cpu_clk);

        emc_clk = clk_get_sys("cpu", "emc");
        if (IS_ERR(emc_clk)) {
                clk_put(cpu_clk);
                return PTR_ERR(emc_clk);
        }

        clk_enable(emc_clk);
        clk_enable(cpu_clk);

        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
        policy->cur = tegra_getspeed(policy->cpu);
        target_cpu_speed[policy->cpu] = policy->cur;

        /* FIXME: what's the actual transition time? */
        policy->cpuinfo.transition_latency = 300 * 1000;

        policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
        cpumask_copy(policy->related_cpus, cpu_possible_mask);

        if (policy->cpu == 0)
                register_pm_notifier(&tegra_cpu_pm_notifier);

        return 0;
}

static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        clk_disable(emc_clk);
        clk_put(emc_clk);
        clk_put(cpu_clk);
        return 0;
}

static struct freq_attr *tegra_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        &throttle,
#endif
        NULL,
};

static struct cpufreq_driver tegra_cpufreq_driver = {
        .verify         = tegra_verify_speed,
        .target         = tegra_target,
        .get            = tegra_getspeed,
        .init           = tegra_cpu_init,
        .exit           = tegra_cpu_exit,
        .name           = "tegra",
        .attr           = tegra_cpufreq_attr,
};

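/*
 * Driver registration: fetch the SoC frequency table, set up the
 * throttle workqueue (when thermal throttling is configured), start
 * auto-hotplug, then register with the cpufreq core.
 */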
static int __init tegra_cpufreq_init(void)
{
        int ret = 0;

        struct tegra_cpufreq_table_data *table_data =
                tegra_cpufreq_table_get();
        BUG_ON(!table_data);

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        /*
         * High-priority, not bound to a specific CPU, with a rescuer
         * thread (in case of allocation deadlock, etc.).
         * Single-threaded.
         */
        workqueue = alloc_workqueue("cpu-tegra",
                                    WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
        if (!workqueue)
                return -ENOMEM;
        INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func);

        throttle_lowest_index = table_data->throttle_lowest_index;
        throttle_highest_index = table_data->throttle_highest_index;
#endif
        ret = tegra_auto_hotplug_init();
        if (ret)
                return ret;

        freq_table = table_data->freq_table;
        return cpufreq_register_driver(&tegra_cpufreq_driver);
}

static void __exit tegra_cpufreq_exit(void)
{
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        destroy_workqueue(workqueue);
#endif
        tegra_auto_hotplug_exit();
        cpufreq_unregister_driver(&tegra_cpufreq_driver);
}

MODULE_AUTHOR("Colin Cross <ccross@android.com>");
MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
MODULE_LICENSE("GPL");
module_init(tegra_cpufreq_init);
module_exit(tegra_cpufreq_exit);