[ARM/tegra] Add Tegra3 support
arch/arm/mach-tegra/cpu-tegra.c
/*
 * arch/arm/mach-tegra/cpu-tegra.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *      Colin Cross <ccross@google.com>
 *      Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>

#include <asm/system.h>

#include <mach/clk.h>

#include "clock.h"

static struct cpufreq_frequency_table *freq_table;

static struct clk *cpu_clk;
static struct clk *emc_clk;

static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;

unsigned int tegra_getspeed(unsigned int cpu);
static int tegra_update_cpu_speed(unsigned long rate);
static unsigned long tegra_cpu_highest_speed(void);

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
/* CPU frequency is gradually lowered when throttling is enabled */
#define THROTTLE_DELAY          msecs_to_jiffies(2000)

static bool is_throttling;
static int throttle_lowest_index;
static int throttle_highest_index;
static int throttle_index;
static int throttle_next_index;
static struct delayed_work throttle_work;
static struct workqueue_struct *workqueue;

#define tegra_cpu_is_throttling() (is_throttling)

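/*
 * Throttle worker: every THROTTLE_DELAY it steps the CPU down one entry
 * in the frequency table (towards throttle_lowest_index) until either
 * the lowest throttle entry is reached or throttling is disabled.  The
 * current speed is only lowered when it exceeds the throttle ceiling.
 */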
static void tegra_throttle_work_func(struct work_struct *work)
{
        unsigned int current_freq;

        mutex_lock(&tegra_cpu_lock);
        current_freq = tegra_getspeed(0);
        throttle_index = throttle_next_index;

        if (freq_table[throttle_index].frequency < current_freq)
                tegra_update_cpu_speed(freq_table[throttle_index].frequency);

        if (throttle_index > throttle_lowest_index) {
                throttle_next_index = throttle_index - 1;
                queue_delayed_work(workqueue, &throttle_work, THROTTLE_DELAY);
        }

        mutex_unlock(&tegra_cpu_lock);
}

/*
 * tegra_throttling_enable
 * This function may sleep
 */
void tegra_throttling_enable(bool enable)
{
        mutex_lock(&tegra_cpu_lock);

        if (enable && !is_throttling) {
                unsigned int current_freq = tegra_getspeed(0);

                is_throttling = true;

                for (throttle_index = throttle_highest_index;
                     throttle_index >= throttle_lowest_index;
                     throttle_index--)
                        if (freq_table[throttle_index].frequency
                            < current_freq)
                                break;

                throttle_index = max(throttle_index, throttle_lowest_index);
                throttle_next_index = throttle_index;
                queue_delayed_work(workqueue, &throttle_work, 0);
        } else if (!enable && is_throttling) {
                cancel_delayed_work_sync(&throttle_work);
                is_throttling = false;
                /* restore speed requested by governor */
                tegra_update_cpu_speed(tegra_cpu_highest_speed());
        }

        mutex_unlock(&tegra_cpu_lock);
}
EXPORT_SYMBOL_GPL(tegra_throttling_enable);

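/*
 * Clamp the governor's requested speed to the current throttle ceiling
 * while throttling is active; otherwise pass the request through
 * unchanged.
 */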
static unsigned int throttle_governor_speed(unsigned int requested_speed)
{
        return tegra_cpu_is_throttling() ?
                min(requested_speed, freq_table[throttle_index].frequency) :
                requested_speed;
}

static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", is_throttling);
}

cpufreq_freq_attr_ro(throttle);

#ifdef CONFIG_DEBUG_FS
static int throttle_debug_set(void *data, u64 val)
{
        tegra_throttling_enable(val);
        return 0;
}
static int throttle_debug_get(void *data, u64 *val)
{
        *val = (u64) is_throttling;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get, throttle_debug_set, "%llu\n");

static struct dentry *cpu_tegra_debugfs_root;

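/*
 * Debugfs hook for exercising the throttling path by hand, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 1 > /sys/kernel/debug/cpu-tegra/throttle   # start throttling
 *   cat /sys/kernel/debug/cpu-tegra/throttle        # read state (0/1)
 *   echo 0 > /sys/kernel/debug/cpu-tegra/throttle   # restore governor speed
 */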
static int __init tegra_cpu_debug_init(void)
{
        cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", 0);

        if (!cpu_tegra_debugfs_root)
                return -ENOMEM;

        if (!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root, NULL, &throttle_fops))
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(cpu_tegra_debugfs_root);
        return -ENOMEM;
}

static void __exit tegra_cpu_debug_exit(void)
{
        debugfs_remove_recursive(cpu_tegra_debugfs_root);
}

late_initcall(tegra_cpu_debug_init);
module_exit(tegra_cpu_debug_exit);
#endif /* CONFIG_DEBUG_FS */

#else /* CONFIG_TEGRA_THERMAL_THROTTLE */
#define tegra_cpu_is_throttling() (0)
#define throttle_governor_speed(requested_speed) (requested_speed)

void tegra_throttling_enable(bool enable)
{
}
#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */

int tegra_verify_speed(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy, freq_table);
}

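/*
 * All cores share a single CPU clock, so the same rate (in kHz) is
 * reported regardless of which CPU is queried.
 */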
unsigned int tegra_getspeed(unsigned int cpu)
{
        unsigned long rate;

        if (cpu >= CONFIG_NR_CPUS)
                return 0;

        rate = clk_get_rate(cpu_clk) / 1000;
        return rate;
}

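/*
 * Change the shared CPU clock rate (in kHz).  The EMC vote is adjusted
 * first, then the cpufreq pre/post-change notifiers are issued around
 * the actual clk_set_rate() call.  Callers hold tegra_cpu_lock.
 */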
static int tegra_update_cpu_speed(unsigned long rate)
{
        int ret = 0;
        struct cpufreq_freqs freqs;

        freqs.old = tegra_getspeed(0);
        freqs.new = rate;

        if (freqs.old == freqs.new)
                return ret;

        /*
         * Vote on the memory bus frequency based on the CPU frequency.
         * This sets the minimum frequency; the display or AVP may
         * request a higher rate.
         */
        if (rate >= 816000)
                clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
        else if (rate >= 608000)
                clk_set_rate(emc_clk, 300000000); /* cpu 608 MHz, emc 150 MHz */
        else if (rate >= 456000)
                clk_set_rate(emc_clk, 150000000); /* cpu 456 MHz, emc 75 MHz */
        else if (rate >= 312000)
                clk_set_rate(emc_clk, 100000000); /* cpu 312 MHz, emc 50 MHz */
        else
                clk_set_rate(emc_clk, 50000000);  /* emc 25 MHz */

        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

#ifdef CONFIG_CPU_FREQ_DEBUG
        printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
               freqs.old, freqs.new);
#endif

        ret = clk_set_rate(cpu_clk, freqs.new * 1000);
        if (ret) {
                pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
                        freqs.new);
                return ret;
        }

        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

        return 0;
}

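/*
 * Each CPU's latest governor request is cached in target_cpu_speed[];
 * since the cores share one clock, the highest request among the online
 * CPUs is the rate that actually gets programmed (possibly capped by
 * throttling in tegra_target() below).
 */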
static unsigned long tegra_cpu_highest_speed(void)
{
        unsigned long rate = 0;
        int i;

        for_each_online_cpu(i)
                rate = max(rate, target_cpu_speed[i]);
        return rate;
}

static int tegra_target(struct cpufreq_policy *policy,
                       unsigned int target_freq,
                       unsigned int relation)
{
        int idx;
        unsigned int freq;
        unsigned int new_speed;
        int ret = 0;

        mutex_lock(&tegra_cpu_lock);

        if (is_suspended) {
                ret = -EBUSY;
                goto out;
        }

        cpufreq_frequency_table_target(policy, freq_table, target_freq,
                relation, &idx);

        freq = freq_table[idx].frequency;

        target_cpu_speed[policy->cpu] = freq;
        new_speed = throttle_governor_speed(tegra_cpu_highest_speed());
        ret = tegra_update_cpu_speed(new_speed);
out:
        mutex_unlock(&tegra_cpu_lock);
        return ret;
}

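/*
 * Across suspend the CPU is dropped to the first frequency-table entry
 * and further rate changes are refused (tegra_target() returns -EBUSY)
 * until the PM_POST_SUSPEND notification clears is_suspended.
 */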
static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
        void *dummy)
{
        mutex_lock(&tegra_cpu_lock);
        if (event == PM_SUSPEND_PREPARE) {
                is_suspended = true;
                pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
                        freq_table[0].frequency);
                tegra_update_cpu_speed(freq_table[0].frequency);
        } else if (event == PM_POST_SUSPEND) {
                is_suspended = false;
        }
        mutex_unlock(&tegra_cpu_lock);

        return NOTIFY_OK;
}

static struct notifier_block tegra_cpu_pm_notifier = {
        .notifier_call = tegra_pm_notify,
};

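/*
 * Per-policy init: grab the CPU clock and the CPU's EMC clock handle
 * (used for the memory bandwidth vote in tegra_update_cpu_speed()),
 * publish the frequency table, and mark all CPUs as sharing one policy.
 * The PM notifier is registered once, when CPU0's policy comes up.
 */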
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
        if (policy->cpu >= CONFIG_NR_CPUS)
                return -EINVAL;

        cpu_clk = clk_get_sys(NULL, "cpu");
        if (IS_ERR(cpu_clk))
                return PTR_ERR(cpu_clk);

        emc_clk = clk_get_sys("cpu", "emc");
        if (IS_ERR(emc_clk)) {
                clk_put(cpu_clk);
                return PTR_ERR(emc_clk);
        }

        clk_enable(emc_clk);
        clk_enable(cpu_clk);

        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
        policy->cur = tegra_getspeed(policy->cpu);
        target_cpu_speed[policy->cpu] = policy->cur;

        /* FIXME: what's the actual transition time? */
        policy->cpuinfo.transition_latency = 300 * 1000;

        policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
        cpumask_copy(policy->related_cpus, cpu_possible_mask);

        if (policy->cpu == 0)
                register_pm_notifier(&tegra_cpu_pm_notifier);

        return 0;
}

static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        clk_disable(emc_clk);
        clk_put(emc_clk);
        clk_put(cpu_clk);
        return 0;
}

static struct freq_attr *tegra_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        &throttle,
#endif
        NULL,
};

static struct cpufreq_driver tegra_cpufreq_driver = {
        .verify         = tegra_verify_speed,
        .target         = tegra_target,
        .get            = tegra_getspeed,
        .init           = tegra_cpu_init,
        .exit           = tegra_cpu_exit,
        .name           = "tegra",
        .attr           = tegra_cpufreq_attr,
};

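/*
 * The frequency table (and the throttle index range) is not hard-coded
 * here; it is obtained at init time from tegra_cpufreq_table_get(),
 * presumably provided by the SoC-specific clock code so the table can
 * match the running chip.
 */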
static int __init tegra_cpufreq_init(void)
{
        struct tegra_cpufreq_table_data *table_data =
                tegra_cpufreq_table_get();
        BUG_ON(!table_data);

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        /*
         * High priority, other flags default: not bound to a specific
         * CPU, has a rescue worker task (in case of allocation deadlock,
         * etc.).  Single-threaded.
         */
        workqueue = alloc_workqueue("cpu-tegra",
                                    WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
        if (!workqueue)
                return -ENOMEM;
        INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func);

        throttle_lowest_index = table_data->throttle_lowest_index;
        throttle_highest_index = table_data->throttle_highest_index;
#endif
        freq_table = table_data->freq_table;
        return cpufreq_register_driver(&tegra_cpufreq_driver);
}

static void __exit tegra_cpufreq_exit(void)
{
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        destroy_workqueue(workqueue);
#endif
        cpufreq_unregister_driver(&tegra_cpufreq_driver);
}

MODULE_AUTHOR("Colin Cross <ccross@android.com>");
MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
MODULE_LICENSE("GPL");
module_init(tegra_cpufreq_init);
module_exit(tegra_cpufreq_exit);