/*
 * arch/arm/mach-tegra/cpu-tegra.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *      Colin Cross <ccross@google.com>
 *      Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>

#include <asm/system.h>

#include <mach/clk.h>

#include "clock.h"

static struct cpufreq_frequency_table *freq_table;

#define NUM_CPUS        2

static struct clk *cpu_clk;
static struct clk *emc_clk;

static unsigned long target_cpu_speed[NUM_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;

unsigned int tegra_getspeed(unsigned int cpu);
static int tegra_update_cpu_speed(unsigned long rate);
static unsigned long tegra_cpu_highest_speed(void);

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
/* CPU frequency is gradually lowered when throttling is enabled */
#define THROTTLE_DELAY          msecs_to_jiffies(2000)

static bool is_throttling;
static int throttle_lowest_index;
static int throttle_highest_index;
static int throttle_index;
static int throttle_next_index;
static struct delayed_work throttle_work;
static struct workqueue_struct *workqueue;

#define tegra_cpu_is_throttling() (is_throttling)

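/*
 * Step the CPU down one frequency-table entry at a time.  The work item
 * re-queues itself every THROTTLE_DELAY until the lowest throttle entry
 * is reached or throttling is disabled.
 */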
static void tegra_throttle_work_func(struct work_struct *work)
{
        unsigned int current_freq;

        mutex_lock(&tegra_cpu_lock);
        current_freq = tegra_getspeed(0);
        throttle_index = throttle_next_index;

        if (freq_table[throttle_index].frequency < current_freq)
                tegra_update_cpu_speed(freq_table[throttle_index].frequency);

        if (throttle_index > throttle_lowest_index) {
                throttle_next_index = throttle_index - 1;
                queue_delayed_work(workqueue, &throttle_work, THROTTLE_DELAY);
        }

        mutex_unlock(&tegra_cpu_lock);
}

/*
 * tegra_throttling_enable - start or stop thermal throttling of the CPU clock.
 * This function may sleep.
 */
void tegra_throttling_enable(bool enable)
{
        mutex_lock(&tegra_cpu_lock);

        if (enable && !is_throttling) {
                unsigned int current_freq = tegra_getspeed(0);

                is_throttling = true;

                for (throttle_index = throttle_highest_index;
                     throttle_index >= throttle_lowest_index;
                     throttle_index--)
                        if (freq_table[throttle_index].frequency
                            < current_freq)
                                break;

                throttle_index = max(throttle_index, throttle_lowest_index);
                throttle_next_index = throttle_index;
                queue_delayed_work(workqueue, &throttle_work, 0);
        } else if (!enable && is_throttling) {
                cancel_delayed_work_sync(&throttle_work);
                is_throttling = false;
                /* restore speed requested by governor */
                tegra_update_cpu_speed(tegra_cpu_highest_speed());
        }

        mutex_unlock(&tegra_cpu_lock);
}
EXPORT_SYMBOL_GPL(tegra_throttling_enable);

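/*
 * Clamp the governor's requested speed to the current throttle ceiling
 * while throttling is active; otherwise pass the request through.
 */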
static unsigned int throttle_governor_speed(unsigned int requested_speed)
{
        return tegra_cpu_is_throttling() ?
                min(requested_speed, freq_table[throttle_index].frequency) :
                requested_speed;
}

static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", is_throttling);
}

cpufreq_freq_attr_ro(throttle);

#ifdef CONFIG_DEBUG_FS
static int throttle_debug_set(void *data, u64 val)
{
        tegra_throttling_enable(val);
        return 0;
}
static int throttle_debug_get(void *data, u64 *val)
{
        *val = (u64) is_throttling;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get, throttle_debug_set,
                        "%llu\n");

static struct dentry *cpu_tegra_debugfs_root;

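/* Expose the throttling state at <debugfs>/cpu-tegra/throttle */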
static int __init tegra_cpu_debug_init(void)
{
        cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", NULL);

        if (!cpu_tegra_debugfs_root)
                return -ENOMEM;

        if (!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root,
                                 NULL, &throttle_fops))
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(cpu_tegra_debugfs_root);
        return -ENOMEM;
}

static void __exit tegra_cpu_debug_exit(void)
{
        debugfs_remove_recursive(cpu_tegra_debugfs_root);
}

late_initcall(tegra_cpu_debug_init);
module_exit(tegra_cpu_debug_exit);
#endif /* CONFIG_DEBUG_FS */

#else /* CONFIG_TEGRA_THERMAL_THROTTLE */
#define tegra_cpu_is_throttling() (0)
#define throttle_governor_speed(requested_speed) (requested_speed)

void tegra_throttling_enable(bool enable)
{
}
#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */

int tegra_verify_speed(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy, freq_table);
}

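/* Return the current CPU clock rate in kHz; both cores share one clock. */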
unsigned int tegra_getspeed(unsigned int cpu)
{
        unsigned long rate;

        if (cpu >= NUM_CPUS)
                return 0;

        rate = clk_get_rate(cpu_clk) / 1000;
        return rate;
}

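/*
 * Set the shared CPU clock to @rate (in kHz), voting for a matching EMC
 * floor and issuing cpufreq PRE/POSTCHANGE notifications on every online CPU.
 */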
static int tegra_update_cpu_speed(unsigned long rate)
{
        int ret = 0;
        struct cpufreq_freqs freqs;

        freqs.old = tegra_getspeed(0);
        freqs.new = rate;

        if (freqs.old == freqs.new)
                return ret;

        /*
         * Vote on the memory bus frequency based on the CPU frequency.
         * This only sets a minimum; the display or AVP may request more.
         */
        if (rate >= 816000)
                clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
        else if (rate >= 608000)
                clk_set_rate(emc_clk, 300000000); /* cpu 608 MHz, emc 150 MHz */
        else if (rate >= 456000)
                clk_set_rate(emc_clk, 150000000); /* cpu 456 MHz, emc 75 MHz */
        else if (rate >= 312000)
                clk_set_rate(emc_clk, 100000000); /* cpu 312 MHz, emc 50 MHz */
        else
                clk_set_rate(emc_clk, 50000000);  /* emc 25 MHz */

        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

#ifdef CONFIG_CPU_FREQ_DEBUG
        printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
               freqs.old, freqs.new);
#endif

        ret = clk_set_rate(cpu_clk, freqs.new * 1000);
        if (ret) {
                pr_err("cpu-tegra: Failed to set cpu frequency to %u kHz\n",
                        freqs.new);
                return ret;
        }

        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

        return 0;
}

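/* The CPUs share one clock, so run at the fastest rate any online CPU wants. */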
static unsigned long tegra_cpu_highest_speed(void)
{
        unsigned long rate = 0;
        int i;

        for_each_online_cpu(i)
                rate = max(rate, target_cpu_speed[i]);
        return rate;
}

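/*
 * cpufreq target callback: record this CPU's request, then program the
 * highest requested rate (subject to thermal throttling).  Refused while
 * a suspend transition is in progress.
 */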
static int tegra_target(struct cpufreq_policy *policy,
                       unsigned int target_freq,
                       unsigned int relation)
{
        int idx;
        unsigned int freq;
        unsigned int new_speed;
        int ret = 0;

        mutex_lock(&tegra_cpu_lock);

        if (is_suspended) {
                ret = -EBUSY;
                goto out;
        }

        cpufreq_frequency_table_target(policy, freq_table, target_freq,
                relation, &idx);

        freq = freq_table[idx].frequency;

        target_cpu_speed[policy->cpu] = freq;
        new_speed = throttle_governor_speed(tegra_cpu_highest_speed());
        ret = tegra_update_cpu_speed(new_speed);
out:
        mutex_unlock(&tegra_cpu_lock);
        return ret;
}

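/*
 * Before suspend, switch to freq_table[0] and reject further governor
 * requests; normal scaling resumes after PM_POST_SUSPEND.
 */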
static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
        void *dummy)
{
        mutex_lock(&tegra_cpu_lock);
        if (event == PM_SUSPEND_PREPARE) {
                is_suspended = true;
                pr_info("Tegra cpufreq suspend: setting frequency to %u kHz\n",
                        freq_table[0].frequency);
                tegra_update_cpu_speed(freq_table[0].frequency);
        } else if (event == PM_POST_SUSPEND) {
                is_suspended = false;
        }
        mutex_unlock(&tegra_cpu_lock);

        return NOTIFY_OK;
}

static struct notifier_block tegra_cpu_pm_notifier = {
        .notifier_call = tegra_pm_notify,
};

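/*
 * cpufreq init callback: grab and enable the cpu and emc clocks, publish
 * the frequency table, and register the PM notifier on the boot CPU.
 */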
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
        if (policy->cpu >= NUM_CPUS)
                return -EINVAL;

        cpu_clk = clk_get_sys(NULL, "cpu");
        if (IS_ERR(cpu_clk))
                return PTR_ERR(cpu_clk);

        emc_clk = clk_get_sys("cpu", "emc");
        if (IS_ERR(emc_clk)) {
                clk_put(cpu_clk);
                return PTR_ERR(emc_clk);
        }

        clk_enable(emc_clk);
        clk_enable(cpu_clk);

        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
        policy->cur = tegra_getspeed(policy->cpu);
        target_cpu_speed[policy->cpu] = policy->cur;

        /* FIXME: what's the actual transition time? */
        policy->cpuinfo.transition_latency = 300 * 1000;

        policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
        cpumask_copy(policy->related_cpus, cpu_possible_mask);

        if (policy->cpu == 0)
                register_pm_notifier(&tegra_cpu_pm_notifier);

        return 0;
}

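/* cpufreq exit callback: release the clocks taken in tegra_cpu_init() */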
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        clk_disable(emc_clk);
        clk_put(emc_clk);
        clk_put(cpu_clk);
        return 0;
}

static struct freq_attr *tegra_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        &throttle,
#endif
        NULL,
};

static struct cpufreq_driver tegra_cpufreq_driver = {
        .verify         = tegra_verify_speed,
        .target         = tegra_target,
        .get            = tegra_getspeed,
        .init           = tegra_cpu_init,
        .exit           = tegra_cpu_exit,
        .name           = "tegra",
        .attr           = tegra_cpufreq_attr,
};

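/*
 * Fetch the SoC frequency table, set up the throttling workqueue when
 * thermal throttling is enabled, and register the cpufreq driver.
 */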
static int __init tegra_cpufreq_init(void)
{
        struct tegra_cpufreq_table_data *table_data =
                tegra_cpufreq_table_get();
        BUG_ON(!table_data);

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        /*
         * High-priority, other flags default: not bound to a specific
         * CPU, has a rescuer task (in case of allocation deadlock,
         * etc.).  Single-threaded.
         */
        workqueue = alloc_workqueue("cpu-tegra",
                                    WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
        if (!workqueue)
                return -ENOMEM;
        INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func);

        throttle_lowest_index = table_data->throttle_lowest_index;
        throttle_highest_index = table_data->throttle_highest_index;
#endif
        freq_table = table_data->freq_table;
        return cpufreq_register_driver(&tegra_cpufreq_driver);
}

static void __exit tegra_cpufreq_exit(void)
{
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        destroy_workqueue(workqueue);
#endif
        cpufreq_unregister_driver(&tegra_cpufreq_driver);
}

MODULE_AUTHOR("Colin Cross <ccross@android.com>");
MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
MODULE_LICENSE("GPL");
module_init(tegra_cpufreq_init);
module_exit(tegra_cpufreq_exit);