arm: tegra: Declare tegra_throttling_enable in .h
[linux-3.10.git] / arch / arm / mach-tegra / cpu-tegra.c
1 /*
2  * arch/arm/mach-tegra/cpu-tegra.c
3  *
4  * Copyright (C) 2010 Google, Inc.
5  *
6  * Author:
7  *      Colin Cross <ccross@google.com>
8  *      Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
9  *
10  * Copyright (C) 2010-2011 NVIDIA Corporation
11  *
12  * This software is licensed under the terms of the GNU General Public
13  * License version 2, as published by the Free Software Foundation, and
14  * may be copied, distributed, and modified under those terms.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/types.h>
26 #include <linux/sched.h>
27 #include <linux/cpufreq.h>
28 #include <linux/delay.h>
29 #include <linux/init.h>
30 #include <linux/err.h>
31 #include <linux/clk.h>
32 #include <linux/io.h>
33 #include <linux/suspend.h>
34 #include <linux/debugfs.h>
35 #include <linux/cpu.h>
36
37 #include <mach/edp.h>
38
39 #include "clock.h"
40 #include "pm.h"
41
42 /* tegra throttling and edp governors require frequencies in the table
43    to be in ascending order */
44 static struct cpufreq_frequency_table *freq_table;
45
46 static struct clk *cpu_clk;
47 static struct clk *emc_clk;
48
49 static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
50 static DEFINE_MUTEX(tegra_cpu_lock);
51 static bool is_suspended;
52 static int suspend_index;
53
54 static unsigned int tegra_getspeed(unsigned int cpu);
55 static int tegra_update_cpu_speed(unsigned long rate);
56
57 #ifdef CONFIG_TEGRA_THERMAL_THROTTLE
58 /* CPU frequency is gradually lowered when throttling is enabled */
59 #define THROTTLE_DELAY          msecs_to_jiffies(2000)
60
61 static bool is_throttling;
62 static int throttle_lowest_index;
63 static int throttle_highest_index;
64 static int throttle_index;
65 static int throttle_next_index;
66 static struct delayed_work throttle_work;
67 static struct workqueue_struct *workqueue;
68
69 #define tegra_cpu_is_throttling() (is_throttling)
70
71 static void tegra_throttle_work_func(struct work_struct *work)
72 {
73         unsigned int current_freq;
74
75         mutex_lock(&tegra_cpu_lock);
76         current_freq = tegra_getspeed(0);
77         throttle_index = throttle_next_index;
78
79         if (freq_table[throttle_index].frequency < current_freq)
80                 tegra_update_cpu_speed(freq_table[throttle_index].frequency);
81
82         if (throttle_index > throttle_lowest_index) {
83                 throttle_next_index = throttle_index - 1;
84                 queue_delayed_work(workqueue, &throttle_work, THROTTLE_DELAY);
85         }
86
87         mutex_unlock(&tegra_cpu_lock);
88 }
89
90 /*
91  * tegra_throttling_enable
92  * This function may sleep
93  */
94 void tegra_throttling_enable(bool enable)
95 {
96         mutex_lock(&tegra_cpu_lock);
97
98         if (enable && !is_throttling) {
99                 unsigned int current_freq = tegra_getspeed(0);
100
101                 is_throttling = true;
102
103                 for (throttle_index = throttle_highest_index;
104                      throttle_index >= throttle_lowest_index;
105                      throttle_index--)
106                         if (freq_table[throttle_index].frequency
107                             < current_freq)
108                                 break;
109
110                 throttle_index = max(throttle_index, throttle_lowest_index);
111                 throttle_next_index = throttle_index;
112                 queue_delayed_work(workqueue, &throttle_work, 0);
113         } else if (!enable && is_throttling) {
114                 cancel_delayed_work_sync(&throttle_work);
115                 is_throttling = false;
116                 /* restore speed requested by governor */
117                 tegra_cpu_cap_highest_speed(NULL);
118         }
119
120         mutex_unlock(&tegra_cpu_lock);
121 }
122 EXPORT_SYMBOL_GPL(tegra_throttling_enable);
123
124 static unsigned int throttle_governor_speed(unsigned int requested_speed)
125 {
126         return tegra_cpu_is_throttling() ?
127                 min(requested_speed, freq_table[throttle_index].frequency) :
128                 requested_speed;
129 }
130
131 static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
132 {
133         return sprintf(buf, "%u\n", is_throttling);
134 }
135
/* Generates the read-only "throttle" freq_attr backed by show_throttle(). */
cpufreq_freq_attr_ro(throttle);
137
138 #ifdef CONFIG_DEBUG_FS
139 static int throttle_debug_set(void *data, u64 val)
140 {
141         tegra_throttling_enable(val);
142         return 0;
143 }
144 static int throttle_debug_get(void *data, u64 *val)
145 {
146         *val = (u64) is_throttling;
147         return 0;
148 }
149
/* File operations for the debugfs "throttle" control node. */
DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get, throttle_debug_set, "%llu\n");

/* Root of the "cpu-tegra" debugfs directory; removed on module exit. */
static struct dentry *cpu_tegra_debugfs_root;
153
154 static int __init tegra_cpu_debug_init(void)
155 {
156         cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", 0);
157
158         if (!cpu_tegra_debugfs_root)
159                 return -ENOMEM;
160
161         if (!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root, NULL, &throttle_fops))
162                 goto err_out;
163
164         return 0;
165
166 err_out:
167         debugfs_remove_recursive(cpu_tegra_debugfs_root);
168         return -ENOMEM;
169
170 }
171
/* Tear down the debugfs directory created by tegra_cpu_debug_init(). */
static void __exit tegra_cpu_debug_exit(void)
{
	debugfs_remove_recursive(cpu_tegra_debugfs_root);
}
176
177 late_initcall(tegra_cpu_debug_init);
178 module_exit(tegra_cpu_debug_exit);
179 #endif /* CONFIG_DEBUG_FS */
180
181 #else /* CONFIG_TEGRA_THERMAL_THROTTLE */
182 #define tegra_cpu_is_throttling() (0)
183 #define throttle_governor_speed(requested_speed) (requested_speed)
184 #endif /* CONFIG_TEGRA_THERMAL_THROTTLE */
185
186 #ifdef CONFIG_TEGRA_EDP_LIMITS
187
188 static const struct tegra_edp_limits *cpu_edp_limits;
189 static int cpu_edp_limits_size;
190 static int edp_thermal_index;
191 static cpumask_t edp_cpumask;
192 static unsigned int edp_limit;
193
194 static void edp_update_limit(void)
195 {
196         int i;
197         unsigned int limit;
198
199         if (!cpu_edp_limits)
200                 return;
201
202         limit = cpu_edp_limits[edp_thermal_index].freq_limits[
203                         cpumask_weight(&edp_cpumask) - 1];
204
205         for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
206                 if (freq_table[i].frequency > limit) {
207                         break;
208                 }
209         }
210         BUG_ON(i == 0); /* min freq above the limit or table empty */
211         edp_limit = freq_table[i-1].frequency;
212 }
213
214 static unsigned int edp_governor_speed(unsigned int requested_speed)
215 {
216         if ((!cpu_edp_limits) || (requested_speed <= edp_limit))
217                 return requested_speed;
218         else
219                 return edp_limit;
220 }
221
222 int tegra_edp_update_thermal_zone(int temperature)
223 {
224         int i;
225         int ret = 0;
226         int nlimits = cpu_edp_limits_size;
227         int index;
228
229         if (!cpu_edp_limits)
230                 return -EINVAL;
231
232         index = nlimits - 1;
233
234         if (temperature < cpu_edp_limits[0].temperature) {
235                 index = 0;
236         } else {
237                 for (i = 0; i < (nlimits - 1); i++) {
238                         if (temperature >= cpu_edp_limits[i].temperature &&
239                            temperature < cpu_edp_limits[i + 1].temperature) {
240                                 index = i + 1;
241                                 break;
242                         }
243                 }
244         }
245
246         mutex_lock(&tegra_cpu_lock);
247
248         edp_thermal_index = index;
249         edp_update_limit();
250         ret = tegra_cpu_cap_highest_speed(NULL);
251         if (ret)
252                 pr_err("%s: update cpu speed fail(%d)", __func__, ret);
253
254         mutex_unlock(&tegra_cpu_lock);
255
256         return ret;
257 }
258 EXPORT_SYMBOL_GPL(tegra_edp_update_thermal_zone);
259
/*
 * CPU hotplug notifier: keep edp_cpumask in step with the online CPUs
 * and re-evaluate the per-core EDP frequency budget.
 *
 * On CPU_UP_PREPARE the incoming CPU is added to the mask first; if the
 * tighter budget then requires slowing down and that fails, the CPU is
 * backed out of the mask and the non-zero ret vetoes the hotplug via
 * notifier_from_errno().  On CPU_DEAD the mask only shrinks, so the
 * budget can only loosen and no speed change is needed here.
 */
static int tegra_cpu_edp_notify(
	struct notifier_block *nb, unsigned long event, void *hcpu)
{
	int ret = 0;
	unsigned int cpu_speed, new_speed;
	int cpu = (long)hcpu;

	switch (event) {
	case CPU_UP_PREPARE:
		mutex_lock(&tegra_cpu_lock);
		cpu_set(cpu, edp_cpumask);
		edp_update_limit();

		cpu_speed = tegra_getspeed(0);
		new_speed = edp_governor_speed(cpu_speed);
		if (new_speed < cpu_speed) {
			ret = tegra_update_cpu_speed(new_speed);
			if (ret) {
				/* Could not slow down: undo the mask change. */
				cpu_clear(cpu, edp_cpumask);
				edp_update_limit();
			}

			printk(KERN_DEBUG "tegra CPU:%sforce EDP limit %u kHz"
				"\n", ret ? " failed to " : " ", new_speed);
		}
		mutex_unlock(&tegra_cpu_lock);
		break;
	case CPU_DEAD:
		mutex_lock(&tegra_cpu_lock);
		cpu_clear(cpu, edp_cpumask);
		edp_update_limit();
		mutex_unlock(&tegra_cpu_lock);
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tegra_cpu_edp_notifier = {
	.notifier_call = tegra_cpu_edp_notify,
};
300
/*
 * (Re)initialize EDP state for the current set of online CPUs.
 * @resume: true when called from the PM_POST_SUSPEND path; boot-time-only
 *          actions (the "no table" print and hotplug-notifier
 *          registration) are skipped in that case.
 */
static void tegra_cpu_edp_init(bool resume)
{
	if (!cpu_edp_limits) {
		if (!resume)
			pr_info("tegra CPU: no EDP table is provided\n");
		return;
	}

	/* Start from the coolest thermal zone; the thermal driver will
	 * adjust via tegra_edp_update_thermal_zone(). */
	edp_thermal_index = 0;
	edp_cpumask = *cpu_online_mask;
	edp_update_limit();

	if (!resume)
		register_hotcpu_notifier(&tegra_cpu_edp_notifier);

	pr_info("tegra CPU: set EDP limit %u MHz\n", edp_limit / 1000);
}
318
319 static void tegra_cpu_edp_exit(void)
320 {
321         if (!cpu_edp_limits)
322                 return;
323
324         unregister_hotcpu_notifier(&tegra_cpu_edp_notifier);
325 }
326
/*
 * Register the board-specific EDP table.
 * @limits: per-temperature frequency caps; not copied, so it must remain
 *          valid for the driver's lifetime
 * @size:   number of entries in @limits
 */
void tegra_init_cpu_edp_limits(const struct tegra_edp_limits *limits, int size)
{
	cpu_edp_limits = limits;
	cpu_edp_limits_size = size;
}
332
333 #else   /* CONFIG_TEGRA_EDP_LIMITS */
334
335 #define edp_governor_speed(requested_speed) (requested_speed)
336 #define tegra_cpu_edp_init(resume)
337 #define tegra_cpu_edp_exit()
338 #endif  /* CONFIG_TEGRA_EDP_LIMITS */
339
/* cpufreq callback: clamp policy min/max to valid table frequencies. */
static int tegra_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, freq_table);
}
344
345 static unsigned int tegra_getspeed(unsigned int cpu)
346 {
347         unsigned long rate;
348
349         if (cpu >= CONFIG_NR_CPUS)
350                 return 0;
351
352         rate = clk_get_rate(cpu_clk) / 1000;
353         return rate;
354 }
355
356 static int tegra_update_cpu_speed(unsigned long rate)
357 {
358         int ret = 0;
359         struct cpufreq_freqs freqs;
360
361         freqs.old = tegra_getspeed(0);
362         freqs.new = rate;
363
364         rate = clk_round_rate(cpu_clk, rate * 1000);
365         if (!IS_ERR_VALUE(rate))
366                 freqs.new = rate / 1000;
367
368         if (freqs.old == freqs.new)
369                 return ret;
370
371         /*
372          * Vote on memory bus frequency based on cpu frequency
373          * This sets the minimum frequency, display or avp may request higher
374          */
375         clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(freqs.new));
376
377         for_each_online_cpu(freqs.cpu)
378                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
379
380 #ifdef CONFIG_CPU_FREQ_DEBUG
381         printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
382                freqs.old, freqs.new);
383 #endif
384
385         ret = clk_set_rate(cpu_clk, freqs.new * 1000);
386         if (ret) {
387                 pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
388                         freqs.new);
389                 return ret;
390         }
391
392         for_each_online_cpu(freqs.cpu)
393                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
394
395         return 0;
396 }
397
398 unsigned int tegra_count_slow_cpus(unsigned long speed_limit)
399 {
400         unsigned int cnt = 0;
401         int i;
402
403         for_each_online_cpu(i)
404                 if (target_cpu_speed[i] <= speed_limit)
405                         cnt++;
406         return cnt;
407 }
408
409 unsigned int tegra_get_slowest_cpu_n(void) {
410         unsigned int cpu = nr_cpu_ids;
411         unsigned long rate = ULONG_MAX;
412         int i;
413
414         for_each_online_cpu(i)
415                 if ((i > 0) && (rate > target_cpu_speed[i])) {
416                         cpu = i;
417                         rate = target_cpu_speed[i];
418                 }
419         return cpu;
420 }
421
422 unsigned long tegra_cpu_lowest_speed(void) {
423         unsigned long rate = ULONG_MAX;
424         int i;
425
426         for_each_online_cpu(i)
427                 rate = min(rate, target_cpu_speed[i]);
428         return rate;
429 }
430
431 unsigned long tegra_cpu_highest_speed(void) {
432         unsigned long rate = 0;
433         int i;
434
435         for_each_online_cpu(i)
436                 rate = max(rate, target_cpu_speed[i]);
437         return rate;
438 }
439
/*
 * Apply the throttle and EDP caps to the highest per-CPU request and
 * program the resulting speed.  Caller must hold tegra_cpu_lock.
 * If @speed_cap is non-NULL it receives the capped speed (kHz).
 */
int tegra_cpu_cap_highest_speed(unsigned int *speed_cap)
{
	unsigned int capped;

	capped = throttle_governor_speed(tegra_cpu_highest_speed());
	capped = edp_governor_speed(capped);

	if (speed_cap)
		*speed_cap = capped;

	return tegra_update_cpu_speed(capped);
}
450
451 static int tegra_target(struct cpufreq_policy *policy,
452                        unsigned int target_freq,
453                        unsigned int relation)
454 {
455         int idx;
456         unsigned int freq;
457         unsigned int new_speed;
458         int ret = 0;
459
460         mutex_lock(&tegra_cpu_lock);
461
462         if (is_suspended) {
463                 ret = -EBUSY;
464                 goto out;
465         }
466
467         cpufreq_frequency_table_target(policy, freq_table, target_freq,
468                 relation, &idx);
469
470         freq = freq_table[idx].frequency;
471
472         target_cpu_speed[policy->cpu] = freq;
473         ret = tegra_cpu_cap_highest_speed(&new_speed);
474         if (ret == 0)
475                 tegra_auto_hotplug_governor(new_speed);
476 out:
477         mutex_unlock(&tegra_cpu_lock);
478
479         return ret;
480 }
481
482
483 static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
484         void *dummy)
485 {
486         mutex_lock(&tegra_cpu_lock);
487         if (event == PM_SUSPEND_PREPARE) {
488                 is_suspended = true;
489                 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
490                         freq_table[suspend_index].frequency);
491                 tegra_update_cpu_speed(freq_table[suspend_index].frequency);
492         } else if (event == PM_POST_SUSPEND) {
493                 is_suspended = false;
494                 tegra_cpu_edp_init(true);
495         }
496         mutex_unlock(&tegra_cpu_lock);
497
498         return NOTIFY_OK;
499 }
500
501 static struct notifier_block tegra_cpu_pm_notifier = {
502         .notifier_call = tegra_pm_notify,
503 };
504
/*
 * cpufreq callback: per-policy initialization.  Acquires the shared CPU
 * and EMC clocks, seeds the policy from the frequency table and current
 * rate, and registers the PM notifier once (from CPU0's policy).
 */
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
	if (policy->cpu >= CONFIG_NR_CPUS)
		return -EINVAL;

	cpu_clk = clk_get_sys(NULL, "cpu");
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	/* EMC clock is voted on from tegra_update_cpu_speed(). */
	emc_clk = clk_get_sys("cpu", "emc");
	if (IS_ERR(emc_clk)) {
		clk_put(cpu_clk);
		return PTR_ERR(emc_clk);
	}

	/* NOTE(review): enable results are unchecked -- presumably these
	 * clocks are already running at this point; confirm. */
	clk_prepare_enable(emc_clk);
	clk_prepare_enable(cpu_clk);

	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
	policy->cur = tegra_getspeed(policy->cpu);
	target_cpu_speed[policy->cpu] = policy->cur;

	/* FIXME: what's the actual transition time? */
	policy->cpuinfo.transition_latency = 300 * 1000;

	/* All cores share one clock, so a single policy covers every CPU. */
	cpumask_copy(policy->cpus, cpu_possible_mask);

	if (policy->cpu == 0) {
		register_pm_notifier(&tegra_cpu_pm_notifier);
	}

	return 0;
}
539
/*
 * cpufreq callback: release the clock references taken in init.
 * NOTE(review): only the EMC vote is disabled here -- cpu_clk is left
 * enabled (the CPU is still running on it) and merely put.
 */
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_cpuinfo(policy, freq_table);
	clk_disable_unprepare(emc_clk);
	clk_put(emc_clk);
	clk_put(cpu_clk);
	return 0;
}
548
/* sysfs attributes exported per policy; "throttle" only when supported. */
static struct freq_attr *tegra_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
	&throttle,
#endif
	NULL,
};

/* cpufreq driver hooks for the Tegra SoC. */
static struct cpufreq_driver tegra_cpufreq_driver = {
	.verify		= tegra_verify_speed,
	.target		= tegra_target,
	.get		= tegra_getspeed,
	.init		= tegra_cpu_init,
	.exit		= tegra_cpu_exit,
	.name		= "tegra",
	.attr		= tegra_cpufreq_attr,
};
566
567 static int __init tegra_cpufreq_init(void)
568 {
569         int ret = 0;
570
571         struct tegra_cpufreq_table_data *table_data =
572                 tegra_cpufreq_table_get();
573         BUG_ON(!table_data);
574         suspend_index = table_data->suspend_index;
575
576 #ifdef CONFIG_TEGRA_THERMAL_THROTTLE
577         /*
578          * High-priority, others flags default: not bound to a specific
579          * CPU, has rescue worker task (in case of allocation deadlock,
580          * etc.).  Single-threaded.
581          */
582         workqueue = alloc_workqueue("cpu-tegra",
583                                     WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
584         if (!workqueue)
585                 return -ENOMEM;
586         INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func);
587
588         throttle_lowest_index = table_data->throttle_lowest_index;
589         throttle_highest_index = table_data->throttle_highest_index;
590 #endif
591         ret = tegra_auto_hotplug_init(&tegra_cpu_lock);
592         if (ret)
593                 return ret;
594
595         freq_table = table_data->freq_table;
596         tegra_cpu_edp_init(false);
597         return cpufreq_register_driver(&tegra_cpufreq_driver);
598 }
599
600 static void __exit tegra_cpufreq_exit(void)
601 {
602 #ifdef CONFIG_TEGRA_THERMAL_THROTTLE
603         destroy_workqueue(workqueue);
604 #endif
605         tegra_cpu_edp_exit();
606         tegra_auto_hotplug_exit();
607         cpufreq_unregister_driver(&tegra_cpufreq_driver);
608 }
609
610
611 MODULE_AUTHOR("Colin Cross <ccross@android.com>");
612 MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
613 MODULE_LICENSE("GPL");
614 module_init(tegra_cpufreq_init);
615 module_exit(tegra_cpufreq_exit);