ARM: tegra: power: Added global EDP Capping table
[linux-3.10.git] / arch/arm/mach-tegra/cpu-tegra.c
/*
 * arch/arm/mach-tegra/cpu-tegra.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *      Colin Cross <ccross@google.com>
 *      Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
 *
 * Copyright (C) 2010-2011 NVIDIA Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/cpu.h>

#include <mach/edp.h>

#include "clock.h"
#include "pm.h"

/* tegra throttling and edp governors require frequencies in the table
   to be in ascending order */
static struct cpufreq_frequency_table *freq_table;

static struct clk *cpu_clk;
static struct clk *emc_clk;

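/*
 * Per-CPU speed (kHz) most recently requested through the cpufreq
 * ->target() hook.  The CPU clock itself is programmed from the maximum
 * of these values across online CPUs, after the throttle and EDP caps
 * have been applied (see tegra_cpu_cap_highest_speed()).
 */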
static unsigned long target_cpu_speed[CONFIG_NR_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;
static int suspend_index;

static unsigned int tegra_getspeed(unsigned int cpu);
static int tegra_update_cpu_speed(unsigned long rate);

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
/* CPU frequency is gradually lowered when throttling is enabled */
#define THROTTLE_DELAY          msecs_to_jiffies(2000)

static bool is_throttling;
static int throttle_lowest_index;
static int throttle_highest_index;
static int throttle_index;
static int throttle_next_index;
static struct delayed_work throttle_work;
static struct workqueue_struct *workqueue;

#define tegra_cpu_is_throttling() (is_throttling)

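/*
 * Throttle worker: lowers the CPU clock one frequency-table step at a
 * time, re-queueing itself every THROTTLE_DELAY until it reaches
 * throttle_lowest_index or throttling is disabled.
 */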
static void tegra_throttle_work_func(struct work_struct *work)
{
        unsigned int current_freq;

        mutex_lock(&tegra_cpu_lock);
        current_freq = tegra_getspeed(0);
        throttle_index = throttle_next_index;

        if (freq_table[throttle_index].frequency < current_freq)
                tegra_update_cpu_speed(freq_table[throttle_index].frequency);

        if (throttle_index > throttle_lowest_index) {
                throttle_next_index = throttle_index - 1;
                queue_delayed_work(workqueue, &throttle_work, THROTTLE_DELAY);
        }

        mutex_unlock(&tegra_cpu_lock);
}

/*
 * tegra_throttling_enable
 * This function may sleep
 */
void tegra_throttling_enable(bool enable)
{
        mutex_lock(&tegra_cpu_lock);

        if (enable && !is_throttling) {
                unsigned int current_freq = tegra_getspeed(0);

                is_throttling = true;

                for (throttle_index = throttle_highest_index;
                     throttle_index >= throttle_lowest_index;
                     throttle_index--)
                        if (freq_table[throttle_index].frequency
                            < current_freq)
                                break;

                throttle_index = max(throttle_index, throttle_lowest_index);
                throttle_next_index = throttle_index;
                queue_delayed_work(workqueue, &throttle_work, 0);
        } else if (!enable && is_throttling) {
                cancel_delayed_work_sync(&throttle_work);
                is_throttling = false;
                /* restore speed requested by governor */
                tegra_cpu_cap_highest_speed(NULL);
        }

        mutex_unlock(&tegra_cpu_lock);
}
EXPORT_SYMBOL_GPL(tegra_throttling_enable);
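/*
 * Usage note: tegra_throttling_enable(true/false) is the toggle external
 * thermal code is expected to call when a trip temperature is crossed or
 * cleared (a temperature sensor driver, for example); the debugfs
 * "throttle" file below drives the same path by hand.
 */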

static unsigned int throttle_governor_speed(unsigned int requested_speed)
{
        return tegra_cpu_is_throttling() ?
                min(requested_speed, freq_table[throttle_index].frequency) :
                requested_speed;
}

static ssize_t show_throttle(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", is_throttling);
}

cpufreq_freq_attr_ro(throttle);

#ifdef CONFIG_DEBUG_FS
static int throttle_debug_set(void *data, u64 val)
{
        tegra_throttling_enable(val);
        return 0;
}
static int throttle_debug_get(void *data, u64 *val)
{
        *val = (u64) is_throttling;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(throttle_fops, throttle_debug_get, throttle_debug_set, "%llu\n");

static struct dentry *cpu_tegra_debugfs_root;

static int __init tegra_cpu_debug_init(void)
{
        cpu_tegra_debugfs_root = debugfs_create_dir("cpu-tegra", 0);

        if (!cpu_tegra_debugfs_root)
                return -ENOMEM;

        if (!debugfs_create_file("throttle", 0644, cpu_tegra_debugfs_root, NULL, &throttle_fops))
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(cpu_tegra_debugfs_root);
        return -ENOMEM;

}

static void __exit tegra_cpu_debug_exit(void)
{
        debugfs_remove_recursive(cpu_tegra_debugfs_root);
}

late_initcall(tegra_cpu_debug_init);
module_exit(tegra_cpu_debug_exit);
#endif /* CONFIG_DEBUG_FS */

#else /* CONFIG_TEGRA_THERMAL_THROTTLE */
#define tegra_cpu_is_throttling() (0)
#define throttle_governor_speed(requested_speed) (requested_speed)
#endif /* CONFIG_TEGRA_THERMAL_THROTTLE */

#ifdef CONFIG_TEGRA_EDP_LIMITS

static const struct tegra_edp_limits *cpu_edp_limits;
static int cpu_edp_limits_size;
static int edp_thermal_index;
static cpumask_t edp_cpumask;
static unsigned int edp_limit;

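/*
 * Recompute edp_limit: take the frequency cap from the EDP table row
 * selected by edp_thermal_index, in the column indexed by the number of
 * CPUs currently in edp_cpumask, then round it down to the highest
 * cpufreq table entry that does not exceed it.
 */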
static void edp_update_limit(void)
{
        int i;
        unsigned int limit = cpumask_weight(&edp_cpumask);

        if (!cpu_edp_limits)
                return;

        BUG_ON((edp_thermal_index >= cpu_edp_limits_size) || (limit == 0));
        limit = cpu_edp_limits[edp_thermal_index].freq_limits[limit - 1];

        for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                if (freq_table[i].frequency > limit) {
                        break;
                }
        }
        BUG_ON(i == 0); /* min freq above the limit or table empty */
        edp_limit = freq_table[i-1].frequency;
}

static unsigned int edp_governor_speed(unsigned int requested_speed)
{
        if ((!cpu_edp_limits) || (requested_speed <= edp_limit))
                return requested_speed;
        else
                return edp_limit;
}

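/*
 * Exported hook for temperature updates: selects the EDP table row that
 * covers the reported temperature (row 0 below the first trip point, the
 * last row at or above the highest one), then recomputes the cap and
 * re-applies it if cpufreq is already running on cpu0.
 */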
int tegra_edp_update_thermal_zone(int temperature)
{
        int i;
        int ret = 0;
        int nlimits = cpu_edp_limits_size;
        int index;

        if (!cpu_edp_limits)
                return -EINVAL;

        index = nlimits - 1;

        if (temperature < cpu_edp_limits[0].temperature) {
                index = 0;
        } else {
                for (i = 0; i < (nlimits - 1); i++) {
                        if (temperature >= cpu_edp_limits[i].temperature &&
                           temperature < cpu_edp_limits[i + 1].temperature) {
                                index = i + 1;
                                break;
                        }
                }
        }

        mutex_lock(&tegra_cpu_lock);
        edp_thermal_index = index;

        /* Update cpu rate if cpufreq (at least on cpu0) is already started */
        if (target_cpu_speed[0]) {
                edp_update_limit();
                tegra_cpu_cap_highest_speed(NULL);
        }
        mutex_unlock(&tegra_cpu_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tegra_edp_update_thermal_zone);

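/*
 * CPU hotplug notifier.  Bringing a core online tightens the per-core
 * EDP budget, so the cap is recalculated and enforced in CPU_UP_PREPARE,
 * before the new core starts running; if the clock cannot be lowered the
 * hotplug operation is failed.  CPU_DEAD relaxes the cap again.
 */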
static int tegra_cpu_edp_notify(
        struct notifier_block *nb, unsigned long event, void *hcpu)
{
        int ret = 0;
        unsigned int cpu_speed, new_speed;
        int cpu = (long)hcpu;

        switch (event) {
        case CPU_UP_PREPARE:
                mutex_lock(&tegra_cpu_lock);
                cpu_set(cpu, edp_cpumask);
                edp_update_limit();

                cpu_speed = tegra_getspeed(0);
                new_speed = edp_governor_speed(cpu_speed);
                if (new_speed < cpu_speed) {
                        ret = tegra_update_cpu_speed(new_speed);
                        if (ret) {
                                cpu_clear(cpu, edp_cpumask);
                                edp_update_limit();
                        }

                        printk(KERN_DEBUG "tegra CPU:%sforce EDP limit %u kHz"
                                "\n", ret ? " failed to " : " ", new_speed);
                }
                mutex_unlock(&tegra_cpu_lock);
                break;
        case CPU_DEAD:
                mutex_lock(&tegra_cpu_lock);
                cpu_clear(cpu, edp_cpumask);
                edp_update_limit();
                tegra_cpu_cap_highest_speed(NULL);
                mutex_unlock(&tegra_cpu_lock);
                break;
        }
        return notifier_from_errno(ret);
}

static struct notifier_block tegra_cpu_edp_notifier = {
        .notifier_call = tegra_cpu_edp_notify,
};

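/*
 * Fetch the SoC/board EDP table and prime the limit from the set of CPUs
 * currently online.  On first (boot-time) initialization the hotplug
 * notifier is registered as well; on resume only the limit is refreshed.
 */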
static void tegra_cpu_edp_init(bool resume)
{
        tegra_get_cpu_edp_limits(&cpu_edp_limits, &cpu_edp_limits_size);

        if (!cpu_edp_limits) {
                if (!resume)
                        pr_info("cpu-tegra: no EDP table is provided\n");
                return;
        }

        /* FIXME: use the highest temperature limits if sensor is not on-line?
         * If thermal zone is not set yet by the sensor, edp_thermal_index = 0.
         * Boot frequency allowed SoC to get here, should work till sensor is
         * initialized.
         */
        edp_cpumask = *cpu_online_mask;
        edp_update_limit();

        if (!resume) {
                register_hotcpu_notifier(&tegra_cpu_edp_notifier);
                pr_info("cpu-tegra: init EDP limit: %u MHz\n", edp_limit/1000);
        }
}

static void tegra_cpu_edp_exit(void)
{
        if (!cpu_edp_limits)
                return;

        unregister_hotcpu_notifier(&tegra_cpu_edp_notifier);
}

#else   /* CONFIG_TEGRA_EDP_LIMITS */

#define edp_governor_speed(requested_speed) (requested_speed)
#define tegra_cpu_edp_init(resume)
#define tegra_cpu_edp_exit()
#endif  /* CONFIG_TEGRA_EDP_LIMITS */

static int tegra_verify_speed(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy, freq_table);
}

static unsigned int tegra_getspeed(unsigned int cpu)
{
        unsigned long rate;

        if (cpu >= CONFIG_NR_CPUS)
                return 0;

        rate = clk_get_rate(cpu_clk) / 1000;
        return rate;
}

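/*
 * Perform one frequency transition: round the requested rate against the
 * CPU clock, vote a matching minimum EMC (memory bus) rate, send the
 * cpufreq PRECHANGE notification to every online CPU, program the CPU
 * clock, then send POSTCHANGE.  Rates are in kHz here and in Hz at the
 * clk API boundary.
 */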
static int tegra_update_cpu_speed(unsigned long rate)
{
        int ret = 0;
        struct cpufreq_freqs freqs;

        freqs.old = tegra_getspeed(0);
        freqs.new = rate;

        rate = clk_round_rate(cpu_clk, rate * 1000);
        if (!IS_ERR_VALUE(rate))
                freqs.new = rate / 1000;

        if (freqs.old == freqs.new)
                return ret;

        /*
         * Vote on memory bus frequency based on cpu frequency
         * This sets the minimum frequency, display or avp may request higher
         */
        clk_set_rate(emc_clk, tegra_emc_to_cpu_ratio(freqs.new));

        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

#ifdef CONFIG_CPU_FREQ_DEBUG
        printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
               freqs.old, freqs.new);
#endif

        ret = clk_set_rate(cpu_clk, freqs.new * 1000);
        if (ret) {
                pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
                        freqs.new);
                return ret;
        }

        for_each_online_cpu(freqs.cpu)
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

        return 0;
}

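/*
 * The helpers below summarize the per-CPU speed requests so that slow or
 * idle cores can be identified; they are shared with the rest of the
 * mach-tegra code, notably the auto-hotplug logic driven through
 * tegra_auto_hotplug_governor().
 */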
unsigned int tegra_count_slow_cpus(unsigned long speed_limit)
{
        unsigned int cnt = 0;
        int i;

        for_each_online_cpu(i)
                if (target_cpu_speed[i] <= speed_limit)
                        cnt++;
        return cnt;
}

unsigned int tegra_get_slowest_cpu_n(void) {
        unsigned int cpu = nr_cpu_ids;
        unsigned long rate = ULONG_MAX;
        int i;

        for_each_online_cpu(i)
                if ((i > 0) && (rate > target_cpu_speed[i])) {
                        cpu = i;
                        rate = target_cpu_speed[i];
                }
        return cpu;
}

unsigned long tegra_cpu_lowest_speed(void) {
        unsigned long rate = ULONG_MAX;
        int i;

        for_each_online_cpu(i)
                rate = min(rate, target_cpu_speed[i]);
        return rate;
}

unsigned long tegra_cpu_highest_speed(void) {
        unsigned long rate = 0;
        int i;

        for_each_online_cpu(i)
                rate = max(rate, target_cpu_speed[i]);
        return rate;
}

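/*
 * Take the highest speed requested on any online CPU, run it through the
 * throttle and EDP governors, and program the resulting rate.  The capped
 * value is reported back through @speed_cap when the caller provides it.
 * All callers already hold tegra_cpu_lock.
 */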
int tegra_cpu_cap_highest_speed(unsigned int *speed_cap)
{
        unsigned int new_speed = tegra_cpu_highest_speed();

        new_speed = throttle_governor_speed(new_speed);
        new_speed = edp_governor_speed(new_speed);
        if (speed_cap)
                *speed_cap = new_speed;
        return tegra_update_cpu_speed(new_speed);
}

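/*
 * cpufreq ->target() hook: record the governor's request for this CPU,
 * let tegra_cpu_cap_highest_speed() pick the effective rate, and feed the
 * result to the auto-hotplug governor.  Requests are rejected while a
 * system suspend is in progress.
 */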
static int tegra_target(struct cpufreq_policy *policy,
                       unsigned int target_freq,
                       unsigned int relation)
{
        int idx;
        unsigned int freq;
        unsigned int new_speed;
        int ret = 0;

        mutex_lock(&tegra_cpu_lock);

        if (is_suspended) {
                ret = -EBUSY;
                goto out;
        }

        cpufreq_frequency_table_target(policy, freq_table, target_freq,
                relation, &idx);

        freq = freq_table[idx].frequency;

        target_cpu_speed[policy->cpu] = freq;
        ret = tegra_cpu_cap_highest_speed(&new_speed);
        if (ret == 0)
                tegra_auto_hotplug_governor(new_speed, false);
out:
        mutex_unlock(&tegra_cpu_lock);

        return ret;
}


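/*
 * Suspend/resume handling: on PM_SUSPEND_PREPARE the CPU is pinned to the
 * table entry chosen as suspend_index and further target() calls are
 * refused; on PM_POST_SUSPEND the EDP state is re-initialized and the
 * speed requested by the governor is restored.
 */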
static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
        void *dummy)
{
        mutex_lock(&tegra_cpu_lock);
        if (event == PM_SUSPEND_PREPARE) {
                is_suspended = true;
                pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
                        freq_table[suspend_index].frequency);
                tegra_update_cpu_speed(freq_table[suspend_index].frequency);
                tegra_auto_hotplug_governor(
                        freq_table[suspend_index].frequency, true);
        } else if (event == PM_POST_SUSPEND) {
                unsigned int freq;
                is_suspended = false;
                tegra_cpu_edp_init(true);
                tegra_cpu_cap_highest_speed(&freq);
                pr_info("Tegra cpufreq resume: restoring frequency to %d kHz\n",
                        freq);
        }
        mutex_unlock(&tegra_cpu_lock);

        return NOTIFY_OK;
}

static struct notifier_block tegra_cpu_pm_notifier = {
        .notifier_call = tegra_pm_notify,
};

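/*
 * cpufreq per-policy init: acquire the CPU clock and an EMC clock handle
 * used only for this driver's minimum memory-bus vote (see
 * tegra_update_cpu_speed()), publish the frequency table, and register
 * the PM notifier once, when the policy for cpu0 is set up.
 */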
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
        if (policy->cpu >= CONFIG_NR_CPUS)
                return -EINVAL;

        cpu_clk = clk_get_sys(NULL, "cpu");
        if (IS_ERR(cpu_clk))
                return PTR_ERR(cpu_clk);

        emc_clk = clk_get_sys("cpu", "emc");
        if (IS_ERR(emc_clk)) {
                clk_put(cpu_clk);
                return PTR_ERR(emc_clk);
        }

        clk_prepare_enable(emc_clk);
        clk_prepare_enable(cpu_clk);

        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
        policy->cur = tegra_getspeed(policy->cpu);
        target_cpu_speed[policy->cpu] = policy->cur;

        /* FIXME: what's the actual transition time? */
        policy->cpuinfo.transition_latency = 300 * 1000;

        cpumask_copy(policy->cpus, cpu_possible_mask);

        if (policy->cpu == 0) {
                register_pm_notifier(&tegra_cpu_pm_notifier);
        }

        return 0;
}

static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
        cpufreq_frequency_table_cpuinfo(policy, freq_table);
        clk_disable_unprepare(emc_clk);
        clk_put(emc_clk);
        clk_put(cpu_clk);
        return 0;
}

static struct freq_attr *tegra_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        &throttle,
#endif
        NULL,
};

static struct cpufreq_driver tegra_cpufreq_driver = {
        .verify         = tegra_verify_speed,
        .target         = tegra_target,
        .get            = tegra_getspeed,
        .init           = tegra_cpu_init,
        .exit           = tegra_cpu_exit,
        .name           = "tegra",
        .attr           = tegra_cpufreq_attr,
};

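/*
 * Module init: the SoC-specific frequency table (including suspend and
 * throttle indices) is fetched first, then the throttle workqueue and
 * auto-hotplug state are set up, the EDP limits are primed, and only then
 * is the cpufreq driver registered.
 */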
static int __init tegra_cpufreq_init(void)
{
        int ret = 0;

        struct tegra_cpufreq_table_data *table_data =
                tegra_cpufreq_table_get();
        if (IS_ERR_OR_NULL(table_data))
                return -EINVAL;

        suspend_index = table_data->suspend_index;

#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        /*
         * High priority, other flags left at their defaults: not bound to
         * a specific CPU, has a rescuer task (in case of allocation
         * deadlock, etc.).  Single-threaded.
         */
        workqueue = alloc_workqueue("cpu-tegra",
                                    WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
        if (!workqueue)
                return -ENOMEM;
        INIT_DELAYED_WORK(&throttle_work, tegra_throttle_work_func);

        throttle_lowest_index = table_data->throttle_lowest_index;
        throttle_highest_index = table_data->throttle_highest_index;
#endif
        ret = tegra_auto_hotplug_init(&tegra_cpu_lock);
        if (ret)
                return ret;

        freq_table = table_data->freq_table;
        tegra_cpu_edp_init(false);
        return cpufreq_register_driver(&tegra_cpufreq_driver);
}

static void __exit tegra_cpufreq_exit(void)
{
#ifdef CONFIG_TEGRA_THERMAL_THROTTLE
        destroy_workqueue(workqueue);
#endif
        tegra_cpu_edp_exit();
        tegra_auto_hotplug_exit();
        cpufreq_unregister_driver(&tegra_cpufreq_driver);
}


MODULE_AUTHOR("Colin Cross <ccross@android.com>");
MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
MODULE_LICENSE("GPL");
module_init(tegra_cpufreq_init);
module_exit(tegra_cpufreq_exit);