/*
 * arch/arm/mach-tegra/cpu-tegra3.c
 *
 * CPU auto-hotplug for Tegra3 CPUs
 *
 * Copyright (c) 2011-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpu.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pm_qos.h>

#include "pm.h"
#include "cpu-tegra.h"
#include "clock.h"

#define INITIAL_STATE           TEGRA_HP_DISABLED
#define UP2G0_DELAY_MS          70
#define UP2Gn_DELAY_MS          100
#define DOWN_DELAY_MS           2000

static struct mutex *tegra3_cpu_lock;

static struct workqueue_struct *hotplug_wq;
static struct delayed_work hotplug_work;

static bool no_lp;
module_param(no_lp, bool, 0644);

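/*
 * Hot-plug decision delays, kept in jiffies; seeded from the *_DELAY_MS
 * defaults at init time and tunable as module parameters.
 */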
static unsigned long up2gn_delay;
static unsigned long up2g0_delay;
static unsigned long down_delay;
module_param(up2gn_delay, ulong, 0644);
module_param(up2g0_delay, ulong, 0644);
module_param(down_delay, ulong, 0644);

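/*
 * Cluster-switch thresholds in kHz: idle_top_freq is the LP cluster's
 * maximum rate, idle_bottom_freq the G cluster's minimum rate; both are
 * read from the clock framework at init time.
 */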
static unsigned int idle_top_freq;
static unsigned int idle_bottom_freq;
module_param(idle_top_freq, uint, 0644);
module_param(idle_bottom_freq, uint, 0644);

static int mp_overhead = 10;
module_param(mp_overhead, int, 0644);

static int balance_level = 75;
module_param(balance_level, int, 0644);

static struct clk *cpu_clk;
static struct clk *cpu_g_clk;
static struct clk *cpu_lp_clk;

static struct {
        cputime64_t time_up_total;
        u64 last_update;
        unsigned int up_down_count;
} hp_stats[CONFIG_NR_CPUS + 1]; /* Append LP CPU entry at the end */

static void hp_init_stats(void)
{
        int i;
        u64 cur_jiffies = get_jiffies_64();

        for (i = 0; i <= CONFIG_NR_CPUS; i++) {
                hp_stats[i].time_up_total = 0;
                hp_stats[i].last_update = cur_jiffies;

                hp_stats[i].up_down_count = 0;
                if (is_lp_cluster()) {
                        if (i == CONFIG_NR_CPUS)
                                hp_stats[i].up_down_count = 1;
                } else {
                        if ((i < nr_cpu_ids) && cpu_online(i))
                                hp_stats[i].up_down_count = 1;
                }
        }
}

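/*
 * Account on-line time and count up/down transitions for one stats entry.
 * The low bit of up_down_count encodes the current state (1 = up), so a
 * mismatch with the requested transition means the bookkeeping has
 * drifted (e.g. CPUs plugged from user space behind our back).
 */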
static void hp_stats_update(unsigned int cpu, bool up)
{
        u64 cur_jiffies = get_jiffies_64();
        bool was_up = hp_stats[cpu].up_down_count & 0x1;

        if (was_up)
                hp_stats[cpu].time_up_total +=
                        cur_jiffies - hp_stats[cpu].last_update;

        if (was_up != up) {
                hp_stats[cpu].up_down_count++;
                if ((hp_stats[cpu].up_down_count & 0x1) != up) {
                        /* FIXME: sysfs user space CPU control breaks stats */
                        pr_err("tegra hotplug stats out of sync with %s CPU%d\n",
                               (cpu < CONFIG_NR_CPUS) ? "G" : "LP",
                               (cpu < CONFIG_NR_CPUS) ? cpu : 0);
                        hp_stats[cpu].up_down_count ^= 0x1;
                }
        }
        hp_stats[cpu].last_update = cur_jiffies;
}

enum {
        TEGRA_HP_DISABLED = 0,
        TEGRA_HP_IDLE,
        TEGRA_HP_DOWN,
        TEGRA_HP_UP,
};
static int hp_state;

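/*
 * Setter for the "auto_hotplug" module parameter.  Writes go through
 * param_set_bool(), so user space can only flip between TEGRA_HP_DISABLED
 * (0) and TEGRA_HP_IDLE (1); the DOWN/UP states are reached internally by
 * the governor hook below.
 */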
static int hp_state_set(const char *arg, const struct kernel_param *kp)
{
        int ret = 0;
        int old_state;

        if (!tegra3_cpu_lock)
                return ret;

        mutex_lock(tegra3_cpu_lock);

        old_state = hp_state;
        ret = param_set_bool(arg, kp);  /* set idle or disabled only */

        if (ret == 0) {
                if ((hp_state == TEGRA_HP_DISABLED) &&
                    (old_state != TEGRA_HP_DISABLED)) {
                        mutex_unlock(tegra3_cpu_lock);
                        cancel_delayed_work_sync(&hotplug_work);
                        mutex_lock(tegra3_cpu_lock);
                        pr_info("Tegra auto-hotplug disabled\n");
                } else if (hp_state != TEGRA_HP_DISABLED) {
                        if (old_state == TEGRA_HP_DISABLED) {
                                pr_info("Tegra auto-hotplug enabled\n");
                                hp_init_stats();
                        }
                        /* catch-up with governor target speed */
                        tegra_cpu_set_speed_cap(NULL);
                }
        } else {
                pr_warn("%s: unable to set tegra hotplug state %s\n",
                        __func__, arg);
        }

        mutex_unlock(tegra3_cpu_lock);
        return ret;
}

static int hp_state_get(char *buffer, const struct kernel_param *kp)
{
        return param_get_int(buffer, kp);
}

static struct kernel_param_ops tegra_hp_state_ops = {
        .set = hp_state_set,
        .get = hp_state_get,
};
module_param_cb(auto_hotplug, &tegra_hp_state_ops, &hp_state, 0644);

enum {
        TEGRA_CPU_SPEED_BALANCED,
        TEGRA_CPU_SPEED_BIASED,
        TEGRA_CPU_SPEED_SKEWED,
};

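/*
 * Classify the current load distribution across on-line G-cluster CPUs
 * relative to balance_level, EDP limits and the PM QoS min/max on-line
 * CPU constraints.  The result drives the decision in the UP state:
 * bring another core on-line, do nothing, or take one down.
 */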
static noinline int tegra_cpu_speed_balance(void)
{
        unsigned long highest_speed = tegra_cpu_highest_speed();
        unsigned long balanced_speed = highest_speed * balance_level / 100;
        unsigned long skewed_speed = balanced_speed / 2;
        unsigned int nr_cpus = num_online_cpus();
        unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
        unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

        /* balanced: freq targets for all CPUs are above balanced_speed
           (balance_level percent of the highest speed, 75% by default)
           biased: freq target for at least one CPU is below balanced_speed
           skewed: freq targets for at least 2 CPUs are below half of
           balanced_speed */
        if (((tegra_count_slow_cpus(skewed_speed) >= 2) ||
             tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) ||
             (highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) &&
            (nr_cpus > min_cpus))
                return TEGRA_CPU_SPEED_SKEWED;

        if (((tegra_count_slow_cpus(balanced_speed) >= 1) ||
             (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) ||
             (highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) &&
            (nr_cpus >= min_cpus))
                return TEGRA_CPU_SPEED_BIASED;

        return TEGRA_CPU_SPEED_BALANCED;
}

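/*
 * Deferred hot-plug worker.  In the DOWN state it off-lines the slowest
 * G-cluster core, or switches to the LP cluster once only one core is
 * left; in the UP state it either switches back to the G cluster or uses
 * tegra_cpu_speed_balance() to decide whether to plug or unplug a core.
 * The actual cpu_up()/cpu_down() call is made after dropping the lock.
 */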
static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
        bool up = false;
        unsigned int cpu = nr_cpu_ids;

        mutex_lock(tegra3_cpu_lock);

        switch (hp_state) {
        case TEGRA_HP_DISABLED:
        case TEGRA_HP_IDLE:
                break;
        case TEGRA_HP_DOWN:
                cpu = tegra_get_slowest_cpu_n();
                if (cpu < nr_cpu_ids) {
                        up = false;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, down_delay);
                        hp_stats_update(cpu, false);
                } else if (!is_lp_cluster() && !no_lp) {
                        if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
                                hp_stats_update(CONFIG_NR_CPUS, true);
                                hp_stats_update(0, false);
                                /* catch-up with governor target speed */
                                tegra_cpu_set_speed_cap(NULL);
                        } else {
                                queue_delayed_work(
                                        hotplug_wq, &hotplug_work, down_delay);
                        }
                }
                break;
        case TEGRA_HP_UP:
                if (is_lp_cluster() && !no_lp) {
                        if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                                hp_stats_update(CONFIG_NR_CPUS, false);
                                hp_stats_update(0, true);
                                /* catch-up with governor target speed */
                                tegra_cpu_set_speed_cap(NULL);
                        }
                } else {
                        switch (tegra_cpu_speed_balance()) {
                        /* cpu speed is up and balanced - one more on-line */
                        case TEGRA_CPU_SPEED_BALANCED:
                                cpu = cpumask_next_zero(0, cpu_online_mask);
                                if (cpu < nr_cpu_ids) {
                                        up = true;
                                        hp_stats_update(cpu, true);
                                }
                                break;
                        /* cpu speed is up, but skewed - remove one core */
                        case TEGRA_CPU_SPEED_SKEWED:
                                cpu = tegra_get_slowest_cpu_n();
                                if (cpu < nr_cpu_ids) {
                                        up = false;
                                        hp_stats_update(cpu, false);
                                }
                                break;
                        /* cpu speed is up, but under-utilized - do nothing */
                        case TEGRA_CPU_SPEED_BIASED:
                        default:
                                break;
                        }
                }
                queue_delayed_work(
                        hotplug_wq, &hotplug_work, up2gn_delay);
                break;
        default:
                pr_err("%s: invalid tegra hotplug state %d\n",
                       __func__, hp_state);
        }
        mutex_unlock(tegra3_cpu_lock);

        if (cpu < nr_cpu_ids) {
                if (up)
                        cpu_up(cpu);
                else
                        cpu_down(cpu);
        }
}

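/*
 * PM QoS notifier for PM_QOS_MIN_ONLINE_CPUS.  A request for two or more
 * CPUs cannot be satisfied on the single-core LP cluster, so switch to
 * the G cluster first (raising the rate to the G-mode minimum if needed),
 * then kick the governor so the hot-plug state machine follows up.
 */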
static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
{
        mutex_lock(tegra3_cpu_lock);

        if ((n >= 2) && is_lp_cluster()) {
                /* make sure cpu rate is within g-mode range before switching */
                unsigned int speed = max(
                        tegra_getspeed(0), clk_get_min_rate(cpu_g_clk) / 1000);
                tegra_update_cpu_speed(speed);

                if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                        hp_stats_update(CONFIG_NR_CPUS, false);
                        hp_stats_update(0, true);
                }
        }
        /* update governor state machine */
        tegra_cpu_set_speed_cap(NULL);
        mutex_unlock(tegra3_cpu_lock);
        return NOTIFY_OK;
}

static struct notifier_block min_cpus_notifier = {
        .notifier_call = min_cpus_notify,
};

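/*
 * Hot-plug state machine input from the cpufreq path; cpu_freq is the new
 * target in kHz.  Expected to be called with tegra3_cpu_lock held, since
 * it manipulates hp_state without taking the lock itself.  On suspend the
 * machine is parked in IDLE, switching to G-mode first if the suspend
 * rate is above the LP range; otherwise the target frequency moves the
 * state between IDLE, UP and DOWN and schedules the delayed worker.
 */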
void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
        unsigned long up_delay, top_freq, bottom_freq;

        if (!is_g_cluster_present())
                return;

        if (suspend && (hp_state != TEGRA_HP_DISABLED)) {
                hp_state = TEGRA_HP_IDLE;

                /* Switch to G-mode if suspend rate is high enough */
                if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) {
                        if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                                hp_stats_update(CONFIG_NR_CPUS, false);
                                hp_stats_update(0, true);
                        }
                }
                return;
        }

        if (is_lp_cluster()) {
                up_delay = up2g0_delay;
                top_freq = idle_top_freq;
                bottom_freq = 0;
        } else {
                up_delay = up2gn_delay;
                top_freq = idle_bottom_freq;
                bottom_freq = idle_bottom_freq;
        }

        if (pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) {
                if (hp_state != TEGRA_HP_UP) {
                        hp_state = TEGRA_HP_UP;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, up_delay);
                }
                return;
        }

        switch (hp_state) {
        case TEGRA_HP_DISABLED:
                break;
        case TEGRA_HP_IDLE:
                if (cpu_freq > top_freq) {
                        hp_state = TEGRA_HP_UP;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, up_delay);
                } else if (cpu_freq <= bottom_freq) {
                        hp_state = TEGRA_HP_DOWN;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, down_delay);
                }
                break;
        case TEGRA_HP_DOWN:
                if (cpu_freq > top_freq) {
                        hp_state = TEGRA_HP_UP;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, up_delay);
                } else if (cpu_freq > bottom_freq) {
                        hp_state = TEGRA_HP_IDLE;
                }
                break;
        case TEGRA_HP_UP:
                if (cpu_freq <= bottom_freq) {
                        hp_state = TEGRA_HP_DOWN;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, down_delay);
                } else if (cpu_freq <= top_freq) {
                        hp_state = TEGRA_HP_IDLE;
                }
                break;
        default:
                pr_err("%s: invalid tegra hotplug state %d\n",
                       __func__, hp_state);
                BUG();
        }
}

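/*
 * One-time setup: create the hot-plug workqueue, look up the cpu/cpu_g/
 * cpu_lp clocks, derive the cluster-switch thresholds and delays, and
 * register the PM QoS min-on-line-CPUs notifier.  The mutex passed in is
 * the cpu-tegra lock that the rest of this file serializes on.
 */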
int tegra_auto_hotplug_init(struct mutex *cpu_lock)
{
        /*
         * Not bound to the issuer CPU (=> high-priority), has rescue worker
         * task, single-threaded, freezable.
         */
        hotplug_wq = alloc_workqueue(
                "cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1);
        if (!hotplug_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func);

        cpu_clk = clk_get_sys(NULL, "cpu");
        cpu_g_clk = clk_get_sys(NULL, "cpu_g");
        cpu_lp_clk = clk_get_sys(NULL, "cpu_lp");
        if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk))
                return -ENOENT;

        idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000;
        idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000;

        up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS);
        up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS);
        down_delay = msecs_to_jiffies(DOWN_DELAY_MS);

        tegra3_cpu_lock = cpu_lock;
        hp_state = INITIAL_STATE;
        hp_init_stats();
        pr_info("Tegra auto-hotplug initialized: %s\n",
                (hp_state == TEGRA_HP_DISABLED) ? "disabled" : "enabled");

        if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier))
                pr_err("%s: Failed to register min cpus PM QoS notifier\n",
                        __func__);

        return 0;
}

#ifdef CONFIG_DEBUG_FS

static struct dentry *hp_debugfs_root;

struct pm_qos_request min_cpu_req;
struct pm_qos_request max_cpu_req;

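/*
 * debugfs "stats" file: bring the per-CPU accounting up to date, then
 * print the transition count and accumulated up-time for each G core and
 * for the LP CPU.
 */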
static int hp_stats_show(struct seq_file *s, void *data)
{
        int i;
        u64 cur_jiffies = get_jiffies_64();

        mutex_lock(tegra3_cpu_lock);
        if (hp_state != TEGRA_HP_DISABLED) {
                for (i = 0; i <= CONFIG_NR_CPUS; i++) {
                        bool was_up = (hp_stats[i].up_down_count & 0x1);
                        hp_stats_update(i, was_up);
                }
        }
        mutex_unlock(tegra3_cpu_lock);

        seq_printf(s, "%-15s ", "cpu:");
        for (i = 0; i < CONFIG_NR_CPUS; i++)
                seq_printf(s, "G%-9d ", i);
        seq_printf(s, "LP\n");

        seq_printf(s, "%-15s ", "transitions:");
        for (i = 0; i <= CONFIG_NR_CPUS; i++)
                seq_printf(s, "%-10u ", hp_stats[i].up_down_count);
        seq_printf(s, "\n");

        seq_printf(s, "%-15s ", "time plugged:");
        for (i = 0; i <= CONFIG_NR_CPUS; i++)
                seq_printf(s, "%-10llu ",
                           cputime64_to_clock_t(hp_stats[i].time_up_total));
        seq_printf(s, "\n");

        seq_printf(s, "%-15s %llu\n", "time-stamp:",
                   cputime64_to_clock_t(cur_jiffies));

        return 0;
}

static int hp_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, hp_stats_show, inode->i_private);
}

static const struct file_operations hp_stats_fops = {
        .open           = hp_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

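/*
 * debugfs "min_cpus"/"max_cpus" attributes: reads report the aggregated
 * PM QoS value for the respective class; the set handlers update this
 * driver's own request (note the files are created read-only below).
 */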
static int min_cpus_get(void *data, u64 *val)
{
        *val = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
        return 0;
}
static int min_cpus_set(void *data, u64 val)
{
        pm_qos_update_request(&min_cpu_req, (s32)val);
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(min_cpus_fops, min_cpus_get, min_cpus_set, "%llu\n");

static int max_cpus_get(void *data, u64 *val)
{
        *val = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
        return 0;
}
static int max_cpus_set(void *data, u64 val)
{
        pm_qos_update_request(&max_cpu_req, (s32)val);
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(max_cpus_fops, max_cpus_get, max_cpus_set, "%llu\n");

static int __init tegra_auto_hotplug_debug_init(void)
{
        if (!tegra3_cpu_lock)
                return -ENOENT;

        hp_debugfs_root = debugfs_create_dir("tegra_hotplug", NULL);
        if (!hp_debugfs_root)
                return -ENOMEM;

        pm_qos_add_request(&min_cpu_req, PM_QOS_MIN_ONLINE_CPUS,
                           PM_QOS_DEFAULT_VALUE);
        pm_qos_add_request(&max_cpu_req, PM_QOS_MAX_ONLINE_CPUS,
                           PM_QOS_DEFAULT_VALUE);

        if (!debugfs_create_file(
                "min_cpus", S_IRUGO, hp_debugfs_root, NULL, &min_cpus_fops))
                goto err_out;

        if (!debugfs_create_file(
                "max_cpus", S_IRUGO, hp_debugfs_root, NULL, &max_cpus_fops))
                goto err_out;

        if (!debugfs_create_file(
                "stats", S_IRUGO, hp_debugfs_root, NULL, &hp_stats_fops))
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(hp_debugfs_root);
        pm_qos_remove_request(&min_cpu_req);
        pm_qos_remove_request(&max_cpu_req);
        return -ENOMEM;
}

late_initcall(tegra_auto_hotplug_debug_init);
#endif

void tegra_auto_hotplug_exit(void)
{
        destroy_workqueue(hotplug_wq);
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(hp_debugfs_root);
        pm_qos_remove_request(&min_cpu_req);
        pm_qos_remove_request(&max_cpu_req);
#endif
}