ARM: tegra: power: Apply down delay to balancing CPUs
/*
 * arch/arm/mach-tegra/cpu-tegra3.c
 *
 * CPU auto-hotplug for Tegra3 CPUs
 *
 * Copyright (c) 2011-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpu.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pm_qos.h>

#include "pm.h"
#include "cpu-tegra.h"
#include "clock.h"

#define INITIAL_STATE           TEGRA_HP_DISABLED
#define UP2G0_DELAY_MS          70
#define UP2Gn_DELAY_MS          100
#define DOWN_DELAY_MS           2000
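/*
 * Delay semantics (see tegra_auto_hotplug_governor() and the work function
 * below): UP2G0 paces the LP-to-G cluster switch, UP2Gn paces bringing
 * additional G-cluster cores online, and DOWN_DELAY throttles core
 * offlining and, with this change, down-balancing decisions as well.
 */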

static struct mutex *tegra3_cpu_lock;

static struct workqueue_struct *hotplug_wq;
static struct delayed_work hotplug_work;

static bool no_lp;
module_param(no_lp, bool, 0644);

static unsigned long up2gn_delay;
static unsigned long up2g0_delay;
static unsigned long down_delay;
module_param(up2gn_delay, ulong, 0644);
module_param(up2g0_delay, ulong, 0644);
module_param(down_delay, ulong, 0644);

static unsigned int idle_top_freq;
static unsigned int idle_bottom_freq;
module_param(idle_top_freq, uint, 0644);
module_param(idle_bottom_freq, uint, 0644);

static int mp_overhead = 10;
module_param(mp_overhead, int, 0644);

static int balance_level = 75;
module_param(balance_level, int, 0644);

static struct clk *cpu_clk;
static struct clk *cpu_g_clk;
static struct clk *cpu_lp_clk;

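/*
 * Per-CPU hotplug statistics. The LSB of up_down_count tracks whether the
 * CPU is currently considered online (1) or offline (0); the counter as a
 * whole is the number of up/down transitions. The extra entry at index
 * CONFIG_NR_CPUS accounts for the LP ("companion") CPU.
 */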
static struct {
        cputime64_t time_up_total;
        u64 last_update;
        unsigned int up_down_count;
} hp_stats[CONFIG_NR_CPUS + 1];	/* Append LP CPU entry at the end */

static void hp_init_stats(void)
{
        int i;
        u64 cur_jiffies = get_jiffies_64();

        for (i = 0; i <= CONFIG_NR_CPUS; i++) {
                hp_stats[i].time_up_total = 0;
                hp_stats[i].last_update = cur_jiffies;

                hp_stats[i].up_down_count = 0;
                if (is_lp_cluster()) {
                        if (i == CONFIG_NR_CPUS)
                                hp_stats[i].up_down_count = 1;
                } else {
                        if ((i < nr_cpu_ids) && cpu_online(i))
                                hp_stats[i].up_down_count = 1;
                }
        }
}

static void hp_stats_update(unsigned int cpu, bool up)
{
        u64 cur_jiffies = get_jiffies_64();
        bool was_up = hp_stats[cpu].up_down_count & 0x1;

        if (was_up)
                hp_stats[cpu].time_up_total =
                        hp_stats[cpu].time_up_total +
                        (cur_jiffies - hp_stats[cpu].last_update);

        if (was_up != up) {
                hp_stats[cpu].up_down_count++;
                if ((hp_stats[cpu].up_down_count & 0x1) != up) {
                        /* FIXME: sysfs user space CPU control breaks stats */
                        pr_err("tegra hotplug stats out of sync with %s CPU%d",
                               (cpu < CONFIG_NR_CPUS) ? "G" : "LP",
                               (cpu < CONFIG_NR_CPUS) ? cpu : 0);
                        hp_stats[cpu].up_down_count ^= 0x1;
                }
        }
        hp_stats[cpu].last_update = cur_jiffies;
}


enum {
        TEGRA_HP_DISABLED = 0,
        TEGRA_HP_IDLE,
        TEGRA_HP_DOWN,
        TEGRA_HP_UP,
};
static int hp_state;

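/*
 * The "auto_hotplug" module parameter exposes hp_state as a boolean knob:
 * writing 0 forces TEGRA_HP_DISABLED (cancelling any pending hotplug work),
 * writing 1 re-enables the governor in TEGRA_HP_IDLE and re-synchronizes
 * with the current cpufreq target.
 */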
static int hp_state_set(const char *arg, const struct kernel_param *kp)
{
        int ret = 0;
        int old_state;

        if (!tegra3_cpu_lock)
                return ret;

        mutex_lock(tegra3_cpu_lock);

        old_state = hp_state;
        ret = param_set_bool(arg, kp);  /* set idle or disabled only */

        if (ret == 0) {
                if ((hp_state == TEGRA_HP_DISABLED) &&
                    (old_state != TEGRA_HP_DISABLED)) {
                        mutex_unlock(tegra3_cpu_lock);
                        cancel_delayed_work_sync(&hotplug_work);
                        mutex_lock(tegra3_cpu_lock);
                        pr_info("Tegra auto-hotplug disabled\n");
                } else if (hp_state != TEGRA_HP_DISABLED) {
                        if (old_state == TEGRA_HP_DISABLED) {
                                pr_info("Tegra auto-hotplug enabled\n");
                                hp_init_stats();
                        }
                        /* catch-up with governor target speed */
                        tegra_cpu_set_speed_cap(NULL);
                }
        } else
                pr_warn("%s: unable to set tegra hotplug state %s\n",
                        __func__, arg);

        mutex_unlock(tegra3_cpu_lock);
        return ret;
}

static int hp_state_get(char *buffer, const struct kernel_param *kp)
{
        return param_get_int(buffer, kp);
}

static struct kernel_param_ops tegra_hp_state_ops = {
        .set = hp_state_set,
        .get = hp_state_get,
};
module_param_cb(auto_hotplug, &tegra_hp_state_ops, &hp_state, 0644);


enum {
        TEGRA_CPU_SPEED_BALANCED,
        TEGRA_CPU_SPEED_BIASED,
        TEGRA_CPU_SPEED_SKEWED,
};

static noinline int tegra_cpu_speed_balance(void)
{
        unsigned long highest_speed = tegra_cpu_highest_speed();
        unsigned long balanced_speed = highest_speed * balance_level / 100;
        unsigned long skewed_speed = balanced_speed / 2;
        unsigned int nr_cpus = num_online_cpus();
        unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
        unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

        /* balanced: freq targets for all CPUs are above balance_level
           percent of the highest speed
           biased: freq target for at least one CPU is below that balanced
           threshold
           skewed: freq targets for at least 2 CPUs are below half of the
           balanced threshold */
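        /*
         * Worked example with the default balance_level of 75: if the
         * fastest CPU targets 1000 MHz, balanced_speed is 750 MHz and
         * skewed_speed is 375 MHz. Two or more CPUs targeting under
         * 375 MHz make the load "skewed" (favor taking a core down),
         * while a single CPU under 750 MHz merely makes it "biased"
         * (hold the current core count).
         */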
        if (((tegra_count_slow_cpus(skewed_speed) >= 2) ||
             tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) ||
             (highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) &&
            (nr_cpus > min_cpus))
                return TEGRA_CPU_SPEED_SKEWED;

        if (((tegra_count_slow_cpus(balanced_speed) >= 1) ||
             (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) ||
             (highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) &&
            (nr_cpus >= min_cpus))
                return TEGRA_CPU_SPEED_BIASED;

        return TEGRA_CPU_SPEED_BALANCED;
}

static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
        bool up = false;
        unsigned int cpu = nr_cpu_ids;
        unsigned long now = jiffies;
        static unsigned long last_change_time;

        mutex_lock(tegra3_cpu_lock);

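        /*
         * In the DOWN and UP states the work item re-arms itself (with
         * down_delay or up2gn_delay respectively), so the governor keeps
         * re-evaluating core count and cluster choice until it settles
         * back into IDLE or is disabled.
         */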
        switch (hp_state) {
        case TEGRA_HP_DISABLED:
        case TEGRA_HP_IDLE:
                break;
        case TEGRA_HP_DOWN:
                cpu = tegra_get_slowest_cpu_n();
                if (cpu < nr_cpu_ids) {
                        up = false;
                } else if (!is_lp_cluster() && !no_lp) {
                        if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
                                hp_stats_update(CONFIG_NR_CPUS, true);
                                hp_stats_update(0, false);
                                /* catch-up with governor target speed */
                                tegra_cpu_set_speed_cap(NULL);
                                break;
                        }
                }
                queue_delayed_work(
                        hotplug_wq, &hotplug_work, down_delay);
                break;
        case TEGRA_HP_UP:
                if (is_lp_cluster() && !no_lp) {
                        if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                                hp_stats_update(CONFIG_NR_CPUS, false);
                                hp_stats_update(0, true);
                                /* catch-up with governor target speed */
                                tegra_cpu_set_speed_cap(NULL);
                        }
                } else {
                        switch (tegra_cpu_speed_balance()) {
                        /* cpu speed is up and balanced - one more on-line */
                        case TEGRA_CPU_SPEED_BALANCED:
                                cpu = cpumask_next_zero(0, cpu_online_mask);
                                if (cpu < nr_cpu_ids)
                                        up = true;
                                break;
                        /* cpu speed is up, but skewed - remove one core */
                        case TEGRA_CPU_SPEED_SKEWED:
                                cpu = tegra_get_slowest_cpu_n();
                                if (cpu < nr_cpu_ids)
                                        up = false;
                                break;
                        /* cpu speed is up, but under-utilized - do nothing */
                        case TEGRA_CPU_SPEED_BIASED:
                        default:
                                break;
                        }
                }
                queue_delayed_work(
                        hotplug_wq, &hotplug_work, up2gn_delay);
                break;
        default:
                pr_err("%s: invalid tegra hotplug state %d\n",
                       __func__, hp_state);
        }

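        /*
         * Apply the down delay to down-balancing as well: never take a core
         * offline (whether from the DOWN state or from a skewed balance in
         * the UP state) if the last hotplug change happened less than
         * down_delay jiffies ago; defer the decision to a later pass.
         */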
        if (!up && ((now - last_change_time) < down_delay))
                cpu = nr_cpu_ids;

        if (cpu < nr_cpu_ids) {
                last_change_time = now;
                hp_stats_update(cpu, up);
        }
        mutex_unlock(tegra3_cpu_lock);

        if (cpu < nr_cpu_ids) {
                if (up)
                        cpu_up(cpu);
                else
                        cpu_down(cpu);
        }
}

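/*
 * PM QoS notifier: when the min_online_cpus constraint is raised to 2 or
 * more while running on the single-core LP companion cluster, raise the CPU
 * rate into the G clock's range and switch to the G cluster first, then let
 * the speed-cap update drive the hotplug state machine.
 */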
static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
{
        mutex_lock(tegra3_cpu_lock);

        if ((n >= 2) && is_lp_cluster()) {
                /* make sure cpu rate is within g-mode range before switching */
                unsigned int speed = max(
                        tegra_getspeed(0), clk_get_min_rate(cpu_g_clk) / 1000);
                tegra_update_cpu_speed(speed);

                if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                        hp_stats_update(CONFIG_NR_CPUS, false);
                        hp_stats_update(0, true);
                }
        }
        /* update governor state machine */
        tegra_cpu_set_speed_cap(NULL);
        mutex_unlock(tegra3_cpu_lock);
        return NOTIFY_OK;
}

static struct notifier_block min_cpus_notifier = {
        .notifier_call = min_cpus_notify,
};

void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
        unsigned long up_delay, top_freq, bottom_freq;

        if (!is_g_cluster_present())
                return;

        if (suspend && (hp_state != TEGRA_HP_DISABLED)) {
                hp_state = TEGRA_HP_IDLE;

                /* Switch to G-mode if suspend rate is high enough */
                if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) {
                        if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                                hp_stats_update(CONFIG_NR_CPUS, false);
                                hp_stats_update(0, true);
                        }
                }
                return;
        }

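        /*
         * Hysteresis thresholds: on the LP cluster the only "up" move is the
         * switch to the G cluster once the target exceeds the LP clock's
         * ceiling, and there is no lower threshold (bottom_freq = 0). On the
         * G cluster both thresholds sit at the G clock's floor, so dropping
         * below it starts the down path.
         */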
        if (is_lp_cluster()) {
                up_delay = up2g0_delay;
                top_freq = idle_top_freq;
                bottom_freq = 0;
        } else {
                up_delay = up2gn_delay;
                top_freq = idle_bottom_freq;
                bottom_freq = idle_bottom_freq;
        }

        if (pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) {
                if (hp_state != TEGRA_HP_UP) {
                        hp_state = TEGRA_HP_UP;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, up_delay);
                }
                return;
        }

        switch (hp_state) {
        case TEGRA_HP_DISABLED:
                break;
        case TEGRA_HP_IDLE:
                if (cpu_freq > top_freq) {
                        hp_state = TEGRA_HP_UP;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, up_delay);
                } else if (cpu_freq <= bottom_freq) {
                        hp_state = TEGRA_HP_DOWN;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, down_delay);
                }
                break;
        case TEGRA_HP_DOWN:
                if (cpu_freq > top_freq) {
                        hp_state = TEGRA_HP_UP;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, up_delay);
                } else if (cpu_freq > bottom_freq) {
                        hp_state = TEGRA_HP_IDLE;
                }
                break;
        case TEGRA_HP_UP:
                if (cpu_freq <= bottom_freq) {
                        hp_state = TEGRA_HP_DOWN;
                        queue_delayed_work(
                                hotplug_wq, &hotplug_work, down_delay);
                } else if (cpu_freq <= top_freq) {
                        hp_state = TEGRA_HP_IDLE;
                }
                break;
        default:
                pr_err("%s: invalid tegra hotplug state %d\n",
                       __func__, hp_state);
                BUG();
        }
}

int tegra_auto_hotplug_init(struct mutex *cpu_lock)
{
        /*
         * Not bound to the issuer CPU (=> high-priority), has rescue worker
         * task, single-threaded, freezable.
         */
        hotplug_wq = alloc_workqueue(
                "cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1);
        if (!hotplug_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func);

        cpu_clk = clk_get_sys(NULL, "cpu");
        cpu_g_clk = clk_get_sys(NULL, "cpu_g");
        cpu_lp_clk = clk_get_sys(NULL, "cpu_lp");
        if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk))
                return -ENOENT;

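        /*
         * Both thresholds are in kHz (clk rates are in Hz), matching the
         * cpufreq units used by the governor callback above: top is the LP
         * cluster's maximum rate, bottom is the G cluster's minimum rate.
         */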
        idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000;
        idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000;

        up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS);
        up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS);
        down_delay = msecs_to_jiffies(DOWN_DELAY_MS);

        tegra3_cpu_lock = cpu_lock;
        hp_state = INITIAL_STATE;
        hp_init_stats();
        pr_info("Tegra auto-hotplug initialized: %s\n",
                (hp_state == TEGRA_HP_DISABLED) ? "disabled" : "enabled");

        if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier))
                pr_err("%s: Failed to register min cpus PM QoS notifier\n",
                        __func__);

        return 0;
}

#ifdef CONFIG_DEBUG_FS

static struct dentry *hp_debugfs_root;

struct pm_qos_request min_cpu_req;
struct pm_qos_request max_cpu_req;

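/*
 * Debugfs interface under the "tegra_hotplug" directory: "min_cpus" and
 * "max_cpus" proxy the PM QoS online-CPU constraints, while "stats" dumps
 * per-CPU transition counts and accumulated plugged-in time for the G cores
 * and the LP CPU.
 */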
static int hp_stats_show(struct seq_file *s, void *data)
{
        int i;
        u64 cur_jiffies = get_jiffies_64();

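        /*
         * Refresh the accumulated up-time for every CPU (without changing
         * its state) so the report reflects time up to this read.
         */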
        mutex_lock(tegra3_cpu_lock);
        if (hp_state != TEGRA_HP_DISABLED) {
                for (i = 0; i <= CONFIG_NR_CPUS; i++) {
                        bool was_up = (hp_stats[i].up_down_count & 0x1);
                        hp_stats_update(i, was_up);
                }
        }
        mutex_unlock(tegra3_cpu_lock);

        seq_printf(s, "%-15s ", "cpu:");
        for (i = 0; i < CONFIG_NR_CPUS; i++) {
                seq_printf(s, "G%-9d ", i);
        }
        seq_printf(s, "LP\n");

        seq_printf(s, "%-15s ", "transitions:");
        for (i = 0; i <= CONFIG_NR_CPUS; i++) {
                seq_printf(s, "%-10u ", hp_stats[i].up_down_count);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%-15s ", "time plugged:");
        for (i = 0; i <= CONFIG_NR_CPUS; i++) {
                seq_printf(s, "%-10llu ",
                           cputime64_to_clock_t(hp_stats[i].time_up_total));
        }
        seq_printf(s, "\n");

        seq_printf(s, "%-15s %llu\n", "time-stamp:",
                   cputime64_to_clock_t(cur_jiffies));

        return 0;
}

static int hp_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, hp_stats_show, inode->i_private);
}

static const struct file_operations hp_stats_fops = {
        .open           = hp_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int min_cpus_get(void *data, u64 *val)
{
        *val = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
        return 0;
}
static int min_cpus_set(void *data, u64 val)
{
        pm_qos_update_request(&min_cpu_req, (s32)val);
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(min_cpus_fops, min_cpus_get, min_cpus_set, "%llu\n");

static int max_cpus_get(void *data, u64 *val)
{
        *val = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
        return 0;
}
static int max_cpus_set(void *data, u64 val)
{
        pm_qos_update_request(&max_cpu_req, (s32)val);
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(max_cpus_fops, max_cpus_get, max_cpus_set, "%llu\n");

static int __init tegra_auto_hotplug_debug_init(void)
{
        if (!tegra3_cpu_lock)
                return -ENOENT;

        hp_debugfs_root = debugfs_create_dir("tegra_hotplug", NULL);
        if (!hp_debugfs_root)
                return -ENOMEM;

        pm_qos_add_request(&min_cpu_req, PM_QOS_MIN_ONLINE_CPUS,
                           PM_QOS_DEFAULT_VALUE);
        pm_qos_add_request(&max_cpu_req, PM_QOS_MAX_ONLINE_CPUS,
                           PM_QOS_DEFAULT_VALUE);

        if (!debugfs_create_file(
                "min_cpus", S_IRUGO, hp_debugfs_root, NULL, &min_cpus_fops))
                goto err_out;

        if (!debugfs_create_file(
                "max_cpus", S_IRUGO, hp_debugfs_root, NULL, &max_cpus_fops))
                goto err_out;

        if (!debugfs_create_file(
                "stats", S_IRUGO, hp_debugfs_root, NULL, &hp_stats_fops))
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(hp_debugfs_root);
        pm_qos_remove_request(&min_cpu_req);
        pm_qos_remove_request(&max_cpu_req);
        return -ENOMEM;
}

late_initcall(tegra_auto_hotplug_debug_init);
#endif

void tegra_auto_hotplug_exit(void)
{
        destroy_workqueue(hotplug_wq);
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(hp_debugfs_root);
        pm_qos_remove_request(&min_cpu_req);
        pm_qos_remove_request(&max_cpu_req);
#endif
}