/*
 * Source: arch/arm/mach-tegra/cpu-tegra3.c (linux-3.10.git)
 * Snapshot taken at commit "ARM: tegra12: set CPU rate to 2.2GHz for sku 0x87"
 */
1 /*
2  * arch/arm/mach-tegra/cpu-tegra3.c
3  *
4  * CPU auto-hotplug for Tegra3 CPUs
5  *
6  * Copyright (c) 2011-2013, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/types.h>
26 #include <linux/sched.h>
27 #include <linux/cpufreq.h>
28 #include <linux/delay.h>
29 #include <linux/err.h>
30 #include <linux/io.h>
31 #include <linux/cpu.h>
32 #include <linux/clk.h>
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include <linux/pm_qos.h>
36
37 #include "pm.h"
38 #include "cpu-tegra.h"
39 #include "clock.h"
40
/* Hotplug starts disabled; enabled later via the auto_hotplug module param */
#define INITIAL_STATE		TEGRA_HP_DISABLED
/* Delay before bringing the first G-cluster CPU up (LP -> G switch) */
#define UP2G0_DELAY_MS		70
/* Delay between subsequent up/down evaluations while on the G cluster */
#define UP2Gn_DELAY_MS		100
/* Minimum residency before a core may be taken down again */
#define DOWN_DELAY_MS		2000

/* tegra3_cpu_lock is tegra_cpu_lock from cpu-tegra.c */
static struct mutex *tegra3_cpu_lock;

/* Single-threaded workqueue driving all hotplug decisions */
static struct workqueue_struct *hotplug_wq;
static struct delayed_work hotplug_work;

/* When set, never switch to the low-power (LP) companion cluster */
static bool no_lp;
module_param(no_lp, bool, 0644);

/* Delays above, converted to jiffies at init; tunable at runtime */
static unsigned long up2gn_delay;
static unsigned long up2g0_delay;
static unsigned long down_delay;
module_param(up2gn_delay, ulong, 0644);
module_param(up2g0_delay, ulong, 0644);
module_param(down_delay, ulong, 0644);

/* Cluster-switch frequency thresholds (kHz), derived from clk limits */
static unsigned int idle_top_freq;
static unsigned int idle_bottom_freq;
module_param(idle_top_freq, uint, 0644);
module_param(idle_bottom_freq, uint, 0644);

/* Percentage overhead assumed for multi-core operation (EDP evaluation) */
static int mp_overhead = 10;
module_param(mp_overhead, int, 0644);

/* Percent of the highest CPU speed considered "balanced" load */
static int balance_level = 60;
module_param(balance_level, int, 0644);

static struct clk *cpu_clk;
static struct clk *cpu_g_clk;
static struct clk *cpu_lp_clk;

/* Jiffies timestamp of the last cluster/core transition */
static unsigned long last_change_time;

/*
 * Per-CPU hotplug statistics. up_down_count's low bit doubles as the
 * current "is up" flag (odd == up, even == down).
 */
static struct {
	cputime64_t time_up_total;
	u64 last_update;
	unsigned int up_down_count;
} hp_stats[CONFIG_NR_CPUS + 1];	/* Append LP CPU entry at the end */
84
85 static void hp_init_stats(void)
86 {
87         int i;
88         u64 cur_jiffies = get_jiffies_64();
89
90         for (i = 0; i <= CONFIG_NR_CPUS; i++) {
91                 hp_stats[i].time_up_total = 0;
92                 hp_stats[i].last_update = cur_jiffies;
93
94                 hp_stats[i].up_down_count = 0;
95                 if (is_lp_cluster()) {
96                         if (i == CONFIG_NR_CPUS)
97                                 hp_stats[i].up_down_count = 1;
98                 } else {
99                         if ((i < nr_cpu_ids) && cpu_online(i))
100                                 hp_stats[i].up_down_count = 1;
101                 }
102         }
103
104 }
105
106 static void hp_stats_update(unsigned int cpu, bool up)
107 {
108         u64 cur_jiffies = get_jiffies_64();
109         bool was_up = hp_stats[cpu].up_down_count & 0x1;
110
111         if (was_up)
112                 hp_stats[cpu].time_up_total =
113                         hp_stats[cpu].time_up_total +
114                         (cur_jiffies - hp_stats[cpu].last_update);
115
116         if (was_up != up) {
117                 hp_stats[cpu].up_down_count++;
118                 if ((hp_stats[cpu].up_down_count & 0x1) != up) {
119                         /* FIXME: sysfs user space CPU control breaks stats */
120                         pr_err("tegra hotplug stats out of sync with %s CPU%d",
121                                (cpu < CONFIG_NR_CPUS) ? "G" : "LP",
122                                (cpu < CONFIG_NR_CPUS) ?  cpu : 0);
123                         hp_stats[cpu].up_down_count ^=  0x1;
124                 }
125         }
126         hp_stats[cpu].last_update = cur_jiffies;
127 }
128
129
/* Auto-hotplug state machine states */
enum {
	TEGRA_HP_DISABLED = 0,	/* governor ignored, no work queued */
	TEGRA_HP_IDLE,		/* enabled, no pending core changes */
	TEGRA_HP_DOWN,		/* trending down: remove cores / go LP */
	TEGRA_HP_UP,		/* trending up: add cores / go G */
};
static int hp_state;
137
/*
 * module_param setter for auto_hotplug: user space may only toggle
 * between disabled (0) and idle/enabled (1).
 *
 * NOTE(review): param_set_bool() is applied to the int hp_state, so a
 * write stores 0 or 1 — i.e. TEGRA_HP_DISABLED or TEGRA_HP_IDLE.
 * Relies on bool writes covering the int on this platform — confirm.
 */
static int hp_state_set(const char *arg, const struct kernel_param *kp)
{
	int ret = 0;
	int old_state;

	/* not initialized yet (tegra_auto_hotplug_init not called) */
	if (!tegra3_cpu_lock)
		return ret;

	mutex_lock(tegra3_cpu_lock);

	old_state = hp_state;
	ret = param_set_bool(arg, kp);	/* set idle or disabled only */

	if (ret == 0) {
		if ((hp_state == TEGRA_HP_DISABLED) &&
		    (old_state != TEGRA_HP_DISABLED)) {
			/*
			 * Drop the lock while cancelling: the work func
			 * takes tegra3_cpu_lock itself, so cancelling
			 * synchronously under the lock would deadlock.
			 */
			mutex_unlock(tegra3_cpu_lock);
			cancel_delayed_work_sync(&hotplug_work);
			mutex_lock(tegra3_cpu_lock);
			pr_info("Tegra auto-hotplug disabled\n");
		} else if (hp_state != TEGRA_HP_DISABLED) {
			if (old_state == TEGRA_HP_DISABLED) {
				pr_info("Tegra auto-hotplug enabled\n");
				hp_init_stats();
			}
			/* catch-up with governor target speed */
			tegra_cpu_set_speed_cap_locked(NULL);
		}
	} else
		pr_warn("%s: unable to set tegra hotplug state %s\n",
				__func__, arg);

	mutex_unlock(tegra3_cpu_lock);
	return ret;
}
173
/* module_param getter: expose the raw hp_state integer. */
static int hp_state_get(char *buffer, const struct kernel_param *kp)
{
	return param_get_int(buffer, kp);
}
178
/* Custom ops so auto_hotplug accepts only bool-style input but reads as int */
static struct kernel_param_ops tegra_hp_state_ops = {
	.set = hp_state_set,
	.get = hp_state_get,
};
module_param_cb(auto_hotplug, &tegra_hp_state_ops, &hp_state, 0644);
184
185
/* Verdicts returned by tegra_cpu_speed_balance() */
enum {
	TEGRA_CPU_SPEED_BALANCED,	/* room to bring one more core up */
	TEGRA_CPU_SPEED_BIASED,		/* keep current core count */
	TEGRA_CPU_SPEED_SKEWED,		/* take one core down */
};

/* avg_nr_running() is FSHIFT fixed point; thresholds use NR_FSHIFT (x4) */
#define NR_FSHIFT	2

/* Index into rt_profiles[], settable via debugfs "core_bias" */
static unsigned int rt_profile_sel;

/* avg run threads * 4 (e.g., 9 = 2.25 threads) */

static unsigned int rt_profile_default[] = {
/*      1,  2,  3,  4 - on-line cpus target */
	5,  9, 10, UINT_MAX
};

static unsigned int rt_profile_1[] = {
/*      1,  2,  3,  4 - on-line cpus target */
	8,  9, 10, UINT_MAX
};

static unsigned int rt_profile_2[] = {
/*      1,  2,  3,  4 - on-line cpus target */
	5,  13, 14, UINT_MAX
};

static unsigned int rt_profile_off[] = { /* disables runable thread */
	0,  0,  0, UINT_MAX
};

static unsigned int *rt_profiles[] = {
	rt_profile_default,
	rt_profile_1,
	rt_profile_2,
	rt_profile_off
};

/* Hysteresis added to the threshold when core count would not grow */
static unsigned int nr_run_hysteresis = 2;	/* 0.5 thread */
static unsigned int nr_run_last;
227
/*
 * Decide whether the current G-cluster core count should grow, shrink,
 * or stay. Caller holds tegra3_cpu_lock (called from the hotplug work
 * function).
 */
static noinline int tegra_cpu_speed_balance(void)
{
	unsigned long highest_speed = tegra_cpu_highest_speed();
	unsigned long balanced_speed = highest_speed * balance_level / 100;
	unsigned long skewed_speed = balanced_speed / 2;
	unsigned int nr_cpus = num_online_cpus();
	/* pm_qos value 0 means "no request" -> allow up to 4 cores */
	unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
	unsigned int avg_nr_run = avg_nr_running();
	unsigned int nr_run;

	/* Evaluate:
	 * - distribution of freq targets for already on-lined CPUs
	 * - average number of runnable threads
	 * - effective MIPS available within EDP frequency limits,
	 * and return:
	 * TEGRA_CPU_SPEED_BALANCED to bring one more CPU core on-line
	 * TEGRA_CPU_SPEED_BIASED to keep CPU core composition unchanged
	 * TEGRA_CPU_SPEED_SKEWED to remove CPU core off-line
	 */

	/* Map the runnable-thread average onto a target core count,
	 * adding hysteresis when the count would not increase. */
	unsigned int *current_profile = rt_profiles[rt_profile_sel];
	for (nr_run = 1; nr_run < ARRAY_SIZE(rt_profile_default); nr_run++) {
		unsigned int nr_threshold = current_profile[nr_run - 1];
		if (nr_run_last <= nr_run)
			nr_threshold += nr_run_hysteresis;
		if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT)))
			break;
	}
	nr_run_last = nr_run;

	/* Shrink only if min_cpus still permits it */
	if (((tegra_count_slow_cpus(skewed_speed) >= 2) ||
	     (nr_run < nr_cpus) ||
	     tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) ||
	     (highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) &&
	    (nr_cpus > min_cpus))
		return TEGRA_CPU_SPEED_SKEWED;

	/* Hold steady when load, EDP, or max_cpus argue against growth */
	if (((tegra_count_slow_cpus(balanced_speed) >= 1) ||
	     (nr_run <= nr_cpus) ||
	     (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) ||
	     (highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) &&
	    (nr_cpus >= min_cpus))
		return TEGRA_CPU_SPEED_BIASED;

	return TEGRA_CPU_SPEED_BALANCED;
}
275
/*
 * Deferred hotplug worker: executes one up/down/cluster-switch step per
 * invocation and requeues itself while in an active state. Stats and
 * state are updated under tegra3_cpu_lock; the actual cpu_up()/
 * cpu_down() call happens after the lock is dropped (cpu_up/cpu_down
 * must not be called under this mutex).
 */
static void __cpuinit tegra_auto_hotplug_work_func(struct work_struct *work)
{
	bool up = false;
	unsigned int cpu = nr_cpu_ids;	/* nr_cpu_ids == "no action" */
	unsigned long now = jiffies;

	mutex_lock(tegra3_cpu_lock);

	switch (hp_state) {
	case TEGRA_HP_DISABLED:
	case TEGRA_HP_IDLE:
		break;
	case TEGRA_HP_DOWN:
		cpu = tegra_get_slowest_cpu_n();
		if (cpu < nr_cpu_ids) {
			/* more than one G core online: drop the slowest */
			up = false;
		} else if (!is_lp_cluster() && !no_lp &&
			   !pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) &&
			   ((now - last_change_time) >= down_delay)) {
			/* last G core idle long enough: switch to LP */
			if(!clk_set_parent(cpu_clk, cpu_lp_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, true);
				hp_stats_update(0, false);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap_locked(NULL);
				break;
			}
		}
		queue_delayed_work(
			hotplug_wq, &hotplug_work, up2gn_delay);
		break;
	case TEGRA_HP_UP:
		if (is_lp_cluster() && !no_lp) {
			/* first step up is always the LP -> G switch */
			if(!clk_set_parent(cpu_clk, cpu_g_clk)) {
				last_change_time = now;
				hp_stats_update(CONFIG_NR_CPUS, false);
				hp_stats_update(0, true);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap_locked(NULL);
			}
		} else {
			switch (tegra_cpu_speed_balance()) {
			/* cpu speed is up and balanced - one more on-line */
			case TEGRA_CPU_SPEED_BALANCED:
				cpu = cpumask_next_zero(0, cpu_online_mask);
				if (cpu < nr_cpu_ids)
					up = true;
				break;
			/* cpu speed is up, but skewed - remove one core */
			case TEGRA_CPU_SPEED_SKEWED:
				cpu = tegra_get_slowest_cpu_n();
				if (cpu < nr_cpu_ids)
					up = false;
				break;
			/* cpu speed is up, but under-utilized - do nothing */
			case TEGRA_CPU_SPEED_BIASED:
			default:
				break;
			}
		}
		queue_delayed_work(
			hotplug_wq, &hotplug_work, up2gn_delay);
		break;
	default:
		pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, hp_state);
	}

	/* enforce minimum residency before taking a core down */
	if (!up && ((now - last_change_time) < down_delay))
			cpu = nr_cpu_ids;

	if (cpu < nr_cpu_ids) {
		last_change_time = now;
		hp_stats_update(cpu, up);
	}
	mutex_unlock(tegra3_cpu_lock);

	/* perform the hotplug operation outside the lock */
	if (cpu < nr_cpu_ids) {
		if (up)
			cpu_up(cpu);
		else
			cpu_down(cpu);
	}
}
359
/*
 * PM QoS MIN_ONLINE_CPUS notifier: if at least one CPU is demanded
 * while on the LP cluster, switch to the G cluster immediately (the LP
 * cluster has a single core, so any multi-core minimum requires G).
 */
static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
{
	mutex_lock(tegra3_cpu_lock);

	if ((n >= 1) && is_lp_cluster() && !no_lp) {
		/* make sure cpu rate is within g-mode range before switching */
		unsigned int speed = max((unsigned long)tegra_getspeed(0),
			clk_get_min_rate(cpu_g_clk) / 1000);
		tegra_update_cpu_speed(speed);

		if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
			last_change_time = jiffies;
			hp_stats_update(CONFIG_NR_CPUS, false);
			hp_stats_update(0, true);
		}
	}
	/* update governor state machine */
	tegra_cpu_set_speed_cap_locked(NULL);
	mutex_unlock(tegra3_cpu_lock);
	return NOTIFY_OK;
}

static struct notifier_block min_cpus_notifier = {
	.notifier_call = min_cpus_notify,
};
385
/*
 * Governor hook, called from cpu-tegra.c on every target-speed change
 * (with tegra3_cpu_lock held by the caller). Drives hp_state from the
 * requested frequency and (re)queues the hotplug work when the state
 * becomes active.
 *
 * @cpu_freq: new target frequency in kHz.
 * @suspend:  true when entering system suspend; forces IDLE and, if
 *            needed, a switch to the G cluster.
 */
void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
	unsigned long up_delay, top_freq, bottom_freq;

	if (!is_g_cluster_present())
		return;

	if (hp_state == TEGRA_HP_DISABLED)
		return;

	if (suspend) {
		hp_state = TEGRA_HP_IDLE;

		/* Switch to G-mode if suspend rate is high enough */
		if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) {
			if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, false);
				hp_stats_update(0, true);
			}
		}
		return;
	}

	/* thresholds depend on which cluster we are currently on */
	if (is_lp_cluster()) {
		up_delay = up2g0_delay;
		top_freq = idle_top_freq;
		bottom_freq = 0;	/* never go "down" from LP */
	} else {
		up_delay = up2gn_delay;
		top_freq = idle_bottom_freq;
		bottom_freq = idle_bottom_freq;
	}

	/* a multi-core QoS minimum forces the UP state regardless of freq */
	if (pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) {
		if (hp_state != TEGRA_HP_UP) {
			hp_state = TEGRA_HP_UP;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		}
		return;
	}

	switch (hp_state) {
	case TEGRA_HP_IDLE:
		if (cpu_freq > top_freq) {
			hp_state = TEGRA_HP_UP;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		} else if (cpu_freq <= bottom_freq) {
			hp_state = TEGRA_HP_DOWN;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		}
		break;
	case TEGRA_HP_DOWN:
		if (cpu_freq > top_freq) {
			hp_state = TEGRA_HP_UP;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		} else if (cpu_freq > bottom_freq) {
			hp_state = TEGRA_HP_IDLE;
		}
		break;
	case TEGRA_HP_UP:
		if (cpu_freq <= bottom_freq) {
			hp_state = TEGRA_HP_DOWN;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		} else if (cpu_freq <= top_freq) {
			hp_state = TEGRA_HP_IDLE;
		}
		break;
	default:
		pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, hp_state);
		BUG();
	}
}
464
465 int __cpuinit tegra_auto_hotplug_init(struct mutex *cpu_lock)
466 {
467         /*
468          * Not bound to the issuer CPU (=> high-priority), has rescue worker
469          * task, single-threaded, freezable.
470          */
471         hotplug_wq = alloc_workqueue(
472                 "cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1);
473         if (!hotplug_wq)
474                 return -ENOMEM;
475         INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func);
476
477         cpu_clk = clk_get_sys(NULL, "cpu");
478         cpu_g_clk = clk_get_sys(NULL, "cpu_g");
479         cpu_lp_clk = clk_get_sys(NULL, "cpu_lp");
480         if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk))
481                 return -ENOENT;
482
483         idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000;
484         idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000;
485
486         up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS);
487         up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS);
488         down_delay = msecs_to_jiffies(DOWN_DELAY_MS);
489
490         tegra3_cpu_lock = cpu_lock;
491         hp_state = INITIAL_STATE;
492         hp_init_stats();
493         pr_info("Tegra auto-hotplug initialized: %s\n",
494                 (hp_state == TEGRA_HP_DISABLED) ? "disabled" : "enabled");
495
496         if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier))
497                 pr_err("%s: Failed to register min cpus PM QoS notifier\n",
498                         __func__);
499
500         return 0;
501 }
502
503 #ifdef CONFIG_DEBUG_FS
504
static struct dentry *hp_debugfs_root;

/* Debugfs-owned PM QoS requests, created in tegra_auto_hotplug_debug_init */
struct pm_qos_request min_cpu_req;
struct pm_qos_request max_cpu_req;
509
/*
 * debugfs "stats" show: dump per-CPU transition counts and up-time.
 * First folds elapsed time into each entry (passing the current state
 * back into hp_stats_update leaves the state unchanged).
 */
static int hp_stats_show(struct seq_file *s, void *data)
{
	int i;
	u64 cur_jiffies = get_jiffies_64();

	mutex_lock(tegra3_cpu_lock);
	if (hp_state != TEGRA_HP_DISABLED) {
		for (i = 0; i <= CONFIG_NR_CPUS; i++) {
			bool was_up = (hp_stats[i].up_down_count & 0x1);
			hp_stats_update(i, was_up);
		}
	}
	mutex_unlock(tegra3_cpu_lock);

	/* header row: G0..Gn-1 columns plus the trailing LP column */
	seq_printf(s, "%-15s ", "cpu:");
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		seq_printf(s, "G%-9d ", i);
	}
	seq_printf(s, "LP\n");

	seq_printf(s, "%-15s ", "transitions:");
	for (i = 0; i <= CONFIG_NR_CPUS; i++) {
		seq_printf(s, "%-10u ", hp_stats[i].up_down_count);
	}
	seq_printf(s, "\n");

	seq_printf(s, "%-15s ", "time plugged:");
	for (i = 0; i <= CONFIG_NR_CPUS; i++) {
		seq_printf(s, "%-10llu ",
			   cputime64_to_clock_t(hp_stats[i].time_up_total));
	}
	seq_printf(s, "\n");

	seq_printf(s, "%-15s %llu\n", "time-stamp:",
		   cputime64_to_clock_t(cur_jiffies));

	return 0;
}
548
549 static int hp_stats_open(struct inode *inode, struct file *file)
550 {
551         return single_open(file, hp_stats_show, inode->i_private);
552 }
553
/* debugfs "stats" file operations (read-only seq_file) */
static const struct file_operations hp_stats_fops = {
	.open		= hp_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
560
561 static int rt_bias_get(void *data, u64 *val)
562 {
563         *val = rt_profile_sel;
564         return 0;
565 }
566 static int rt_bias_set(void *data, u64 val)
567 {
568         if (val < ARRAY_SIZE(rt_profiles))
569                 rt_profile_sel = (u32)val;
570
571         pr_debug("rt_profile_sel set to %d\nthresholds are now [%d, %d, %d]\n",
572                 rt_profile_sel,
573                 rt_profiles[rt_profile_sel][0],
574                 rt_profiles[rt_profile_sel][1],
575                 rt_profiles[rt_profile_sel][2]);
576         return 0;
577 }
578 DEFINE_SIMPLE_ATTRIBUTE(rt_bias_fops, rt_bias_get, rt_bias_set, "%llu\n");
579
580 static int min_cpus_get(void *data, u64 *val)
581 {
582         *val = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
583         return 0;
584 }
585 static int min_cpus_set(void *data, u64 val)
586 {
587         pm_qos_update_request(&min_cpu_req, (s32)val);
588         return 0;
589 }
590 DEFINE_SIMPLE_ATTRIBUTE(min_cpus_fops, min_cpus_get, min_cpus_set, "%llu\n");
591
592 static int max_cpus_get(void *data, u64 *val)
593 {
594         *val = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
595         return 0;
596 }
597 static int max_cpus_set(void *data, u64 val)
598 {
599         pm_qos_update_request(&max_cpu_req, (s32)val);
600         return 0;
601 }
602 DEFINE_SIMPLE_ATTRIBUTE(max_cpus_fops, max_cpus_get, max_cpus_set, "%llu\n");
603
/*
 * Create the tegra_hotplug debugfs directory (min_cpus, max_cpus,
 * stats, core_bias) and register the two PM QoS requests backing the
 * min/max files. On any failure everything created so far is torn
 * down.
 *
 * NOTE(review): the files are created with S_IRUGO only, yet three of
 * them have write handlers — verify whether S_IWUSR was intended.
 */
static int __init tegra_auto_hotplug_debug_init(void)
{
	/* bail if tegra_auto_hotplug_init has not run */
	if (!tegra3_cpu_lock)
		return -ENOENT;

	hp_debugfs_root = debugfs_create_dir("tegra_hotplug", NULL);
	if (!hp_debugfs_root)
		return -ENOMEM;

	pm_qos_add_request(&min_cpu_req, PM_QOS_MIN_ONLINE_CPUS,
			   PM_QOS_DEFAULT_VALUE);
	pm_qos_add_request(&max_cpu_req, PM_QOS_MAX_ONLINE_CPUS,
			   PM_QOS_DEFAULT_VALUE);

	if (!debugfs_create_file(
		"min_cpus", S_IRUGO, hp_debugfs_root, NULL, &min_cpus_fops))
		goto err_out;

	if (!debugfs_create_file(
		"max_cpus", S_IRUGO, hp_debugfs_root, NULL, &max_cpus_fops))
		goto err_out;

	if (!debugfs_create_file(
		"stats", S_IRUGO, hp_debugfs_root, NULL, &hp_stats_fops))
		goto err_out;

	if (!debugfs_create_file(
		"core_bias", S_IRUGO, hp_debugfs_root, NULL, &rt_bias_fops))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(hp_debugfs_root);
	pm_qos_remove_request(&min_cpu_req);
	pm_qos_remove_request(&max_cpu_req);
	return -ENOMEM;
}

late_initcall(tegra_auto_hotplug_debug_init);
644 #endif
645
646 void tegra_auto_hotplug_exit(void)
647 {
648         destroy_workqueue(hotplug_wq);
649 #ifdef CONFIG_DEBUG_FS
650         debugfs_remove_recursive(hp_debugfs_root);
651         pm_qos_remove_request(&min_cpu_req);
652         pm_qos_remove_request(&max_cpu_req);
653 #endif
654 }