/*
 * arch/arm/mach-tegra/cpu-tegra3.c
 *
 * CPU auto-hotplug for Tegra3 CPUs
 *
 * Copyright (c) 2011-2012, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpu.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pm_qos.h>

#include "pm.h"
#include "cpu-tegra.h"
#include "clock.h"

#define INITIAL_STATE		TEGRA_HP_DISABLED
#define UP2G0_DELAY_MS		70
#define UP2Gn_DELAY_MS		100
#define DOWN_DELAY_MS		2000

static struct mutex *tegra3_cpu_lock;

static struct workqueue_struct *hotplug_wq;
static struct delayed_work hotplug_work;

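/*
 * The tunables below are exposed as writable module parameters (mode 0644).
 * Since this code is built in, they should appear under
 * /sys/module/cpu_tegra3/parameters/ (path assumes the default KBUILD_MODNAME
 * for this object), e.g.:
 *
 *   echo 3000 > /sys/module/cpu_tegra3/parameters/down_delay
 *
 * Note that up2gn_delay, up2g0_delay and down_delay hold jiffies once
 * initialized, even though the *_MS defaults above are in milliseconds.
 */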
static bool no_lp;
module_param(no_lp, bool, 0644);

static unsigned long up2gn_delay;
static unsigned long up2g0_delay;
static unsigned long down_delay;
module_param(up2gn_delay, ulong, 0644);
module_param(up2g0_delay, ulong, 0644);
module_param(down_delay, ulong, 0644);

static unsigned int idle_top_freq;
static unsigned int idle_bottom_freq;
module_param(idle_top_freq, uint, 0644);
module_param(idle_bottom_freq, uint, 0644);

static int mp_overhead = 10;
module_param(mp_overhead, int, 0644);

static int balance_level = 60;
module_param(balance_level, int, 0644);

static struct clk *cpu_clk;
static struct clk *cpu_g_clk;
static struct clk *cpu_lp_clk;

static unsigned long last_change_time;

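/*
 * Per-CPU hotplug statistics. The low bit of up_down_count encodes the
 * current state (1 = online), so the counter's parity always matches the
 * CPU's state; time_up_total accumulates online time in jiffies.
 */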
static struct {
	cputime64_t time_up_total;
	u64 last_update;
	unsigned int up_down_count;
} hp_stats[CONFIG_NR_CPUS + 1];	/* Append LP CPU entry at the end */

static void hp_init_stats(void)
{
	int i;
	u64 cur_jiffies = get_jiffies_64();

	for (i = 0; i <= CONFIG_NR_CPUS; i++) {
		hp_stats[i].time_up_total = 0;
		hp_stats[i].last_update = cur_jiffies;

		hp_stats[i].up_down_count = 0;
		if (is_lp_cluster()) {
			if (i == CONFIG_NR_CPUS)
				hp_stats[i].up_down_count = 1;
		} else {
			if ((i < nr_cpu_ids) && cpu_online(i))
				hp_stats[i].up_down_count = 1;
		}
	}
}

static void hp_stats_update(unsigned int cpu, bool up)
{
	u64 cur_jiffies = get_jiffies_64();
	bool was_up = hp_stats[cpu].up_down_count & 0x1;

	if (was_up)
		hp_stats[cpu].time_up_total =
			hp_stats[cpu].time_up_total +
			(cur_jiffies - hp_stats[cpu].last_update);

	if (was_up != up) {
		hp_stats[cpu].up_down_count++;
		if ((hp_stats[cpu].up_down_count & 0x1) != up) {
			/* FIXME: sysfs user space CPU control breaks stats */
			pr_err("tegra hotplug stats out of sync with %s CPU%d\n",
			       (cpu < CONFIG_NR_CPUS) ? "G" : "LP",
			       (cpu < CONFIG_NR_CPUS) ? cpu : 0);
			hp_stats[cpu].up_down_count ^= 0x1;
		}
	}
	hp_stats[cpu].last_update = cur_jiffies;
}

enum {
	TEGRA_HP_DISABLED = 0,
	TEGRA_HP_IDLE,
	TEGRA_HP_DOWN,
	TEGRA_HP_UP,
};
static int hp_state;

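/*
 * The "auto_hotplug" module parameter is backed by hp_state through the ops
 * below. param_set_bool() restricts user writes to 0 (TEGRA_HP_DISABLED) or
 * 1 (TEGRA_HP_IDLE); the remaining states are driven internally by the
 * governor. A minimal usage sketch (path assumes built-in code):
 *
 *   echo 0 > /sys/module/cpu_tegra3/parameters/auto_hotplug
 */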
static int hp_state_set(const char *arg, const struct kernel_param *kp)
{
	int ret = 0;
	int old_state;

	if (!tegra3_cpu_lock)
		return ret;

	mutex_lock(tegra3_cpu_lock);

	old_state = hp_state;
	ret = param_set_bool(arg, kp);	/* set idle or disabled only */

	if (ret == 0) {
		if ((hp_state == TEGRA_HP_DISABLED) &&
		    (old_state != TEGRA_HP_DISABLED)) {
			mutex_unlock(tegra3_cpu_lock);
			cancel_delayed_work_sync(&hotplug_work);
			mutex_lock(tegra3_cpu_lock);
			pr_info("Tegra auto-hotplug disabled\n");
		} else if (hp_state != TEGRA_HP_DISABLED) {
			if (old_state == TEGRA_HP_DISABLED) {
				pr_info("Tegra auto-hotplug enabled\n");
				hp_init_stats();
			}
			/* catch-up with governor target speed */
			tegra_cpu_set_speed_cap(NULL);
		}
	} else {
		pr_warn("%s: unable to set tegra hotplug state %s\n",
			__func__, arg);
	}

	mutex_unlock(tegra3_cpu_lock);
	return ret;
}

static int hp_state_get(char *buffer, const struct kernel_param *kp)
{
	return param_get_int(buffer, kp);
}

static const struct kernel_param_ops tegra_hp_state_ops = {
	.set = hp_state_set,
	.get = hp_state_get,
};
module_param_cb(auto_hotplug, &tegra_hp_state_ops, &hp_state, 0644);

enum {
	TEGRA_CPU_SPEED_BALANCED,
	TEGRA_CPU_SPEED_BIASED,
	TEGRA_CPU_SPEED_SKEWED,
};

#define NR_FSHIFT	2

static unsigned int rt_profile_sel;

/*
 * Runnable-thread thresholds, in quarter-thread units (1 << NR_FSHIFT = 4
 * steps per thread): e.g., a threshold of 9 means 2.25 average runnable
 * threads.
 */

static unsigned int rt_profile_default[] = {
/*	1,  2,  3,  4 - on-line cpus target */
	5,  9, 10, UINT_MAX
};

static unsigned int rt_profile_1[] = {
/*	1,  2,  3,  4 - on-line cpus target */
	8,  9, 10, UINT_MAX
};

static unsigned int rt_profile_2[] = {
/*	1,  2,  3,  4 - on-line cpus target */
	5, 13, 14, UINT_MAX
};

static unsigned int rt_profile_off[] = { /* disables the runnable-thread heuristic */
	0,  0,  0, UINT_MAX
};

static unsigned int *rt_profiles[] = {
	rt_profile_default,
	rt_profile_1,
	rt_profile_2,
	rt_profile_off
};
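
/*
 * The active profile is selected by rt_profile_sel, which can be changed at
 * run time through the "core_bias" debugfs file created below.
 */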

static unsigned int nr_run_hysteresis = 2;	/* 0.5 thread */
static unsigned int nr_run_last;

static noinline int tegra_cpu_speed_balance(void)
{
	unsigned long highest_speed = tegra_cpu_highest_speed();
	unsigned long balanced_speed = highest_speed * balance_level / 100;
	unsigned long skewed_speed = balanced_speed / 2;
	unsigned int nr_cpus = num_online_cpus();
	unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
	unsigned int avg_nr_run = avg_nr_running();
	unsigned int nr_run;

	/* Evaluate:
	 * - distribution of freq targets for already on-lined CPUs
	 * - average number of runnable threads
	 * - effective MIPS available within EDP frequency limits,
	 * and return:
	 * TEGRA_CPU_SPEED_BALANCED to bring one more CPU core on-line
	 * TEGRA_CPU_SPEED_BIASED to keep CPU core composition unchanged
	 * TEGRA_CPU_SPEED_SKEWED to take a CPU core off-line
	 */

	unsigned int *current_profile = rt_profiles[rt_profile_sel];
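
	/*
	 * Pick the smallest target CPU count whose runnable-thread threshold
	 * covers the current average. Thresholds are quarter-threads
	 * (NR_FSHIFT = 2) while avg_nr_running() is a FSHIFT (= 11, per
	 * <linux/sched.h>) fixed-point value, hence the shift by
	 * FSHIFT - NR_FSHIFT. Worked example: a threshold of 9 becomes
	 * 9 << 9 = 4608, i.e. 4608 / 2048 = 2.25 runnable threads; the
	 * hysteresis of 2 quarter-threads (0.5 thread) keeps the result
	 * from flapping around a threshold.
	 */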
	for (nr_run = 1; nr_run < ARRAY_SIZE(rt_profile_default); nr_run++) {
		unsigned int nr_threshold = current_profile[nr_run - 1];
		if (nr_run_last <= nr_run)
			nr_threshold += nr_run_hysteresis;
		if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT)))
			break;
	}
	nr_run_last = nr_run;

	if (((tegra_count_slow_cpus(skewed_speed) >= 2) ||
	     (nr_run < nr_cpus) ||
	     tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) ||
	     (highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) &&
	    (nr_cpus > min_cpus))
		return TEGRA_CPU_SPEED_SKEWED;

	if (((tegra_count_slow_cpus(balanced_speed) >= 1) ||
	     (nr_run <= nr_cpus) ||
	     (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) ||
	     (highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) &&
	    (nr_cpus >= min_cpus))
		return TEGRA_CPU_SPEED_BIASED;

	return TEGRA_CPU_SPEED_BALANCED;
}

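/*
 * Deferred hotplug worker: depending on hp_state it either takes the slowest
 * CPU offline, brings one more CPU online, or switches between the LP and G
 * clusters, and re-arms itself while in the UP/DOWN states.
 */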
static void __cpuinit tegra_auto_hotplug_work_func(struct work_struct *work)
{
	bool up = false;
	unsigned int cpu = nr_cpu_ids;
	unsigned long now = jiffies;

	mutex_lock(tegra3_cpu_lock);

	switch (hp_state) {
	case TEGRA_HP_DISABLED:
	case TEGRA_HP_IDLE:
		break;
	case TEGRA_HP_DOWN:
		cpu = tegra_get_slowest_cpu_n();
		if (cpu < nr_cpu_ids) {
			up = false;
		} else if (!is_lp_cluster() && !no_lp &&
			   !pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) &&
			   ((now - last_change_time) >= down_delay)) {
			if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, true);
				hp_stats_update(0, false);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
				break;
			}
		}
		queue_delayed_work(
			hotplug_wq, &hotplug_work, up2gn_delay);
		break;
	case TEGRA_HP_UP:
		if (is_lp_cluster() && !no_lp) {
			if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
				last_change_time = now;
				hp_stats_update(CONFIG_NR_CPUS, false);
				hp_stats_update(0, true);
				/* catch-up with governor target speed */
				tegra_cpu_set_speed_cap(NULL);
			}
		} else {
			switch (tegra_cpu_speed_balance()) {
			/* cpu speed is up and balanced - one more on-line */
			case TEGRA_CPU_SPEED_BALANCED:
				cpu = cpumask_next_zero(0, cpu_online_mask);
				if (cpu < nr_cpu_ids)
					up = true;
				break;
			/* cpu speed is up, but skewed - remove one core */
			case TEGRA_CPU_SPEED_SKEWED:
				cpu = tegra_get_slowest_cpu_n();
				if (cpu < nr_cpu_ids)
					up = false;
				break;
			/* cpu speed is up, but under-utilized - do nothing */
			case TEGRA_CPU_SPEED_BIASED:
			default:
				break;
			}
		}
		queue_delayed_work(
			hotplug_wq, &hotplug_work, up2gn_delay);
		break;
	default:
		pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, hp_state);
	}

	if (!up && ((now - last_change_time) < down_delay))
		cpu = nr_cpu_ids;

	if (cpu < nr_cpu_ids) {
		last_change_time = now;
		hp_stats_update(cpu, up);
	}
	mutex_unlock(tegra3_cpu_lock);

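	/*
	 * The actual cpu_up()/cpu_down() call is made after dropping
	 * tegra3_cpu_lock; the hotplug notifier chains it triggers (cpufreq
	 * among them) may well take the same lock again.
	 */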
	if (cpu < nr_cpu_ids) {
		if (up)
			cpu_up(cpu);
		else
			cpu_down(cpu);
	}
}

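/*
 * PM QoS notifier for PM_QOS_MIN_ONLINE_CPUS: a request for one or more
 * online CPUs while running on the LP cluster forces a switch to the G
 * cluster (raising the clock into G-mode range first), since only the G
 * cluster can run more than one core.
 */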
static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
{
	mutex_lock(tegra3_cpu_lock);

	if ((n >= 1) && is_lp_cluster()) {
		/* make sure cpu rate is within g-mode range before switching */
		unsigned int speed = max((unsigned long)tegra_getspeed(0),
			clk_get_min_rate(cpu_g_clk) / 1000);
		tegra_update_cpu_speed(speed);

		if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
			last_change_time = jiffies;
			hp_stats_update(CONFIG_NR_CPUS, false);
			hp_stats_update(0, true);
		}
	}
	/* update governor state machine */
	tegra_cpu_set_speed_cap(NULL);
	mutex_unlock(tegra3_cpu_lock);
	return NOTIFY_OK;
}

static struct notifier_block min_cpus_notifier = {
	.notifier_call = min_cpus_notify,
};

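/*
 * Governor hook, expected to be called with tegra3_cpu_lock held whenever
 * the cpufreq target changes: cpu_freq is compared against cluster-dependent
 * top and bottom thresholds to move the IDLE/UP/DOWN state machine, and the
 * worker above does the actual plugging after the configured delay.
 */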
void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
	unsigned long up_delay, top_freq, bottom_freq;

	if (!is_g_cluster_present())
		return;

	if (hp_state == TEGRA_HP_DISABLED)
		return;

	if (suspend) {
		hp_state = TEGRA_HP_IDLE;

		/* Switch to G-mode if suspend rate is high enough */
		if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) {
			if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
				hp_stats_update(CONFIG_NR_CPUS, false);
				hp_stats_update(0, true);
			}
		}
		return;
	}

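	/*
	 * Cluster-dependent thresholds: on the LP cluster bottom_freq is 0,
	 * so the state machine can only move toward UP (i.e. a switch to the
	 * G cluster); on the G cluster both thresholds collapse to
	 * idle_bottom_freq, making IDLE a transient state between UP and DOWN.
	 */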
	if (is_lp_cluster()) {
		up_delay = up2g0_delay;
		top_freq = idle_top_freq;
		bottom_freq = 0;
	} else {
		up_delay = up2gn_delay;
		top_freq = idle_bottom_freq;
		bottom_freq = idle_bottom_freq;
	}

	if (pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) {
		if (hp_state != TEGRA_HP_UP) {
			hp_state = TEGRA_HP_UP;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		}
		return;
	}

	switch (hp_state) {
	case TEGRA_HP_IDLE:
		if (cpu_freq > top_freq) {
			hp_state = TEGRA_HP_UP;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		} else if (cpu_freq <= bottom_freq) {
			hp_state = TEGRA_HP_DOWN;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		}
		break;
	case TEGRA_HP_DOWN:
		if (cpu_freq > top_freq) {
			hp_state = TEGRA_HP_UP;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		} else if (cpu_freq > bottom_freq) {
			hp_state = TEGRA_HP_IDLE;
		}
		break;
	case TEGRA_HP_UP:
		if (cpu_freq <= bottom_freq) {
			hp_state = TEGRA_HP_DOWN;
			queue_delayed_work(
				hotplug_wq, &hotplug_work, up_delay);
		} else if (cpu_freq <= top_freq) {
			hp_state = TEGRA_HP_IDLE;
		}
		break;
	default:
		pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, hp_state);
		BUG();
	}
}

int __cpuinit tegra_auto_hotplug_init(struct mutex *cpu_lock)
{
	/*
	 * Not bound to the issuer CPU (=> high-priority), has rescue worker
	 * task, single-threaded, freezable.
	 */
	hotplug_wq = alloc_workqueue(
		"cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1);
	if (!hotplug_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func);

	cpu_clk = clk_get_sys(NULL, "cpu");
	cpu_g_clk = clk_get_sys(NULL, "cpu_g");
	cpu_lp_clk = clk_get_sys(NULL, "cpu_lp");
	if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk))
		return -ENOENT;

	idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000;
	idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000;

	up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS);
	up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS);
	down_delay = msecs_to_jiffies(DOWN_DELAY_MS);

	tegra3_cpu_lock = cpu_lock;
	hp_state = INITIAL_STATE;
	hp_init_stats();
	pr_info("Tegra auto-hotplug initialized: %s\n",
		(hp_state == TEGRA_HP_DISABLED) ? "disabled" : "enabled");

	if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier))
		pr_err("%s: Failed to register min cpus PM QoS notifier\n",
			__func__);

	return 0;
}

#ifdef CONFIG_DEBUG_FS

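/*
 * Debugfs interface: a "tegra_hotplug" directory holding the min_cpus,
 * max_cpus, stats and core_bias files created below. Sketch of use, assuming
 * debugfs is mounted at /sys/kernel/debug:
 *
 *   cat /sys/kernel/debug/tegra_hotplug/stats
 *   echo 2 > /sys/kernel/debug/tegra_hotplug/min_cpus
 */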
static struct dentry *hp_debugfs_root;

struct pm_qos_request min_cpu_req;
struct pm_qos_request max_cpu_req;

static int hp_stats_show(struct seq_file *s, void *data)
{
	int i;
	u64 cur_jiffies = get_jiffies_64();

	mutex_lock(tegra3_cpu_lock);
	if (hp_state != TEGRA_HP_DISABLED) {
		for (i = 0; i <= CONFIG_NR_CPUS; i++) {
			bool was_up = (hp_stats[i].up_down_count & 0x1);
			hp_stats_update(i, was_up);
		}
	}
	mutex_unlock(tegra3_cpu_lock);

	seq_printf(s, "%-15s ", "cpu:");
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		seq_printf(s, "G%-9d ", i);
	seq_printf(s, "LP\n");

	seq_printf(s, "%-15s ", "transitions:");
	for (i = 0; i <= CONFIG_NR_CPUS; i++)
		seq_printf(s, "%-10u ", hp_stats[i].up_down_count);
	seq_printf(s, "\n");

	seq_printf(s, "%-15s ", "time plugged:");
	for (i = 0; i <= CONFIG_NR_CPUS; i++)
		seq_printf(s, "%-10llu ",
			   cputime64_to_clock_t(hp_stats[i].time_up_total));
	seq_printf(s, "\n");

	seq_printf(s, "%-15s %llu\n", "time-stamp:",
		   cputime64_to_clock_t(cur_jiffies));

	return 0;
}

static int hp_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, hp_stats_show, inode->i_private);
}

static const struct file_operations hp_stats_fops = {
	.open		= hp_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int rt_bias_get(void *data, u64 *val)
{
	*val = rt_profile_sel;
	return 0;
}
static int rt_bias_set(void *data, u64 val)
{
	if (val >= ARRAY_SIZE(rt_profiles))
		return -EINVAL;
	rt_profile_sel = (u32)val;

	pr_debug("rt_profile_sel set to %u\nthresholds are now [%u, %u, %u]\n",
		rt_profile_sel,
		rt_profiles[rt_profile_sel][0],
		rt_profiles[rt_profile_sel][1],
		rt_profiles[rt_profile_sel][2]);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(rt_bias_fops, rt_bias_get, rt_bias_set, "%llu\n");

static int min_cpus_get(void *data, u64 *val)
{
	*val = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
	return 0;
}
static int min_cpus_set(void *data, u64 val)
{
	pm_qos_update_request(&min_cpu_req, (s32)val);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(min_cpus_fops, min_cpus_get, min_cpus_set, "%llu\n");

static int max_cpus_get(void *data, u64 *val)
{
	*val = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
	return 0;
}
static int max_cpus_set(void *data, u64 val)
{
	pm_qos_update_request(&max_cpu_req, (s32)val);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(max_cpus_fops, max_cpus_get, max_cpus_set, "%llu\n");

static int __init tegra_auto_hotplug_debug_init(void)
{
	if (!tegra3_cpu_lock)
		return -ENOENT;

	hp_debugfs_root = debugfs_create_dir("tegra_hotplug", NULL);
	if (!hp_debugfs_root)
		return -ENOMEM;

	pm_qos_add_request(&min_cpu_req, PM_QOS_MIN_ONLINE_CPUS,
			   PM_QOS_DEFAULT_VALUE);
	pm_qos_add_request(&max_cpu_req, PM_QOS_MAX_ONLINE_CPUS,
			   PM_QOS_DEFAULT_VALUE);

	if (!debugfs_create_file(
		"min_cpus", S_IRUGO, hp_debugfs_root, NULL, &min_cpus_fops))
		goto err_out;

	if (!debugfs_create_file(
		"max_cpus", S_IRUGO, hp_debugfs_root, NULL, &max_cpus_fops))
		goto err_out;

	if (!debugfs_create_file(
		"stats", S_IRUGO, hp_debugfs_root, NULL, &hp_stats_fops))
		goto err_out;

	if (!debugfs_create_file(
		"core_bias", S_IRUGO, hp_debugfs_root, NULL, &rt_bias_fops))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(hp_debugfs_root);
	pm_qos_remove_request(&min_cpu_req);
	pm_qos_remove_request(&max_cpu_req);
	return -ENOMEM;
}

late_initcall(tegra_auto_hotplug_debug_init);
#endif

void tegra_auto_hotplug_exit(void)
{
	destroy_workqueue(hotplug_wq);
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(hp_debugfs_root);
	pm_qos_remove_request(&min_cpu_req);
	pm_qos_remove_request(&max_cpu_req);
#endif
}