/*
 * arch/arm/mach-tegra/cpuidle-t11x.c
 *
 * CPU idle driver for Tegra11x CPUs
 *
 * Copyright (c) 2012-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/localtimer.h>
#include <asm/suspend.h>
#include <asm/cputype.h>

#include <mach/iomap.h>
#include <mach/irqs.h>
#include <mach/hardware.h>

#include <trace/events/power.h>

#include "clock.h"
#include "cpuidle.h"
#include "dvfs.h"
#include "fuse.h"
#include "gic.h"
#include "pm.h"
#include "reset.h"
#include "sleep.h"
#include "timer.h"

#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
#define PMC_POWERGATE_STATUS \
        (IO_ADDRESS(TEGRA_PMC_BASE) + 0x038)

#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)

#ifdef CONFIG_SMP
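/* Absolute deadline (us since boot) by which each CPU must wake, registered
   just before the CPU power gates; LLONG_MAX means no wake timer is pending. */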
static s64 tegra_cpu_wake_by_time[4] = {
        LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
#endif

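/* Mask of CPUs allowed to power gate in idle: bits 0-3 are the fast (G)
   cluster cores, bit 4 the low-power (LP) cluster CPU (see cpu_number()
   below).  As a module parameter it is runtime-writable, typically under
   /sys/module/<modname>/parameters/ for built-in code. */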
static ulong cpu_power_gating_in_idle __read_mostly = 0x1f;
module_param(cpu_power_gating_in_idle, ulong, 0644);

static bool slow_cluster_power_gating_noncpu __read_mostly;
module_param(slow_cluster_power_gating_noncpu, bool, 0644);

static uint fast_cluster_power_down_mode __read_mostly;
module_param(fast_cluster_power_down_mode, uint, 0644);

static struct clk *cpu_clk_for_dvfs;

static int pd_exit_latencies[5];

static struct {
        unsigned int cpu_ready_count[5];
        unsigned int tear_down_count[5];
        unsigned long long cpu_wants_pd_time[5];
        unsigned long long cpu_pg_time[5];
        unsigned long long rail_pd_time;
        unsigned long long c0nc_pg_time;
        unsigned long long c1nc_pg_time;
        unsigned int rail_gating_count;
        unsigned int rail_gating_bin[32];
        unsigned int rail_gating_done_count;
        unsigned int rail_gating_done_count_bin[32];
        unsigned int c0nc_gating_count;
        unsigned int c0nc_gating_bin[32];
        unsigned int c0nc_gating_done_count;
        unsigned int c0nc_gating_done_count_bin[32];
        unsigned int c1nc_gating_count;
        unsigned int c1nc_gating_bin[32];
        unsigned int c1nc_gating_done_count;
        unsigned int c1nc_gating_done_count_bin[32];
        unsigned int pd_int_count[NR_IRQS];
        unsigned int last_pd_int_count[NR_IRQS];
} idle_stats;

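/* Map a duration in ms to a power-of-two histogram bin: fls() yields
   bin b >= 1 for [2^(b-1), 2^b) ms, and bin 0 for sub-millisecond values. */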
static inline unsigned int time_to_bin(unsigned int time)
{
        return fls(time);
}

static inline void tegra_irq_unmask(int irq)
{
        struct irq_data *data = irq_get_irq_data(irq);
        data->chip->irq_unmask(data);
}

static inline unsigned int cpu_number(unsigned int n)
{
        return is_lp_cluster() ? 4 : n;
}

void tegra11x_cpu_idle_stats_pd_ready(unsigned int cpu)
{
        idle_stats.cpu_ready_count[cpu_number(cpu)]++;
}

void tegra11x_cpu_idle_stats_pd_time(unsigned int cpu, s64 us)
{
        idle_stats.cpu_wants_pd_time[cpu_number(cpu)] += us;
}

/* Allow rail off only if all secondary CPUs are power gated, and no
   rail update is in progress */
static bool tegra_rail_off_is_allowed(void)
{
        u32 rst = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
        u32 pg = readl(PMC_POWERGATE_STATUS) >> 8;

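        /* Bits 1-3 of the reset status track CPUs 1-3; the PMC word is
           shifted by 8 so the same 0xE mask tests their power partitions.
           All three must be held in reset with partitions gated (bit
           semantics inferred from the checks below). */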
        if (((rst & 0xE) != 0xE) || ((pg & 0xE) != 0))
                return false;

        if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
                return false;

        return true;
}

bool tegra11x_pd_is_allowed(struct cpuidle_device *dev,
        struct cpuidle_state *state)
{
        s64 request;

        if (!cpumask_test_cpu(cpu_number(dev->cpu),
                                to_cpumask(&cpu_power_gating_in_idle)))
                return false;

        if (tegra_cpu_timer_get_remain(&request))
                return false;

        if (state->exit_latency != pd_exit_latencies[cpu_number(dev->cpu)]) {
                /* possible on the 1st entry after cluster switch */
                state->exit_latency = pd_exit_latencies[cpu_number(dev->cpu)];
                tegra_pd_update_target_residency(state);
        }
        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                return false;
        }

        return true;
}

static inline void tegra11_irq_restore_affinity(void)
{
#ifdef CONFIG_SMP
        /* Disable the distributor. */
        tegra_gic_dist_disable();

        /* Restore the other CPUs' interrupt affinity. */
        tegra_gic_restore_affinity();

        /* Re-enable the distributor. */
        tegra_gic_dist_enable();
#endif
}

static bool tegra_cpu_cluster_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
        ktime_t entry_time;
        ktime_t exit_time;
        bool sleep_completed = false;
        bool multi_cpu_entry = false;
        int bin;
        unsigned int flag = 0;
        s64 sleep_time;

        /* LP2 entry time */
        entry_time = ktime_get();

        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                cpu_do_idle();
                return false;
        }

#ifdef CONFIG_SMP
        multi_cpu_entry = !is_lp_cluster() && (num_online_cpus() > 1);
        if (multi_cpu_entry) {
                s64 wake_time;
                unsigned int i;

                /* Disable the distributor -- this is the only way to
                   prevent the other CPUs from responding to interrupts
                   and potentially fiddling with the distributor
                   registers while we're fiddling with them. */
                tegra_gic_dist_disable();

                /* Did an interrupt come in for another CPU before we
                   could disable the distributor? */
                if (!tegra_rail_off_is_allowed()) {
                        /* Yes, re-enable the distributor and clock gating. */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* LP2 initial targeted wake time */
                wake_time = ktime_to_us(entry_time) + request;

                /* CPU0 must wake up before any of the other CPUs. */
                smp_rmb();
                for (i = 1; i < CONFIG_NR_CPUS; i++)
                        wake_time = min_t(s64, wake_time,
                                tegra_cpu_wake_by_time[i]);

                /* LP2 actual targeted wake time */
                request = wake_time - ktime_to_us(entry_time);
                BUG_ON(wake_time < 0LL);

                if (request < state->target_residency) {
                        /* Not enough time left to enter LP2 */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* Cancel power gating wake timers for all secondary CPUs */
                tegra_pd_timer_cancel_secondary();

                /* Save and disable the affinity setting for the other
                   CPUs and route all interrupts to CPU0. */
                tegra_gic_disable_affinity();

                /* Re-enable the distributor. */
                tegra_gic_dist_enable();
        }
#endif
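        /* Notify CPU PM so per-CPU state (GIC, VFP, etc.) is saved before
           power is removed; paired with cpu_pm_exit() at the end. */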
        cpu_pm_enter();

        sleep_time = request -
                pd_exit_latencies[cpu_number(dev->cpu)];

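        /* Bin the requested sleep time (us -> ms) for the debugfs
           histograms. */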
        bin = time_to_bin((u32)request / 1000);
        idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        if (is_lp_cluster()) {
                /* emulation mode is not supported here, for now */
                flag = TEGRA_POWER_CLUSTER_PART_NONCPU;
                idle_stats.c1nc_gating_count++;
                idle_stats.c1nc_gating_bin[bin]++;
        } else {
                tegra_dvfs_rail_off(tegra_cpu_rail, entry_time);
                flag = (fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK;

                if (((request < tegra_min_residency_crail()) &&
                        (flag != TEGRA_POWER_CLUSTER_PART_MASK)) &&
                        ((fast_cluster_power_down_mode &
                        TEGRA_POWER_CLUSTER_FORCE_MASK) == 0))
                        flag = TEGRA_POWER_CLUSTER_PART_NONCPU;

                if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_count++;
                        idle_stats.rail_gating_bin[bin]++;
                } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        idle_stats.c0nc_gating_count++;
                        idle_stats.c0nc_gating_bin[bin]++;
                }
        }

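        /* Power down the last running CPU together with the selected cluster
           partition.  A non-zero return means entry was aborted; the GIC
           interrupt that was pending at that point is recorded below. */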
        if (tegra_idle_power_down_last(sleep_time, flag) == 0)
                sleep_completed = true;
        else {
                int irq = tegra_gic_pending_interrupt();
                idle_stats.pd_int_count[irq]++;
        }

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
        exit_time = ktime_get();
        if (!is_lp_cluster())
                tegra_dvfs_rail_on(tegra_cpu_rail, exit_time);

        if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL)
                idle_stats.rail_pd_time +=
                        ktime_to_us(ktime_sub(exit_time, entry_time));
        else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                if (is_lp_cluster())
                        idle_stats.c1nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
                else
                        idle_stats.c0nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
        }

        if (multi_cpu_entry)
                tegra11_irq_restore_affinity();

        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until the next tick,
                 * adjust the exit latency based on measurement
                 */
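                /* offset/16 acts as a first-order low-pass filter: each
                   completed sleep nudges the stored exit latency by 1/16 of
                   the measured overshoot, so one noisy sample cannot swing
                   the estimate. */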
                int offset = ktime_to_us(ktime_sub(exit_time, entry_time))
                        - request;
                int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu_number(dev->cpu)] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();

                if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_done_count++;
                        idle_stats.rail_gating_done_count_bin[bin]++;
                } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        if (is_lp_cluster()) {
                                idle_stats.c1nc_gating_done_count++;
                                idle_stats.c1nc_gating_done_count_bin[bin]++;
                        } else {
                                idle_stats.c0nc_gating_done_count++;
                                idle_stats.c0nc_gating_done_count_bin[bin]++;
                        }
                }

                pr_debug("%lld %lld %d %d\n", request,
                        ktime_to_us(ktime_sub(exit_time, entry_time)),
                        offset, bin);
        }

        cpu_pm_exit();

        return true;
}

static bool tegra_cpu_core_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
#ifdef CONFIG_SMP
        s64 sleep_time;
        ktime_t entry_time;
        struct arch_timer_context timer_context;
        bool sleep_completed = false;
        struct tick_sched *ts = tick_get_tick_sched(dev->cpu);
#ifdef CONFIG_TRUSTED_FOUNDATIONS
        unsigned int cpu = cpu_number(dev->cpu);
#endif

        if (!arch_timer_get_state(&timer_context)) {
                if ((timer_context.cntp_ctl & ARCH_TIMER_CTRL_ENABLE) &&
                    !(timer_context.cntp_ctl & ARCH_TIMER_CTRL_IT_MASK)) {
                        if (timer_context.cntp_tval <= 0) {
                                cpu_do_idle();
                                return false;
                        }
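                        /* Convert the timer's remaining downcount (ticks at
                           cntfrq Hz) to microseconds. */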
                        request = div_u64((u64)timer_context.cntp_tval *
                                        1000000, timer_context.cntfrq);
#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
                        if (request >= state->target_residency) {
                                timer_context.cntp_tval -= state->exit_latency *
                                        (timer_context.cntfrq / 1000000);
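                                /* Write the shortened downcount back to
                                   CNTP_TVAL (CP15 c14, c2, 0) so the timer
                                   fires early enough to cover the power-gate
                                   exit latency. */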
                                __asm__("mcr p15, 0, %0, c14, c2, 0\n"
                                        :
                                        :
                                        "r"(timer_context.cntp_tval));
                        }
#endif
                }
        }

        if (!tegra_is_cpu_wake_timer_ready(dev->cpu) ||
            (request < state->target_residency) ||
            (!ts) || (ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
                /*
                 * Not enough time left to enter LP2, or wake timer not ready
                 */
                cpu_do_idle();
                return false;
        }

        cpu_pm_enter();

#if !defined(CONFIG_TEGRA_LP2_CPU_TIMER)
        sleep_time = request - state->exit_latency;
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        tegra_pd_set_trigger(sleep_time);
#endif
        idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

        entry_time = ktime_get();

        /* Save time this CPU must be awakened by. */
        tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(entry_time) + request;
        smp_wmb();

#ifdef CONFIG_TRUSTED_FOUNDATIONS
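        /* Before the boot CPU of either cluster (0 on G, 4 on LP) is power
           gated, hand the LP2 reset handler address to the Trusted
           Foundations secure monitor via an SMC (the argument values are
           TF-specific magic). */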
        if ((cpu == 0) || (cpu == 4)) {
                tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE7,
                                (TEGRA_RESET_HANDLER_BASE +
                                tegra_cpu_reset_handler_offset));
        }
#endif
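        /* Save this CPU's context and power gate the core; execution resumes
           here once the wake timer or an interrupt brings the CPU back up. */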
        cpu_suspend(0, tegra3_sleep_cpu_secondary_finish);

        tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
        if (!arch_timer_get_state(&timer_context))
                sleep_completed = (timer_context.cntp_tval <= 0);
#else
        sleep_completed = !tegra_pd_timer_remain();
        tegra_pd_set_trigger(0);
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
#endif
        sleep_time = ktime_to_us(ktime_sub(ktime_get(), entry_time));
        idle_stats.cpu_pg_time[cpu_number(dev->cpu)] += sleep_time;
        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until timer expires,
                 * adjust the exit latency based on measurement
                 */
                int offset = sleep_time - request;
                int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu_number(dev->cpu)] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();
        }
#endif
        cpu_pm_exit();

        return true;
}

bool tegra11x_idle_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state)
{
        bool power_down;
        bool cpu_gating_only = false;
        bool power_gating_cpu_only = true;
        int status = -1;
        unsigned long rate = ULONG_MAX;
        s64 request;

        if (tegra_cpu_timer_get_remain(&request)) {
                cpu_do_idle();
                return false;
        }

        tegra_set_cpu_in_pd(dev->cpu);
        cpu_gating_only = (((fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK) == 0);

        if (is_lp_cluster()) {
                if (slow_cluster_power_gating_noncpu &&
                        (request > tegra_min_residency_noncpu()))
                        power_gating_cpu_only = false;
                else
                        power_gating_cpu_only = true;
        } else if (!cpu_gating_only &&
                (num_online_cpus() == 1) &&
                tegra_rail_off_is_allowed()) {
                if (fast_cluster_power_down_mode &
                        TEGRA_POWER_CLUSTER_FORCE_MASK)
                        power_gating_cpu_only = cpu_gating_only;
                else if (request > tegra_min_residency_noncpu())
                        power_gating_cpu_only = false;
                else
                        power_gating_cpu_only = true;
        } else
                power_gating_cpu_only = true;

        if (power_gating_cpu_only)
                power_down = tegra_cpu_core_power_down(dev, state, request);
        else {
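                /* On the LP cluster, move the CPU clock to its backup rate
                   before gating the non-CPU partition; the exchange call
                   swaps the current rate with *rate, so the second call
                   below restores the original rate. */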
                if (is_lp_cluster())
                        status = tegra_cpu_backup_rate_exchange(&rate);

                power_down = tegra_cpu_cluster_power_down(dev, state, request);

                /* restore cpu clock after cluster power ungating */
                if (status == 0)
                        tegra_cpu_backup_rate_exchange(&rate);
        }

        tegra_clear_cpu_in_pd(dev->cpu);

        return power_down;
}

#ifdef CONFIG_DEBUG_FS
int tegra11x_pd_debug_show(struct seq_file *s, void *data)
{
        int bin;
        int i;
        seq_printf(s, "                                    cpu0     cpu1     cpu2     cpu3     cpulp\n");
        seq_printf(s, "-----------------------------------------------------------------------------\n");
        seq_printf(s, "cpu ready:                      %8u %8u %8u %8u %8u\n",
                idle_stats.cpu_ready_count[0],
                idle_stats.cpu_ready_count[1],
                idle_stats.cpu_ready_count[2],
                idle_stats.cpu_ready_count[3],
                idle_stats.cpu_ready_count[4]);
        seq_printf(s, "tear down:                      %8u %8u %8u %8u %8u\n",
                idle_stats.tear_down_count[0],
                idle_stats.tear_down_count[1],
                idle_stats.tear_down_count[2],
                idle_stats.tear_down_count[3],
                idle_stats.tear_down_count[4]);
        seq_printf(s, "rail gating count:      %8u\n",
                idle_stats.rail_gating_count);
        seq_printf(s, "rail gating completed:  %8u %7u%%\n",
                idle_stats.rail_gating_done_count,
                idle_stats.rail_gating_done_count * 100 /
                        (idle_stats.rail_gating_count ?: 1));

        seq_printf(s, "c0nc gating count:      %8u\n",
                idle_stats.c0nc_gating_count);
        seq_printf(s, "c0nc gating completed:  %8u %7u%%\n",
                idle_stats.c0nc_gating_done_count,
                idle_stats.c0nc_gating_done_count * 100 /
                        (idle_stats.c0nc_gating_count ?: 1));

        seq_printf(s, "c1nc gating count:      %8u\n",
                idle_stats.c1nc_gating_count);
        seq_printf(s, "c1nc gating completed:  %8u %7u%%\n",
                idle_stats.c1nc_gating_done_count,
                idle_stats.c1nc_gating_done_count * 100 /
                        (idle_stats.c1nc_gating_count ?: 1));

        seq_printf(s, "\n");
        seq_printf(s, "cpu ready time:                 " \
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_wants_pd_time[0], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[1], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[2], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[3], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[4], 1000));

        seq_printf(s, "cpu power gating time:          " \
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_pg_time[0], 1000),
                div64_u64(idle_stats.cpu_pg_time[1], 1000),
                div64_u64(idle_stats.cpu_pg_time[2], 1000),
                div64_u64(idle_stats.cpu_pg_time[3], 1000),
                div64_u64(idle_stats.cpu_pg_time[4], 1000));

        seq_printf(s, "power gated %%:                 " \
                        "%7d%% %7d%% %7d%% %7d%% %7d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.cpu_pg_time[0] * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[1] ?
                        div64_u64(idle_stats.cpu_pg_time[1] * 100,
                        idle_stats.cpu_wants_pd_time[1]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[2] ?
                        div64_u64(idle_stats.cpu_pg_time[2] * 100,
                        idle_stats.cpu_wants_pd_time[2]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[3] ?
                        div64_u64(idle_stats.cpu_pg_time[3] * 100,
                        idle_stats.cpu_wants_pd_time[3]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(idle_stats.cpu_pg_time[4] * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");
        seq_printf(s, "rail gating time  c0nc gating time  c1nc gating time\n");
        seq_printf(s, "%8llu ms          %8llu ms          %8llu ms\n",
                div64_u64(idle_stats.rail_pd_time, 1000),
                div64_u64(idle_stats.c0nc_pg_time, 1000),
                div64_u64(idle_stats.c1nc_pg_time, 1000));
        seq_printf(s, "%8d%%             %8d%%             %8d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.rail_pd_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.c0nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(idle_stats.c1nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "rail gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
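        /* Histogram of requested sleep times: bin b >= 1 covers
           [2^(b-1), 2^b) ms, matching time_to_bin(); the c0nc/c1nc tables
           below use the same layout. */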
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.rail_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.rail_gating_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin] * 100 /
                                idle_stats.rail_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c0nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c0nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c0nc_gating_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c0nc_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c1nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c1nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c1nc_gating_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c1nc_gating_bin[bin]);
        }

        seq_printf(s, "\n");
        seq_printf(s, "%3s %20s %6s %10s\n",
                "int", "name", "count", "last count");
        seq_printf(s, "--------------------------------------------\n");
        for (i = 0; i < NR_IRQS; i++) {
                if (idle_stats.pd_int_count[i] == 0)
                        continue;
                seq_printf(s, "%3d %20s %6d %10d\n",
                        i, irq_to_desc(i)->action ?
                                irq_to_desc(i)->action->name ?: "???" : "???",
                        idle_stats.pd_int_count[i],
                        idle_stats.pd_int_count[i] -
                                idle_stats.last_pd_int_count[i]);
                idle_stats.last_pd_int_count[i] = idle_stats.pd_int_count[i];
        }
        return 0;
}
#endif

int __init tegra11x_cpuidle_init_soc(struct tegra_cpuidle_ops *idle_ops)
{
        int i;
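        /* Positional initializer: entry order must match the member order of
           struct tegra_cpuidle_ops (see cpuidle.h). */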
        struct tegra_cpuidle_ops ops = {
                tegra11x_idle_power_down,
                tegra11x_cpu_idle_stats_pd_ready,
                tegra11x_cpu_idle_stats_pd_time,
                tegra11x_pd_is_allowed,
#ifdef CONFIG_DEBUG_FS
                tegra11x_pd_debug_show
#endif
        };

        cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");

        for (i = 0; i < ARRAY_SIZE(pd_exit_latencies); i++)
                pd_exit_latencies[i] = tegra_pg_exit_latency;

        *idle_ops = ops;
        return 0;
}