ARM: tegra11x: Define ncpu residency for 2 clusters
/*
 * arch/arm/mach-tegra/cpuidle-t11x.c
 *
 * CPU idle driver for Tegra11x CPUs
 *
 * Copyright (c) 2012-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/localtimer.h>
#include <asm/suspend.h>
#include <asm/cputype.h>

#include <mach/irqs.h>
#include <mach/hardware.h>

#include <trace/events/power.h>

#include "clock.h"
#include "cpuidle.h"
#include "dvfs.h"
#include "fuse.h"
#include "gic.h"
#include "iomap.h"
#include "pm.h"
#include "reset.h"
#include "sleep.h"
#include "timer.h"

#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
#define PMC_POWERGATE_STATUS \
        (IO_ADDRESS(TEGRA_PMC_BASE) + 0x038)

#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)

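/* Minimum idle time (us) for which gating the non-CPU partition of the
   slow (LP) and fast (G) cluster, respectively, is worthwhile */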
#define TEGRA_MIN_RESIDENCY_NCPU_SLOW 2000
#define TEGRA_MIN_RESIDENCY_NCPU_FAST 13000

#ifdef CONFIG_SMP
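/* Absolute time (us) by which each CPU must be awake; LLONG_MAX means
   no wake deadline is pending for that CPU */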
static s64 tegra_cpu_wake_by_time[4] = {
        LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
#endif

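/* Bitmask of CPUs allowed to power gate in idle: bits 0-3 are the fast
   cluster CPUs, bit 4 is the LP cluster CPU (see cpu_number() below) */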
static ulong cpu_power_gating_in_idle __read_mostly = 0x1f;
module_param(cpu_power_gating_in_idle, ulong, 0644);

static bool slow_cluster_power_gating_noncpu __read_mostly;
module_param(slow_cluster_power_gating_noncpu, bool, 0644);

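/* How much of the fast cluster to power down beyond the CPU cores; shifted
   into TEGRA_POWER_CLUSTER_PART_MASK when the power-down flag is built */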
static uint fast_cluster_power_down_mode __read_mostly;
module_param(fast_cluster_power_down_mode, uint, 0644);

static struct clk *cpu_clk_for_dvfs;

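/* Measured power-gate exit latency (us) per CPU slot, refined at runtime */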
static int pd_exit_latencies[5];

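/*
 * Idle statistics. Slots 0-3 are the fast (G) cluster CPUs, slot 4 the LP
 * cluster CPU. "rail" is whole CPU rail gating; "c0nc"/"c1nc" are non-CPU
 * partition gating on the fast and slow cluster, respectively.
 */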
static struct {
        unsigned int cpu_ready_count[5];
        unsigned int tear_down_count[5];
        unsigned long long cpu_wants_pd_time[5];
        unsigned long long cpu_pg_time[5];
        unsigned long long rail_pd_time;
        unsigned long long c0nc_pg_time;
        unsigned long long c1nc_pg_time;
        unsigned int rail_gating_count;
        unsigned int rail_gating_bin[32];
        unsigned int rail_gating_done_count;
        unsigned int rail_gating_done_count_bin[32];
        unsigned int c0nc_gating_count;
        unsigned int c0nc_gating_bin[32];
        unsigned int c0nc_gating_done_count;
        unsigned int c0nc_gating_done_count_bin[32];
        unsigned int c1nc_gating_count;
        unsigned int c1nc_gating_bin[32];
        unsigned int c1nc_gating_done_count;
        unsigned int c1nc_gating_done_count_bin[32];
        unsigned int pd_int_count[NR_IRQS];
        unsigned int last_pd_int_count[NR_IRQS];
} idle_stats;

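/* Histogram bin for a duration in ms: fls() yields the log2 bucket */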
static inline unsigned int time_to_bin(unsigned int time)
{
        return fls(time);
}

static inline void tegra_irq_unmask(int irq)
{
        struct irq_data *data = irq_get_irq_data(irq);
        data->chip->irq_unmask(data);
}

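/* Statistics slot for a CPU: the LP cluster CPU always maps to slot 4 */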
static inline unsigned int cpu_number(unsigned int n)
{
        return is_lp_cluster() ? 4 : n;
}

void tegra11x_cpu_idle_stats_pd_ready(unsigned int cpu)
{
        idle_stats.cpu_ready_count[cpu_number(cpu)]++;
}

void tegra11x_cpu_idle_stats_pd_time(unsigned int cpu, s64 us)
{
        idle_stats.cpu_wants_pd_time[cpu_number(cpu)] += us;
}

/* Allow rail off only if all secondary CPUs are power gated, and no
   rail update is in progress */
static bool tegra_rail_off_is_allowed(void)
{
        u32 rst = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
        u32 pg = readl(PMC_POWERGATE_STATUS) >> 8;

        if (((rst & 0xE) != 0xE) || ((pg & 0xE) != 0))
                return false;

        if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
                return false;

        return true;
}

bool tegra11x_pd_is_allowed(struct cpuidle_device *dev,
        struct cpuidle_state *state)
{
        s64 request;

        if (!cpumask_test_cpu(cpu_number(dev->cpu),
                                to_cpumask(&cpu_power_gating_in_idle)))
                return false;

        if (tegra_cpu_timer_get_remain(&request))
                return false;

        if (state->exit_latency != pd_exit_latencies[cpu_number(dev->cpu)]) {
                /* possible on the 1st entry after cluster switch */
                state->exit_latency = pd_exit_latencies[cpu_number(dev->cpu)];
                tegra_pd_update_target_residency(state);
        }
        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                return false;
        }

        return true;
}

static inline void tegra11_irq_restore_affinity(void)
{
#ifdef CONFIG_SMP
        /* Disable the distributor. */
        tegra_gic_dist_disable();

        /* Restore the other CPU's interrupt affinity. */
        tegra_gic_restore_affinity();

        /* Re-enable the distributor. */
        tegra_gic_dist_enable();
#endif
}

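/* Power down the cluster as a whole: the last running CPU plus, depending
   on "flag", the non-CPU partition or the entire CPU rail */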
static bool tegra_cpu_cluster_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
        ktime_t entry_time;
        ktime_t exit_time;
        bool sleep_completed = false;
        bool multi_cpu_entry = false;
        int bin;
        unsigned int flag = 0;
        s64 sleep_time;

        /* LP2 entry time */
        entry_time = ktime_get();

        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                cpu_do_idle();
                return false;
        }

#ifdef CONFIG_SMP
        multi_cpu_entry = !is_lp_cluster() && (num_online_cpus() > 1);
        if (multi_cpu_entry) {
                s64 wake_time;
                unsigned int i;

                /* Disable the distributor -- this is the only way to
                   prevent the other CPUs from responding to interrupts
                   and potentially fiddling with the distributor
                   registers while we're fiddling with them. */
                tegra_gic_dist_disable();

                /* Did an interrupt come in for another CPU before we
                   could disable the distributor? */
                if (!tegra_rail_off_is_allowed()) {
                        /* Yes, re-enable the distributor and clock gating. */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* LP2 initial targeted wake time */
                wake_time = ktime_to_us(entry_time) + request;

                /* CPU0 must wake up before any of the other CPUs. */
                smp_rmb();
                for (i = 1; i < CONFIG_NR_CPUS; i++)
                        wake_time = min_t(s64, wake_time,
                                tegra_cpu_wake_by_time[i]);

                /* LP2 actual targeted wake time */
                request = wake_time - ktime_to_us(entry_time);
                BUG_ON(wake_time < 0LL);

                if (request < state->target_residency) {
                        /* Not enough time left to enter LP2 */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* Cancel power gating wake timers for all secondary CPUs */
                tegra_pd_timer_cancel_secondary();

                /* Save and disable the affinity setting for the other
                   CPUs and route all interrupts to CPU0. */
                tegra_gic_disable_affinity();

                /* Re-enable the distributor. */
                tegra_gic_dist_enable();
        }
#endif
        cpu_pm_enter();

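        /* Wake early enough to absorb the expected power-gate exit latency */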
        sleep_time = request -
                pd_exit_latencies[cpu_number(dev->cpu)];

        bin = time_to_bin((u32)request / 1000);
        idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

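        /* Local timers stop across power gating; switch to the broadcast
           clockevent for the duration of the power-down */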
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        if (is_lp_cluster()) {
                /* here we are not supporting emulation mode, for now */
                flag = TEGRA_POWER_CLUSTER_PART_NONCPU;
                idle_stats.c1nc_gating_count++;
                idle_stats.c1nc_gating_bin[bin]++;
        } else {
                tegra_dvfs_rail_off(tegra_cpu_rail, entry_time);
                flag = (fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK;

                if (((request < tegra_min_residency_crail()) &&
                        (flag != TEGRA_POWER_CLUSTER_PART_MASK)) &&
                        ((fast_cluster_power_down_mode &
                        TEGRA_POWER_CLUSTER_FORCE_MASK) == 0))
                        flag = TEGRA_POWER_CLUSTER_PART_NONCPU;

                if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_count++;
                        idle_stats.rail_gating_bin[bin]++;
                } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        idle_stats.c0nc_gating_count++;
                        idle_stats.c0nc_gating_bin[bin]++;
                }
        }

        if (tegra_idle_power_down_last(sleep_time, flag) == 0)
                sleep_completed = true;
        else {
                int irq = tegra_gic_pending_interrupt();
                idle_stats.pd_int_count[irq]++;
        }

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
        exit_time = ktime_get();
        if (!is_lp_cluster())
                tegra_dvfs_rail_on(tegra_cpu_rail, exit_time);

        if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL)
                idle_stats.rail_pd_time +=
                        ktime_to_us(ktime_sub(exit_time, entry_time));
        else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                if (is_lp_cluster())
                        idle_stats.c1nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
                else
                        idle_stats.c0nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
        }

        if (multi_cpu_entry)
                tegra11_irq_restore_affinity();

        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until the next tick,
                 * adjust the exit latency based on measurement
                 */
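                /* Fold 1/16 of the measured error into the stored exit
                   latency (a simple low-pass filter to damp jitter) */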
                int offset = ktime_to_us(ktime_sub(exit_time, entry_time))
                        - request;
                int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu_number(dev->cpu)] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();

                if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_done_count++;
                        idle_stats.rail_gating_done_count_bin[bin]++;
                } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        if (is_lp_cluster()) {
                                idle_stats.c1nc_gating_done_count++;
                                idle_stats.c1nc_gating_done_count_bin[bin]++;
                        } else {
                                idle_stats.c0nc_gating_done_count++;
                                idle_stats.c0nc_gating_done_count_bin[bin]++;
                        }
                }

                pr_debug("%lld %lld %d %d\n", request,
                        ktime_to_us(ktime_sub(exit_time, entry_time)),
                        offset, bin);
        }

        cpu_pm_exit();

        return true;
}

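/* Power gate only the calling CPU core; the rest of the cluster stays up */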
static bool tegra_cpu_core_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
#ifdef CONFIG_SMP
        s64 sleep_time;
        u32 cntp_tval;
        u32 cntfrq;
        ktime_t entry_time;
        bool sleep_completed = false;
        struct tick_sched *ts = tick_get_tick_sched(dev->cpu);
        unsigned int cpu = cpu_number(dev->cpu);

        if ((tegra_cpu_timer_get_remain(&request) == -ETIME) ||
                (request <= state->target_residency) || (!ts) ||
                (ts->nohz_mode == NOHZ_MODE_INACTIVE) ||
                !tegra_is_cpu_wake_timer_ready(dev->cpu)) {
                /*
                 * Not enough time left to enter LP2, or wake timer not ready
                 */
                cpu_do_idle();
                return false;
        }

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
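        /* Arm the per-CPU arch timer: read the counter frequency (CNTFRQ)
           and load the downcounter (CNTP_TVAL) with the sleep interval */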
        asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (cntfrq));
        cntp_tval = (request - state->exit_latency) * (cntfrq / 1000000);
        asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r"(cntp_tval));
#endif
        cpu_pm_enter();

#if !defined(CONFIG_TEGRA_LP2_CPU_TIMER)
        sleep_time = request - state->exit_latency;
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        tegra_pd_set_trigger(sleep_time);
#endif
        idle_stats.tear_down_count[cpu]++;

        entry_time = ktime_get();

        /* Save time this CPU must be awakened by. */
        tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(entry_time) + request;
        smp_wmb();

#ifdef CONFIG_TRUSTED_FOUNDATIONS
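        /* With Trusted Foundations, the cluster master CPU (slot 0 or 4)
           hands the reset handler address to the secure monitor first */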
        if ((cpu == 0) || (cpu == 4)) {
                tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE7,
                                (TEGRA_RESET_HANDLER_BASE +
                                tegra_cpu_reset_handler_offset));
        }
#endif
        cpu_suspend(0, tegra3_sleep_cpu_secondary_finish);

        tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
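        /* CNTP_TVAL counts down past zero; a non-positive readback means
           the timer fired and the full sleep interval elapsed */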
        asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (cntp_tval));
        if ((s32)cntp_tval <= 0)
                sleep_completed = true;
#else
        sleep_completed = !tegra_pd_timer_remain();
        tegra_pd_set_trigger(0);
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
#endif
        sleep_time = ktime_to_us(ktime_sub(ktime_get(), entry_time));
        idle_stats.cpu_pg_time[cpu] += sleep_time;
        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until timer expires,
                 * adjust the exit latency based on measurement
                 */
                int offset = sleep_time - request;
                int latency = pd_exit_latencies[cpu] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();
        }
#endif
        cpu_pm_exit();

        return true;
}

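/* Top-level idle power-down entry: decides between gating just this CPU
   core and powering down the whole cluster or CPU rail */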
bool tegra11x_idle_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state)
{
        bool power_down;
        bool cpu_gating_only = false;
        bool power_gating_cpu_only = true;
        int status = -1;
        unsigned long rate = ULONG_MAX;
        s64 request;

        if (tegra_cpu_timer_get_remain(&request)) {
                cpu_do_idle();
                return false;
        }

        tegra_set_cpu_in_pd(dev->cpu);
        cpu_gating_only = (((fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK) == 0);

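        /* Gate more than the CPU cores only when the predicted idle time
           clears the cluster's non-CPU residency threshold */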
        if (is_lp_cluster()) {
                if (slow_cluster_power_gating_noncpu &&
                        (request > TEGRA_MIN_RESIDENCY_NCPU_SLOW))
                                power_gating_cpu_only = false;
                else
                        power_gating_cpu_only = true;
        } else if (!cpu_gating_only &&
                (num_online_cpus() == 1) &&
                tegra_rail_off_is_allowed()) {
                if (fast_cluster_power_down_mode &
                        TEGRA_POWER_CLUSTER_FORCE_MASK)
                        power_gating_cpu_only = cpu_gating_only;
                else if (request > TEGRA_MIN_RESIDENCY_NCPU_FAST)
                        power_gating_cpu_only = false;
                else
                        power_gating_cpu_only = true;
        } else
                power_gating_cpu_only = true;

        if (power_gating_cpu_only)
                power_down = tegra_cpu_core_power_down(dev, state, request);
        else {
                if (is_lp_cluster())
                        status = tegra_cpu_backup_rate_exchange(&rate);

                power_down = tegra_cpu_cluster_power_down(dev, state, request);

                /* restore cpu clock after cluster power ungating */
                if (status == 0)
                        tegra_cpu_backup_rate_exchange(&rate);
        }

        tegra_clear_cpu_in_pd(dev->cpu);

        return power_down;
}

#ifdef CONFIG_DEBUG_FS
int tegra11x_pd_debug_show(struct seq_file *s, void *data)
{
        int bin;
        int i;
        seq_printf(s, "                                    cpu0     cpu1     cpu2     cpu3     cpulp\n");
        seq_printf(s, "-----------------------------------------------------------------------------\n");
        seq_printf(s, "cpu ready:                      %8u %8u %8u %8u %8u\n",
                idle_stats.cpu_ready_count[0],
                idle_stats.cpu_ready_count[1],
                idle_stats.cpu_ready_count[2],
                idle_stats.cpu_ready_count[3],
                idle_stats.cpu_ready_count[4]);
        seq_printf(s, "tear down:                      %8u %8u %8u %8u %8u\n",
                idle_stats.tear_down_count[0],
                idle_stats.tear_down_count[1],
                idle_stats.tear_down_count[2],
                idle_stats.tear_down_count[3],
                idle_stats.tear_down_count[4]);
        seq_printf(s, "rail gating count:      %8u\n",
                idle_stats.rail_gating_count);
        seq_printf(s, "rail gating completed:  %8u %7u%%\n",
                idle_stats.rail_gating_done_count,
                idle_stats.rail_gating_done_count * 100 /
                        (idle_stats.rail_gating_count ?: 1));

        seq_printf(s, "c0nc gating count:      %8u\n",
                idle_stats.c0nc_gating_count);
        seq_printf(s, "c0nc gating completed:  %8u %7u%%\n",
                idle_stats.c0nc_gating_done_count,
                idle_stats.c0nc_gating_done_count * 100 /
                        (idle_stats.c0nc_gating_count ?: 1));

        seq_printf(s, "c1nc gating count:      %8u\n",
                idle_stats.c1nc_gating_count);
        seq_printf(s, "c1nc gating completed:  %8u %7u%%\n",
                idle_stats.c1nc_gating_done_count,
                idle_stats.c1nc_gating_done_count * 100 /
                        (idle_stats.c1nc_gating_count ?: 1));

        seq_printf(s, "\n");
        seq_printf(s, "cpu ready time:                 " \
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_wants_pd_time[0], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[1], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[2], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[3], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[4], 1000));

        seq_printf(s, "cpu power gating time:          " \
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_pg_time[0], 1000),
                div64_u64(idle_stats.cpu_pg_time[1], 1000),
                div64_u64(idle_stats.cpu_pg_time[2], 1000),
                div64_u64(idle_stats.cpu_pg_time[3], 1000),
                div64_u64(idle_stats.cpu_pg_time[4], 1000));

        seq_printf(s, "power gated %%:                 " \
                        "%7d%% %7d%% %7d%% %7d%% %7d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.cpu_pg_time[0] * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[1] ?
                        div64_u64(idle_stats.cpu_pg_time[1] * 100,
                        idle_stats.cpu_wants_pd_time[1]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[2] ?
                        div64_u64(idle_stats.cpu_pg_time[2] * 100,
                        idle_stats.cpu_wants_pd_time[2]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[3] ?
                        div64_u64(idle_stats.cpu_pg_time[3] * 100,
                        idle_stats.cpu_wants_pd_time[3]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(idle_stats.cpu_pg_time[4] * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");
        seq_printf(s, "rail gating time  c0nc gating time  c1nc gating time\n");
        seq_printf(s, "%8llu ms          %8llu ms          %8llu ms\n",
                div64_u64(idle_stats.rail_pd_time, 1000),
                div64_u64(idle_stats.c0nc_pg_time, 1000),
                div64_u64(idle_stats.c1nc_pg_time, 1000));
        seq_printf(s, "%8d%%             %8d%%             %8d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.rail_pd_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.c0nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(idle_stats.c1nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "rail gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.rail_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.rail_gating_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin] * 100 /
                                idle_stats.rail_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c0nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c0nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c0nc_gating_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c0nc_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c1nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c1nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c1nc_gating_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c1nc_gating_bin[bin]);
        }

        seq_printf(s, "\n");
        seq_printf(s, "%3s %20s %6s %10s\n",
                "int", "name", "count", "last count");
        seq_printf(s, "--------------------------------------------\n");
        for (i = 0; i < NR_IRQS; i++) {
                if (idle_stats.pd_int_count[i] == 0)
                        continue;
                seq_printf(s, "%3d %20s %6d %10d\n",
                        i, irq_to_desc(i)->action ?
                                irq_to_desc(i)->action->name ?: "???" : "???",
                        idle_stats.pd_int_count[i],
                        idle_stats.pd_int_count[i] -
                                idle_stats.last_pd_int_count[i]);
                idle_stats.last_pd_int_count[i] = idle_stats.pd_int_count[i];
        }
        return 0;
}
#endif

int __init tegra11x_cpuidle_init_soc(struct tegra_cpuidle_ops *idle_ops)
{
        int i;
        struct tegra_cpuidle_ops ops = {
                tegra11x_idle_power_down,
                tegra11x_cpu_idle_stats_pd_ready,
                tegra11x_cpu_idle_stats_pd_time,
                tegra11x_pd_is_allowed,
#ifdef CONFIG_DEBUG_FS
                tegra11x_pd_debug_show
#endif
        };

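        /* G-cluster CPU clock, used to detect in-flight DVFS rail updates */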
        cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");

        for (i = 0; i < ARRAY_SIZE(pd_exit_latencies); i++)
                pd_exit_latencies[i] = tegra_pg_exit_latency;

        *idle_ops = ops;
        return 0;
}