ARM: Tegra: Add CONFIG_TEGRA_USE_SECURE_KERNEL
[linux-3.10.git] / arch / arm / mach-tegra / cpuidle-t11x.c
1 /*
2  * arch/arm/mach-tegra/cpuidle-t11x.c
3  *
4  * CPU idle driver for Tegra11x CPUs
5  *
6  * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/cpu.h>
25 #include <linux/cpuidle.h>
26 #include <linux/debugfs.h>
27 #include <linux/delay.h>
28 #include <linux/hrtimer.h>
29 #include <linux/init.h>
30 #include <linux/interrupt.h>
31 #include <linux/irq.h>
32 #include <linux/io.h>
33 #include <linux/ratelimit.h>
34 #include <linux/sched.h>
35 #include <linux/seq_file.h>
36 #include <linux/slab.h>
37 #include <linux/smp.h>
38 #include <linux/suspend.h>
39 #include <linux/tick.h>
40 #include <linux/clk.h>
41 #include <linux/cpu_pm.h>
42 #include <linux/module.h>
43
44 #include <asm/cacheflush.h>
45 #include <asm/hardware/gic.h>
46 #include <asm/localtimer.h>
47 #include <asm/suspend.h>
48 #include <asm/cputype.h>
49
50 #include <mach/irqs.h>
51 #include <mach/hardware.h>
52
53 #include <trace/events/power.h>
54
55 #include "clock.h"
56 #include "cpuidle.h"
57 #include "dvfs.h"
58 #include "fuse.h"
59 #include "gic.h"
60 #include "iomap.h"
61 #include "pm.h"
62 #include "reset.h"
63 #include "sleep.h"
64 #include "timer.h"
65 #include "fuse.h"
66
/* CPU complex reset status register (CAR); used to check whether the
   secondary CPUs are held in reset. */
#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
/* PMC power-gate status register; reports which partitions are gated. */
#define PMC_POWERGATE_STATUS \
        (IO_ADDRESS(TEGRA_PMC_BASE) + 0x038)

/* ARM generic timer CNTP_CTL bits (enable / interrupt mask). */
#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)
74
#ifdef CONFIG_SMP
/* Absolute wake deadline per CPU in us (ktime-based); LLONG_MAX means the
   CPU has no pending power-gate wake timer. Written by the core power-down
   path, read by CPU0 in the cluster power-down path (paired smp_wmb/rmb). */
static s64 tegra_cpu_wake_by_time[4] = {
        LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
#endif

/* Bitmask of CPUs allowed to power gate in idle; default 0x1f enables all
   four G-cluster CPUs (bits 0-3) and the LP CPU (bit 4, see cpu_number()). */
static ulong cpu_power_gating_in_idle __read_mostly = 0x1f;
module_param(cpu_power_gating_in_idle, ulong, 0644);

/* Allow the slow (LP) cluster to also gate its non-CPU partition in idle. */
static bool slow_cluster_power_gating_noncpu __read_mostly;
module_param(slow_cluster_power_gating_noncpu, bool, 0644);

/* Fast-cluster power-down mode, shifted into TEGRA_POWER_CLUSTER_PART_*
   flags when the whole cluster goes down. */
static uint fast_cluster_power_down_mode __read_mostly;
module_param(fast_cluster_power_down_mode, uint, 0644);

/* Optionally stop the memory controller clock during deep idle. */
static bool stop_mc_clk_in_idle __read_mostly = false;
module_param(stop_mc_clk_in_idle, bool, 0644);

/* G-cluster CPU clock handle, used to detect in-flight DVFS rail updates. */
static struct clk *cpu_clk_for_dvfs;

/* Measured LP2 exit latency per CPU slot (0-3 = G CPUs, 4 = LP CPU);
   adapted at runtime from actual sleep measurements. */
static int pd_exit_latencies[5];
95
/* Aggregate idle/power-gating statistics, exposed via debugfs.
   All [5]-sized arrays are indexed by cpu_number(): 0-3 = G CPUs, 4 = LP. */
static struct {
        unsigned int cpu_ready_count[5];        /* times a CPU was PD-ready */
        unsigned int tear_down_count[5];        /* actual power-down entries */
        unsigned long long cpu_wants_pd_time[5]; /* us a CPU wanted PD */
        unsigned long long cpu_pg_time[5];      /* us spent power gated */
        unsigned long long rail_pd_time;        /* us with CPU rail off */
        unsigned long long c0nc_pg_time;        /* us fast-cluster non-CPU gated */
        unsigned long long c1nc_pg_time;        /* us slow-cluster non-CPU gated */
        unsigned long long mc_clk_stop_time;    /* us with MC clock stopped */
        /* For each gating type: attempts, log2(ms) histogram of requested
           sleep time, completions (slept full interval), and completion
           histogram. */
        unsigned int rail_gating_count;
        unsigned int rail_gating_bin[32];
        unsigned int rail_gating_done_count;
        unsigned int rail_gating_done_count_bin[32];
        unsigned int c0nc_gating_count;
        unsigned int c0nc_gating_bin[32];
        unsigned int c0nc_gating_done_count;
        unsigned int c0nc_gating_done_count_bin[32];
        unsigned int c1nc_gating_count;
        unsigned int c1nc_gating_bin[32];
        unsigned int c1nc_gating_done_count;
        unsigned int c1nc_gating_done_count_bin[32];
        unsigned int mc_clk_stop_count;
        unsigned int mc_clk_stop_bin[32];
        unsigned int mc_clk_stop_done_count;
        unsigned int mc_clk_stop_done_count_bin[32];
        unsigned int pd_int_count[NR_IRQS];     /* IRQs that aborted PD */
        unsigned int last_pd_int_count[NR_IRQS]; /* snapshot at last dump */
        unsigned int clk_gating_vmin;           /* clock-gate-at-Vmin entries */
} idle_stats;
125
/* Map a duration (ms) to its log2 histogram bin: fls() returns the index
   of the highest set bit, so bin b covers roughly [2^(b-1), 2^b) ms. */
static inline unsigned int time_to_bin(unsigned int time)
{
        unsigned int bin = fls(time);

        return bin;
}
130
131 static inline void tegra_irq_unmask(int irq)
132 {
133         struct irq_data *data = irq_get_irq_data(irq);
134         data->chip->irq_unmask(data);
135 }
136
/* Statistics slot for CPU @n: the LP-cluster CPU always uses slot 4,
   G-cluster CPUs use their own id (0-3). */
static inline unsigned int cpu_number(unsigned int n)
{
        if (is_lp_cluster())
                return 4;

        return n;
}
141
142 void tegra11x_cpu_idle_stats_pd_ready(unsigned int cpu)
143 {
144         idle_stats.cpu_ready_count[cpu_number(cpu)]++;
145 }
146
147 void tegra11x_cpu_idle_stats_pd_time(unsigned int cpu, s64 us)
148 {
149         idle_stats.cpu_wants_pd_time[cpu_number(cpu)] += us;
150 }
151
/* Allow rail off only if all secondary CPUs are power gated, and no
   rail update is in progress */
static bool tegra_rail_off_is_allowed(void)
{
        /* CPU complex reset status; bits 1-3 correspond to CPU1-3. */
        u32 rst = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
        /* Power-gate status shifted so bits 1-3 line up with the CPU1-3
           partitions -- presumably matches the PMC bit layout; confirm
           against the Tegra TRM. */
        u32 pg = readl(PMC_POWERGATE_STATUS) >> 8;

        /* Secondaries must be in reset (rst bits set) and their partitions
           gated (pg bits clear). */
        if (((rst & 0xE) != 0xE) || ((pg & 0xE) != 0))
                return false;

        /* Don't turn the rail off while DVFS is adjusting the CPU rail. */
        if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
                return false;

        return true;
}
167
/*
 * Gate check for entering CPU power down (LP2) from cpuidle.
 *
 * Returns false when this CPU is excluded by the cpu_power_gating_in_idle
 * mask, the wake timer cannot report the remaining time, or the remaining
 * time is below the state's target residency. Also refreshes the state's
 * exit latency from the per-CPU measured value (it can be stale right
 * after a cluster switch).
 */
bool tegra11x_pd_is_allowed(struct cpuidle_device *dev,
        struct cpuidle_state *state)
{
        s64 request;

        /* cpu_power_gating_in_idle is treated as a cpumask: bits 0-3 are
           the G CPUs, bit 4 is the LP CPU (via cpu_number()). */
        if (!cpumask_test_cpu(cpu_number(dev->cpu),
                                to_cpumask(&cpu_power_gating_in_idle)))
                return false;

        if (tegra_cpu_timer_get_remain(&request))
                return false;

        if (state->exit_latency != pd_exit_latencies[cpu_number(dev->cpu)]) {
                /* possible on the 1st entry after cluster switch*/
                state->exit_latency = pd_exit_latencies[cpu_number(dev->cpu)];
                tegra_pd_update_target_residency(state);
        }
        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                return false;
        }

        return true;
}
192
/* Undo tegra_gic_disable_affinity(): restore the saved per-CPU interrupt
   routing after a multi-CPU cluster power-down attempt. The distributor
   must be disabled while its affinity registers are rewritten. */
static inline void tegra11_irq_restore_affinity(void)
{
#ifdef CONFIG_SMP
        /* Disable the distributor. */
        tegra_gic_dist_disable();

        /* Restore the other CPU's interrupt affinity. */
        tegra_gic_restore_affinity();

        /* Re-enable the distributor. */
        tegra_gic_dist_enable();
#endif
}
206
/*
 * Power down the whole CPU cluster (LP2 with rail/non-CPU partition
 * gating, and optionally MC clock stop) from idle.
 *
 * @request is the available idle time in us. Returns true if a power-down
 * was attempted (caller treats the interval as power-gated), false if the
 * attempt was abandoned and plain cpu_do_idle() was used instead.
 *
 * On the fast cluster with multiple CPUs online, CPU0 must first route all
 * interrupts to itself and clamp @request to the earliest secondary wake
 * deadline; the affinity is restored on exit.
 */
static bool tegra_cpu_cluster_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
        ktime_t entry_time;
        ktime_t exit_time;
        bool sleep_completed = false;
        bool multi_cpu_entry = false;
        int bin;
        unsigned int flag = 0;
        s64 sleep_time;

        /* LP2 entry time */
        entry_time = ktime_get();

        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                cpu_do_idle();
                return false;
        }

#ifdef CONFIG_SMP
        multi_cpu_entry = !is_lp_cluster() && (num_online_cpus() > 1);
        if (multi_cpu_entry) {
                s64 wake_time;
                unsigned int i;

                /* Disable the distributor -- this is the only way to
                   prevent the other CPUs from responding to interrupts
                   and potentially fiddling with the distributor
                   registers while we're fiddling with them. */
                tegra_gic_dist_disable();

                /* Did an interrupt come in for another CPU before we
                   could disable the distributor? */
                if (!tegra_rail_off_is_allowed()) {
                        /* Yes, re-enable the distributor and clock gating. */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* LP2 initial targeted wake time */
                wake_time = ktime_to_us(entry_time) + request;

                /* CPU0 must wake up before any of the other CPUs. */
                smp_rmb();
                for (i = 1; i < CONFIG_NR_CPUS; i++)
                        wake_time = min_t(s64, wake_time,
                                tegra_cpu_wake_by_time[i]);

                /* LP2 actual targeted wake time */
                request = wake_time - ktime_to_us(entry_time);
                /* NOTE(review): this asserts on wake_time, not the adjusted
                   request -- a negative request would pass; confirm intent. */
                BUG_ON(wake_time < 0LL);

                if (request < state->target_residency) {
                        /* Not enough time left to enter LP2 */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* Cancel power gating wake timers for all secondary CPUs */
                tegra_pd_timer_cancel_secondary();

                /* Save and disable the affinity setting for the other
                   CPUs and route all interrupts to CPU0. */
                tegra_gic_disable_affinity();

                /* Re-enable the distributor. */
                tegra_gic_dist_enable();
        }
#endif
        cpu_pm_enter();

        /* Budget the hardware sleep for the measured exit latency. */
        sleep_time = request -
                pd_exit_latencies[cpu_number(dev->cpu)];

        bin = time_to_bin((u32)request / 1000);
        idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        if (is_lp_cluster()) {
                /* here we are not supporting emulation mode, for now */
                flag = TEGRA_POWER_CLUSTER_PART_NONCPU;
                idle_stats.c1nc_gating_count++;
                idle_stats.c1nc_gating_bin[bin]++;
        } else {
                tegra_dvfs_rail_off(tegra_cpu_rail, entry_time);
                flag = (fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK;

                /* Fall back to non-CPU partition gating when the interval
                   is too short for rail gating (unless forced). */
                if (((request < tegra_min_residency_crail()) &&
                        (flag != TEGRA_POWER_CLUSTER_PART_MASK)) &&
                        ((fast_cluster_power_down_mode &
                        TEGRA_POWER_CLUSTER_FORCE_MASK) == 0))
                        flag = TEGRA_POWER_CLUSTER_PART_NONCPU;

                if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_count++;
                        idle_stats.rail_gating_bin[bin]++;
                } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        idle_stats.c0nc_gating_count++;
                        idle_stats.c0nc_gating_bin[bin]++;
                }
        }

        /* Optionally stop the memory controller clock for long intervals
           in the deepest state (power_usage == 0). */
        if (stop_mc_clk_in_idle && (state->power_usage == 0) &&
            (request > tegra_mc_clk_stop_min_residency())) {
                flag |= TEGRA_POWER_STOP_MC_CLK;

                idle_stats.mc_clk_stop_count++;
                idle_stats.mc_clk_stop_bin[bin]++;

                tegra_mc_clk_prepare();
        }

        /* Returns 0 if the cluster slept the full interval. */
        if (tegra_idle_power_down_last(sleep_time, flag) == 0)
                sleep_completed = true;
        else {
                int irq = tegra_gic_pending_interrupt();
                idle_stats.pd_int_count[irq]++;
        }

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
        exit_time = ktime_get();

        if (flag & TEGRA_POWER_STOP_MC_CLK)
                tegra_mc_clk_finish();

        if (!is_lp_cluster())
                tegra_dvfs_rail_on(tegra_cpu_rail, exit_time);

        /* Attribute the whole interval to exactly one gating category. */
        if (flag & TEGRA_POWER_STOP_MC_CLK)
                idle_stats.mc_clk_stop_time +=
                        ktime_to_us(ktime_sub(exit_time, entry_time));
        else if (flag & TEGRA_POWER_CLUSTER_PART_CRAIL)
                idle_stats.rail_pd_time +=
                        ktime_to_us(ktime_sub(exit_time, entry_time));
        else if (flag & TEGRA_POWER_CLUSTER_PART_NONCPU) {
                if (is_lp_cluster())
                        idle_stats.c1nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
                else
                        idle_stats.c0nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
        }

        if (multi_cpu_entry)
                tegra11_irq_restore_affinity();

        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until the next tick,
                 * adjust the exit latency based on measurement
                 */
                int offset = ktime_to_us(ktime_sub(exit_time, entry_time))
                        - request;
                /* Low-pass filter: move 1/16 of the error per iteration. */
                int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu_number(dev->cpu)] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();

                if (flag & TEGRA_POWER_STOP_MC_CLK) {
                        idle_stats.mc_clk_stop_done_count++;
                        idle_stats.mc_clk_stop_done_count_bin[bin]++;
                } else if (flag & TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_done_count++;
                        idle_stats.rail_gating_done_count_bin[bin]++;
                } else if (flag & TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        if (is_lp_cluster()) {
                                idle_stats.c1nc_gating_done_count++;
                                idle_stats.c1nc_gating_done_count_bin[bin]++;
                        } else {
                                idle_stats.c0nc_gating_done_count++;
                                idle_stats.c0nc_gating_done_count_bin[bin]++;
                        }
                }

                pr_debug("%lld %lld %d %d\n", request,
                        ktime_to_us(ktime_sub(exit_time, entry_time)),
                        offset, bin);
        }

        cpu_pm_exit();

        return true;
}
397
/*
 * Power down only this CPU core (the rest of the cluster stays up).
 *
 * @request is the available idle time in us (re-read from the wake timer).
 * Returns true if the core was suspended, false if the attempt was
 * abandoned and plain cpu_do_idle() was used instead.
 *
 * The wakeup is armed either on the per-CPU ARM generic timer
 * (CONFIG_TEGRA_LP2_CPU_TIMER) or on the shared PD trigger with
 * clockevents broadcast.
 */
static bool tegra_cpu_core_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
#ifdef CONFIG_SMP
        s64 sleep_time;
        u32 cntp_tval;
        u32 cntfrq;
        ktime_t entry_time;
        bool sleep_completed = false;
        struct tick_sched *ts = tick_get_tick_sched(dev->cpu);
        unsigned int cpu = cpu_number(dev->cpu);

        if ((tegra_cpu_timer_get_remain(&request) == -ETIME) ||
                (request <= state->target_residency) || (!ts) ||
                (ts->nohz_mode == NOHZ_MODE_INACTIVE) ||
                !tegra_is_cpu_wake_timer_ready(dev->cpu)) {
                /*
                 * Not enough time left to enter LP2, or wake timer not ready
                 */
                cpu_do_idle();
                return false;
        }

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
        /* Arm the per-CPU generic timer: read CNTFRQ (cp15 c14 c0 0) and
           program CNTP_TVAL (cp15 c14 c2 0) with the sleep interval in
           timer ticks (us * ticks-per-us). */
        asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (cntfrq));
        cntp_tval = (request - state->exit_latency) * (cntfrq / 1000000);
        asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r"(cntp_tval));
#endif
        cpu_pm_enter();

#if !defined(CONFIG_TEGRA_LP2_CPU_TIMER)
        /* No per-CPU timer: use the shared PD trigger + tick broadcast. */
        sleep_time = request - state->exit_latency;
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        tegra_pd_set_trigger(sleep_time);
#endif
        idle_stats.tear_down_count[cpu]++;

        entry_time = ktime_get();

        /* Save time this CPU must be awakened by. */
        tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(entry_time) + request;
        smp_wmb();

#ifdef CONFIG_TEGRA_USE_SECURE_KERNEL
        /* Tell the secure monitor the (non-secure) reset handler address
           before the boot CPU of either cluster powers down. */
        if ((cpu == 0) || (cpu == 4)) {
                tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE7,
                                (TEGRA_RESET_HANDLER_BASE +
                                tegra_cpu_reset_handler_offset));
        }
#endif
        cpu_suspend(0, tegra3_sleep_cpu_secondary_finish);

        /* Awake again: clear our wake deadline. */
        tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
        /* CNTP_TVAL counts down; <= 0 means the timer expired, i.e. we
           slept the full interval rather than being woken by an IRQ. */
        asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (cntp_tval));
        if ((s32)cntp_tval <= 0)
                sleep_completed = true;
#else
        sleep_completed = !tegra_pd_timer_remain();
        tegra_pd_set_trigger(0);
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
#endif
        sleep_time = ktime_to_us(ktime_sub(ktime_get(), entry_time));
        idle_stats.cpu_pg_time[cpu] += sleep_time;
        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until timer expires,
                 * adjust the exit latency based on measurement
                 */
                int offset = sleep_time - request;
                int latency = pd_exit_latencies[cpu] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();
        }
#endif
        /* NOTE(review): when !CONFIG_SMP the whole body above is compiled
           out, so cpu_pm_exit() runs without a matching cpu_pm_enter() --
           presumably this platform is always SMP; confirm. */
        cpu_pm_exit();

        return true;
}
481
/*
 * Top-level idle power-down dispatcher.
 *
 * Picks one of three strategies based on cluster, online CPU count, and
 * remaining idle time:
 *  - clock gating at Vmin (fast cluster, single CPU, short interval),
 *  - whole-cluster power down (rail / non-CPU partition gating),
 *  - per-core power down (default).
 * Returns the chosen path's result (true = power down attempted).
 */
bool tegra11x_idle_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state)
{
        bool power_down;
        bool cpu_gating_only = false;
        bool clkgt_at_vmin = false;
        bool power_gating_cpu_only = true;
        int status = -1;
        unsigned long rate;
        s64 request;

        if (tegra_cpu_timer_get_remain(&request)) {
                cpu_do_idle();
                return false;
        }

        tegra_set_cpu_in_pd(dev->cpu);
        /* True when the configured mode gates no cluster partition at all. */
        cpu_gating_only = (((fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK) == 0);

        if (is_lp_cluster()) {
                /* LP cluster: gate the non-CPU partition only if enabled
                   via module param and the interval is long enough. */
                if (slow_cluster_power_gating_noncpu &&
                        (request > tegra_min_residency_ncpu()))
                                power_gating_cpu_only = false;
                else
                        power_gating_cpu_only = true;
        } else {
                if (num_online_cpus() > 1)
                        power_gating_cpu_only = true;
                else {
                        /* Single fast-cluster CPU: consider clock gating at
                           Vmin, honoring the force override first. */
                        if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
                                clkgt_at_vmin = false;
                        else if (tegra_force_clkgt_at_vmin ==
                                        TEGRA_CPUIDLE_FORCE_DO_CLKGT_VMIN)
                                clkgt_at_vmin = true;
                        else if (tegra_force_clkgt_at_vmin ==
                                        TEGRA_CPUIDLE_FORCE_NO_CLKGT_VMIN)
                                clkgt_at_vmin = false;
                        else if ((request >= tegra_min_residency_vmin_fmin()) &&
                                 ((request < tegra_min_residency_ncpu()) ||
                                   cpu_gating_only))
                                clkgt_at_vmin = true;

                        if (!cpu_gating_only && tegra_rail_off_is_allowed()) {
                                if (fast_cluster_power_down_mode &
                                                TEGRA_POWER_CLUSTER_FORCE_MASK)
                                        power_gating_cpu_only = false;
                                else if (request >
                                                tegra_min_residency_ncpu())
                                        power_gating_cpu_only = false;
                                else
                                        power_gating_cpu_only = true;
                        } else
                                power_gating_cpu_only = true;
                }
        }

        if (clkgt_at_vmin) {
                /* Drop the CPU clock to its idle (Vmin) rate around a plain
                   WFI; rate is restored by the second exchange call. */
                rate = 0;
                status = tegra_cpu_g_idle_rate_exchange(&rate);
                if (!status) {
                        idle_stats.clk_gating_vmin++;
                        cpu_do_idle();
                        tegra_cpu_g_idle_rate_exchange(&rate);
                        power_down = true;
                } else
                        power_down = tegra_cpu_core_power_down(dev, state,
                                                                request);
        } else if (!power_gating_cpu_only) {
                if (is_lp_cluster()) {
                        rate = ULONG_MAX;
                        status = tegra_cpu_lp_idle_rate_exchange(&rate);
                }

                power_down = tegra_cpu_cluster_power_down(dev, state, request);

                /* restore cpu clock after cluster power ungating */
                if (status == 0)
                        tegra_cpu_lp_idle_rate_exchange(&rate);
        } else
                power_down = tegra_cpu_core_power_down(dev, state, request);

        tegra_clear_cpu_in_pd(dev->cpu);

        return power_down;
}
569
#ifdef CONFIG_DEBUG_FS
/*
 * Dump all idle/power-gating statistics to a debugfs seq_file:
 * per-CPU counts and times, per-gating-type totals and completion rates,
 * log2(ms) histograms, and the IRQs that aborted power-down attempts.
 */
int tegra11x_pd_debug_show(struct seq_file *s, void *data)
{
        int bin;
        int i;
        seq_printf(s, "                                    cpu0     cpu1     cpu2     cpu3     cpulp\n");
        seq_printf(s, "-----------------------------------------------------------------------------\n");
        seq_printf(s, "cpu ready:                      %8u %8u %8u %8u %8u\n",
                idle_stats.cpu_ready_count[0],
                idle_stats.cpu_ready_count[1],
                idle_stats.cpu_ready_count[2],
                idle_stats.cpu_ready_count[3],
                idle_stats.cpu_ready_count[4]);
        seq_printf(s, "tear down:                      %8u %8u %8u %8u %8u\n",
                idle_stats.tear_down_count[0],
                idle_stats.tear_down_count[1],
                idle_stats.tear_down_count[2],
                idle_stats.tear_down_count[3],
                idle_stats.tear_down_count[4]);
        seq_printf(s, "clk gating @ Vmin count:      %8u\n",
                idle_stats.clk_gating_vmin);
        seq_printf(s, "rail gating count:      %8u\n",
                idle_stats.rail_gating_count);
        /* "?: 1" guards the division when the attempt count is zero. */
        seq_printf(s, "rail gating completed:  %8u %7u%%\n",
                idle_stats.rail_gating_done_count,
                idle_stats.rail_gating_done_count * 100 /
                        (idle_stats.rail_gating_count ?: 1));

        seq_printf(s, "c0nc gating count:      %8u\n",
                idle_stats.c0nc_gating_count);
        seq_printf(s, "c0nc gating completed:  %8u %7u%%\n",
                idle_stats.c0nc_gating_done_count,
                idle_stats.c0nc_gating_done_count * 100 /
                        (idle_stats.c0nc_gating_count ?: 1));

        seq_printf(s, "c1nc gating count:      %8u\n",
                idle_stats.c1nc_gating_count);
        seq_printf(s, "c1nc gating completed:  %8u %7u%%\n",
                idle_stats.c1nc_gating_done_count,
                idle_stats.c1nc_gating_done_count * 100 /
                        (idle_stats.c1nc_gating_count ?: 1));

        seq_printf(s, "\n");
        seq_printf(s, "cpu ready time:                 " \
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_wants_pd_time[0], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[1], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[2], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[3], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[4], 1000));

        seq_printf(s, "cpu power gating time:          " \
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_pg_time[0], 1000),
                div64_u64(idle_stats.cpu_pg_time[1], 1000),
                div64_u64(idle_stats.cpu_pg_time[2], 1000),
                div64_u64(idle_stats.cpu_pg_time[3], 1000),
                div64_u64(idle_stats.cpu_pg_time[4], 1000));

        seq_printf(s, "power gated %%:                 " \
                        "%7d%% %7d%% %7d%% %7d%% %7d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.cpu_pg_time[0] * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[1] ?
                        div64_u64(idle_stats.cpu_pg_time[1] * 100,
                        idle_stats.cpu_wants_pd_time[1]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[2] ?
                        div64_u64(idle_stats.cpu_pg_time[2] * 100,
                        idle_stats.cpu_wants_pd_time[2]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[3] ?
                        div64_u64(idle_stats.cpu_pg_time[3] * 100,
                        idle_stats.cpu_wants_pd_time[3]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(idle_stats.cpu_pg_time[4] * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");
        seq_printf(s, "rail gating time  c0nc gating time  c1nc gating time\n");
        seq_printf(s, "%8llu ms          %8llu ms          %8llu ms\n",
                div64_u64(idle_stats.rail_pd_time, 1000),
                div64_u64(idle_stats.c0nc_pg_time, 1000),
                div64_u64(idle_stats.c1nc_pg_time, 1000));
        seq_printf(s, "%8d%%             %8d%%             %8d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.rail_pd_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.c0nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(idle_stats.c1nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "rail gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        /* NOTE(review): for bin == 0, "1 << (bin - 1)" shifts by -1 (UB);
           harmless in practice only if bin 0 is never populated -- confirm. */
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.rail_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.rail_gating_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin] * 100 /
                                idle_stats.rail_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c0nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c0nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c0nc_gating_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c0nc_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c1nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c1nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c1nc_gating_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c1nc_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "mc clk stop", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.mc_clk_stop_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.mc_clk_stop_bin[bin],
                        idle_stats.mc_clk_stop_done_count_bin[bin],
                        idle_stats.mc_clk_stop_done_count_bin[bin] * 100 /
                                idle_stats.mc_clk_stop_bin[bin]);
        }

        seq_printf(s, "\n");
        seq_printf(s, "%3s %20s %6s %10s\n",
                "int", "name", "count", "last count");
        seq_printf(s, "--------------------------------------------\n");
        /* Per-IRQ abort counts, with delta since the previous dump. */
        for (i = 0; i < NR_IRQS; i++) {
                if (idle_stats.pd_int_count[i] == 0)
                        continue;
                seq_printf(s, "%3d %20s %6d %10d\n",
                        i, irq_to_desc(i)->action ?
                                irq_to_desc(i)->action->name ?: "???" : "???",
                        idle_stats.pd_int_count[i],
                        idle_stats.pd_int_count[i] -
                                idle_stats.last_pd_int_count[i]);
                idle_stats.last_pd_int_count[i] = idle_stats.pd_int_count[i];
        };
        return 0;
}
#endif
739
/*
 * SoC init hook: fill @idle_ops with the Tegra11x cpuidle callbacks,
 * look up the G-cluster CPU clock for DVFS checks, and seed the per-CPU
 * power-down exit latencies with the platform default. Returns 0.
 */
int __init tegra11x_cpuidle_init_soc(struct tegra_cpuidle_ops *idle_ops)
{
        int i;
        /* Positional initializer -- member order must match the
           struct tegra_cpuidle_ops declaration in cpuidle.h. */
        struct tegra_cpuidle_ops ops = {
                tegra11x_idle_power_down,
                tegra11x_cpu_idle_stats_pd_ready,
                tegra11x_cpu_idle_stats_pd_time,
                tegra11x_pd_is_allowed,
#ifdef CONFIG_DEBUG_FS
                tegra11x_pd_debug_show
#endif
        };

        cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");

        for (i = 0; i < ARRAY_SIZE(pd_exit_latencies); i++)
                pd_exit_latencies[i] = tegra_pg_exit_latency;

        *idle_ops = ops;
        return 0;
}