ARM: tegra11x: Add stats for clock gating@Vmin
[linux-3.10.git] / arch / arm / mach-tegra / cpuidle-t11x.c
1 /*
2  * arch/arm/mach-tegra/cpuidle-t11x.c
3  *
4  * CPU idle driver for Tegra11x CPUs
5  *
6  * Copyright (c) 2012-2013, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/cpu.h>
25 #include <linux/cpuidle.h>
26 #include <linux/debugfs.h>
27 #include <linux/delay.h>
28 #include <linux/hrtimer.h>
29 #include <linux/init.h>
30 #include <linux/interrupt.h>
31 #include <linux/irq.h>
32 #include <linux/io.h>
33 #include <linux/ratelimit.h>
34 #include <linux/sched.h>
35 #include <linux/seq_file.h>
36 #include <linux/slab.h>
37 #include <linux/smp.h>
38 #include <linux/suspend.h>
39 #include <linux/tick.h>
40 #include <linux/clk.h>
41 #include <linux/cpu_pm.h>
42 #include <linux/module.h>
43
44 #include <asm/cacheflush.h>
45 #include <asm/hardware/gic.h>
46 #include <asm/localtimer.h>
47 #include <asm/suspend.h>
48 #include <asm/cputype.h>
49
50 #include <mach/irqs.h>
51 #include <mach/hardware.h>
52
53 #include <trace/events/power.h>
54
55 #include "clock.h"
56 #include "cpuidle.h"
57 #include "dvfs.h"
58 #include "fuse.h"
59 #include "gic.h"
60 #include "iomap.h"
61 #include "pm.h"
62 #include "reset.h"
63 #include "sleep.h"
64 #include "timer.h"
65 #include "fuse.h"
66
67 #define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
68         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
69 #define PMC_POWERGATE_STATUS \
70         (IO_ADDRESS(TEGRA_PMC_BASE) + 0x038)
71
72 #define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
73 #define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)
74
75 #define TEGRA_MIN_RESIDENCY_CLKGT_VMIN  2000
76 #define TEGRA_MIN_RESIDENCY_NCPU_SLOW   2000
77 #define TEGRA_MIN_RESIDENCY_NCPU_FAST   13000
78
79 #ifdef CONFIG_SMP
80 static s64 tegra_cpu_wake_by_time[4] = {
81         LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
82 #endif
83
84 static ulong cpu_power_gating_in_idle __read_mostly = 0x1f;
85 module_param(cpu_power_gating_in_idle, ulong, 0644);
86
87 static bool slow_cluster_power_gating_noncpu __read_mostly;
88 module_param(slow_cluster_power_gating_noncpu, bool, 0644);
89
90 static uint fast_cluster_power_down_mode __read_mostly;
91 module_param(fast_cluster_power_down_mode, uint, 0644);
92
93 static struct clk *cpu_clk_for_dvfs;
94
95 static int pd_exit_latencies[5];
96
97 static struct {
98         unsigned int cpu_ready_count[5];
99         unsigned int tear_down_count[5];
100         unsigned long long cpu_wants_pd_time[5];
101         unsigned long long cpu_pg_time[5];
102         unsigned long long rail_pd_time;
103         unsigned long long c0nc_pg_time;
104         unsigned long long c1nc_pg_time;
105         unsigned int rail_gating_count;
106         unsigned int rail_gating_bin[32];
107         unsigned int rail_gating_done_count;
108         unsigned int rail_gating_done_count_bin[32];
109         unsigned int c0nc_gating_count;
110         unsigned int c0nc_gating_bin[32];
111         unsigned int c0nc_gating_done_count;
112         unsigned int c0nc_gating_done_count_bin[32];
113         unsigned int c1nc_gating_count;
114         unsigned int c1nc_gating_bin[32];
115         unsigned int c1nc_gating_done_count;
116         unsigned int c1nc_gating_done_count_bin[32];
117         unsigned int pd_int_count[NR_IRQS];
118         unsigned int last_pd_int_count[NR_IRQS];
119         unsigned int clk_gating_vmin;
120 } idle_stats;
121
122 static inline unsigned int time_to_bin(unsigned int time)
123 {
124         return fls(time);
125 }
126
127 static inline void tegra_irq_unmask(int irq)
128 {
129         struct irq_data *data = irq_get_irq_data(irq);
130         data->chip->irq_unmask(data);
131 }
132
133 static inline unsigned int cpu_number(unsigned int n)
134 {
135         return is_lp_cluster() ? 4 : n;
136 }
137
138 void tegra11x_cpu_idle_stats_pd_ready(unsigned int cpu)
139 {
140         idle_stats.cpu_ready_count[cpu_number(cpu)]++;
141 }
142
143 void tegra11x_cpu_idle_stats_pd_time(unsigned int cpu, s64 us)
144 {
145         idle_stats.cpu_wants_pd_time[cpu_number(cpu)] += us;
146 }
147
148 /* Allow rail off only if all secondary CPUs are power gated, and no
149    rail update is in progress */
150 static bool tegra_rail_off_is_allowed(void)
151 {
152         u32 rst = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
153         u32 pg = readl(PMC_POWERGATE_STATUS) >> 8;
154
155         if (((rst & 0xE) != 0xE) || ((pg & 0xE) != 0))
156                 return false;
157
158         if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
159                 return false;
160
161         return true;
162 }
163
164 bool tegra11x_pd_is_allowed(struct cpuidle_device *dev,
165         struct cpuidle_state *state)
166 {
167         s64 request;
168
169         if (!cpumask_test_cpu(cpu_number(dev->cpu),
170                                 to_cpumask(&cpu_power_gating_in_idle)))
171                 return false;
172
173         if (tegra_cpu_timer_get_remain(&request))
174                 return false;
175
176         if (state->exit_latency != pd_exit_latencies[cpu_number(dev->cpu)]) {
177                 /* possible on the 1st entry after cluster switch*/
178                 state->exit_latency = pd_exit_latencies[cpu_number(dev->cpu)];
179                 tegra_pd_update_target_residency(state);
180         }
181         if (request < state->target_residency) {
182                 /* Not enough time left to enter LP2 */
183                 return false;
184         }
185
186         return true;
187 }
188
189 static inline void tegra11_irq_restore_affinity(void)
190 {
191 #ifdef CONFIG_SMP
192         /* Disable the distributor. */
193         tegra_gic_dist_disable();
194
195         /* Restore the other CPU's interrupt affinity. */
196         tegra_gic_restore_affinity();
197
198         /* Re-enable the distributor. */
199         tegra_gic_dist_enable();
200 #endif
201 }
202
203 static bool tegra_cpu_cluster_power_down(struct cpuidle_device *dev,
204                            struct cpuidle_state *state, s64 request)
205 {
206         ktime_t entry_time;
207         ktime_t exit_time;
208         bool sleep_completed = false;
209         bool multi_cpu_entry = false;
210         int bin;
211         unsigned int flag = 0;
212         s64 sleep_time;
213
214         /* LP2 entry time */
215         entry_time = ktime_get();
216
217         if (request < state->target_residency) {
218                 /* Not enough time left to enter LP2 */
219                 cpu_do_idle();
220                 return false;
221         }
222
223 #ifdef CONFIG_SMP
224         multi_cpu_entry = !is_lp_cluster() && (num_online_cpus() > 1);
225         if (multi_cpu_entry) {
226                 s64 wake_time;
227                 unsigned int i;
228
229                 /* Disable the distributor -- this is the only way to
230                    prevent the other CPUs from responding to interrupts
231                    and potentially fiddling with the distributor
232                    registers while we're fiddling with them. */
233                 tegra_gic_dist_disable();
234
235                 /* Did an interrupt come in for another CPU before we
236                    could disable the distributor? */
237                 if (!tegra_rail_off_is_allowed()) {
238                         /* Yes, re-enable the distributor and clock gating. */
239                         tegra_gic_dist_enable();
240                         cpu_do_idle();
241                         return false;
242                 }
243
244                 /* LP2 initial targeted wake time */
245                 wake_time = ktime_to_us(entry_time) + request;
246
247                 /* CPU0 must wake up before any of the other CPUs. */
248                 smp_rmb();
249                 for (i = 1; i < CONFIG_NR_CPUS; i++)
250                         wake_time = min_t(s64, wake_time,
251                                 tegra_cpu_wake_by_time[i]);
252
253                 /* LP2 actual targeted wake time */
254                 request = wake_time - ktime_to_us(entry_time);
255                 BUG_ON(wake_time < 0LL);
256
257                 if (request < state->target_residency) {
258                         /* Not enough time left to enter LP2 */
259                         tegra_gic_dist_enable();
260                         cpu_do_idle();
261                         return false;
262                 }
263
264                 /* Cancel power gating wake timers for all secondary CPUs */
265                 tegra_pd_timer_cancel_secondary();
266
267                 /* Save and disable the affinity setting for the other
268                    CPUs and route all interrupts to CPU0. */
269                 tegra_gic_disable_affinity();
270
271                 /* Re-enable the distributor. */
272                 tegra_gic_dist_enable();
273         }
274 #endif
275         cpu_pm_enter();
276
277         sleep_time = request -
278                 pd_exit_latencies[cpu_number(dev->cpu)];
279
280         bin = time_to_bin((u32)request / 1000);
281         idle_stats.tear_down_count[cpu_number(dev->cpu)]++;
282
283         clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
284         if (is_lp_cluster()) {
285                 /* here we are not supporting emulation mode, for now */
286                 flag = TEGRA_POWER_CLUSTER_PART_NONCPU;
287                 idle_stats.c1nc_gating_count++;
288                 idle_stats.c1nc_gating_bin[bin]++;
289         } else {
290                 tegra_dvfs_rail_off(tegra_cpu_rail, entry_time);
291                 flag = (fast_cluster_power_down_mode
292                         << TEGRA_POWER_CLUSTER_PART_SHIFT)
293                         & TEGRA_POWER_CLUSTER_PART_MASK;
294
295                 if (((request < tegra_min_residency_crail()) &&
296                         (flag != TEGRA_POWER_CLUSTER_PART_MASK)) &&
297                         ((fast_cluster_power_down_mode &
298                         TEGRA_POWER_CLUSTER_FORCE_MASK) == 0))
299                         flag = TEGRA_POWER_CLUSTER_PART_NONCPU;
300
301                 if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
302                         idle_stats.rail_gating_count++;
303                         idle_stats.rail_gating_bin[bin]++;
304                 } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
305                         idle_stats.c0nc_gating_count++;
306                         idle_stats.c0nc_gating_bin[bin]++;
307                 }
308         }
309
310         if (tegra_idle_power_down_last(sleep_time, flag) == 0)
311                 sleep_completed = true;
312         else {
313                 int irq = tegra_gic_pending_interrupt();
314                 idle_stats.pd_int_count[irq]++;
315         }
316
317         clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
318         exit_time = ktime_get();
319         if (!is_lp_cluster())
320                 tegra_dvfs_rail_on(tegra_cpu_rail, exit_time);
321
322         if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL)
323                 idle_stats.rail_pd_time +=
324                         ktime_to_us(ktime_sub(exit_time, entry_time));
325         else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
326                 if (is_lp_cluster())
327                         idle_stats.c1nc_pg_time +=
328                                 ktime_to_us(ktime_sub(exit_time, entry_time));
329                 else
330                         idle_stats.c0nc_pg_time +=
331                                 ktime_to_us(ktime_sub(exit_time, entry_time));
332         }
333
334         if (multi_cpu_entry)
335                 tegra11_irq_restore_affinity();
336
337         if (sleep_completed) {
338                 /*
339                  * Stayed in LP2 for the full time until the next tick,
340                  * adjust the exit latency based on measurement
341                  */
342                 int offset = ktime_to_us(ktime_sub(exit_time, entry_time))
343                         - request;
344                 int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
345                         offset / 16;
346                 latency = clamp(latency, 0, 10000);
347                 pd_exit_latencies[cpu_number(dev->cpu)] = latency;
348                 state->exit_latency = latency;          /* for idle governor */
349                 smp_wmb();
350
351                 if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
352                         idle_stats.rail_gating_done_count++;
353                         idle_stats.rail_gating_done_count_bin[bin]++;
354                 } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
355                         if (is_lp_cluster()) {
356                                 idle_stats.c1nc_gating_done_count++;
357                                 idle_stats.c1nc_gating_done_count_bin[bin]++;
358                         } else {
359                                 idle_stats.c0nc_gating_done_count++;
360                                 idle_stats.c0nc_gating_done_count_bin[bin]++;
361                         }
362                 }
363
364                 pr_debug("%lld %lld %d %d\n", request,
365                         ktime_to_us(ktime_sub(exit_time, entry_time)),
366                         offset, bin);
367         }
368
369         cpu_pm_exit();
370
371         return true;
372 }
373
374 static bool tegra_cpu_core_power_down(struct cpuidle_device *dev,
375                            struct cpuidle_state *state, s64 request)
376 {
377 #ifdef CONFIG_SMP
378         s64 sleep_time;
379         u32 cntp_tval;
380         u32 cntfrq;
381         ktime_t entry_time;
382         bool sleep_completed = false;
383         struct tick_sched *ts = tick_get_tick_sched(dev->cpu);
384         unsigned int cpu = cpu_number(dev->cpu);
385
386         if ((tegra_cpu_timer_get_remain(&request) == -ETIME) ||
387                 (request <= state->target_residency) || (!ts) ||
388                 (ts->nohz_mode == NOHZ_MODE_INACTIVE) ||
389                 !tegra_is_cpu_wake_timer_ready(dev->cpu)) {
390                 /*
391                  * Not enough time left to enter LP2, or wake timer not ready
392                  */
393                 cpu_do_idle();
394                 return false;
395         }
396
397 #ifdef CONFIG_TEGRA_LP2_CPU_TIMER
398         asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (cntfrq));
399         cntp_tval = (request - state->exit_latency) * (cntfrq / 1000000);
400         asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r"(cntp_tval));
401 #endif
402         cpu_pm_enter();
403
404 #if !defined(CONFIG_TEGRA_LP2_CPU_TIMER)
405         sleep_time = request - state->exit_latency;
406         clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
407         tegra_pd_set_trigger(sleep_time);
408 #endif
409         idle_stats.tear_down_count[cpu]++;
410
411         entry_time = ktime_get();
412
413         /* Save time this CPU must be awakened by. */
414         tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(entry_time) + request;
415         smp_wmb();
416
417 #ifdef CONFIG_TRUSTED_FOUNDATIONS
418         if ((cpu == 0) || (cpu == 4)) {
419                 tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE7,
420                                 (TEGRA_RESET_HANDLER_BASE +
421                                 tegra_cpu_reset_handler_offset));
422         }
423 #endif
424         cpu_suspend(0, tegra3_sleep_cpu_secondary_finish);
425
426         tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;
427
428 #ifdef CONFIG_TEGRA_LP2_CPU_TIMER
429         asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (cntp_tval));
430         if ((s32)cntp_tval <= 0)
431                 sleep_completed = true;
432 #else
433         sleep_completed = !tegra_pd_timer_remain();
434         tegra_pd_set_trigger(0);
435         clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
436 #endif
437         sleep_time = ktime_to_us(ktime_sub(ktime_get(), entry_time));
438         idle_stats.cpu_pg_time[cpu] += sleep_time;
439         if (sleep_completed) {
440                 /*
441                  * Stayed in LP2 for the full time until timer expires,
442                  * adjust the exit latency based on measurement
443                  */
444                 int offset = sleep_time - request;
445                 int latency = pd_exit_latencies[cpu] +
446                         offset / 16;
447                 latency = clamp(latency, 0, 10000);
448                 pd_exit_latencies[cpu] = latency;
449                 state->exit_latency = latency;          /* for idle governor */
450                 smp_wmb();
451         }
452 #endif
453         cpu_pm_exit();
454
455         return true;
456 }
457
458 bool tegra11x_idle_power_down(struct cpuidle_device *dev,
459                            struct cpuidle_state *state)
460 {
461         bool power_down;
462         bool cpu_gating_only = false;
463         bool clkgt_at_vmin = false;
464         bool power_gating_cpu_only = true;
465         int status = -1;
466         unsigned long rate;
467         s64 request;
468
469         if (tegra_cpu_timer_get_remain(&request)) {
470                 cpu_do_idle();
471                 return false;
472         }
473
474         tegra_set_cpu_in_pd(dev->cpu);
475         cpu_gating_only = (((fast_cluster_power_down_mode
476                         << TEGRA_POWER_CLUSTER_PART_SHIFT)
477                         & TEGRA_POWER_CLUSTER_PART_MASK) == 0);
478
479         if (is_lp_cluster()) {
480                 if (slow_cluster_power_gating_noncpu &&
481                         (request > TEGRA_MIN_RESIDENCY_NCPU_SLOW))
482                                 power_gating_cpu_only = false;
483                 else
484                         power_gating_cpu_only = true;
485         } else {
486                 if (num_online_cpus() > 1)
487                         power_gating_cpu_only = true;
488                 else {
489                         if (tegra_force_clkgt_at_vmin ==
490                                         TEGRA_CPUIDLE_FORCE_DO_CLKGT_VMIN)
491                                 clkgt_at_vmin = true;
492                         else if (tegra_force_clkgt_at_vmin ==
493                                         TEGRA_CPUIDLE_FORCE_NO_CLKGT_VMIN)
494                                 clkgt_at_vmin = false;
495                         else if ((request >= TEGRA_MIN_RESIDENCY_CLKGT_VMIN) &&
496                                  (request < TEGRA_MIN_RESIDENCY_NCPU_FAST))
497                                 clkgt_at_vmin = true;
498
499                         if (!cpu_gating_only && tegra_rail_off_is_allowed()) {
500                                 if (fast_cluster_power_down_mode &
501                                                 TEGRA_POWER_CLUSTER_FORCE_MASK)
502                                         power_gating_cpu_only = false;
503                                 else if (request >
504                                                 TEGRA_MIN_RESIDENCY_NCPU_FAST)
505                                         power_gating_cpu_only = false;
506                                 else
507                                         power_gating_cpu_only = true;
508                         } else
509                                 power_gating_cpu_only = true;
510                 }
511         }
512
513         if (clkgt_at_vmin) {
514                 rate = 0;
515                 status = tegra11_cpu_dfll_rate_exchange(&rate);
516                 if (!status) {
517                         idle_stats.clk_gating_vmin++;
518                         cpu_do_idle();
519                         tegra11_cpu_dfll_rate_exchange(&rate);
520                         power_down = false;
521                 } else
522                         power_down = tegra_cpu_core_power_down(dev, state,
523                                                                 request);
524         } else if (!power_gating_cpu_only) {
525                 if (is_lp_cluster()) {
526                         rate = ULONG_MAX;
527                         status = tegra_cpu_backup_rate_exchange(&rate);
528                 }
529
530                 power_down = tegra_cpu_cluster_power_down(dev, state, request);
531
532                 /* restore cpu clock after cluster power ungating */
533                 if (status == 0)
534                         tegra_cpu_backup_rate_exchange(&rate);
535         } else
536                 power_down = tegra_cpu_core_power_down(dev, state, request);
537
538         tegra_clear_cpu_in_pd(dev->cpu);
539
540         return power_down;
541 }
542
543 #ifdef CONFIG_DEBUG_FS
544 int tegra11x_pd_debug_show(struct seq_file *s, void *data)
545 {
546         int bin;
547         int i;
548         seq_printf(s, "                                    cpu0     cpu1     cpu2     cpu3     cpulp\n");
549         seq_printf(s, "-----------------------------------------------------------------------------\n");
550         seq_printf(s, "cpu ready:                      %8u %8u %8u %8u %8u\n",
551                 idle_stats.cpu_ready_count[0],
552                 idle_stats.cpu_ready_count[1],
553                 idle_stats.cpu_ready_count[2],
554                 idle_stats.cpu_ready_count[3],
555                 idle_stats.cpu_ready_count[4]);
556         seq_printf(s, "tear down:                      %8u %8u %8u %8u %8u\n",
557                 idle_stats.tear_down_count[0],
558                 idle_stats.tear_down_count[1],
559                 idle_stats.tear_down_count[2],
560                 idle_stats.tear_down_count[3],
561                 idle_stats.tear_down_count[4]);
562         seq_printf(s, "clk gating @ Vmin count:      %8u\n",
563                 idle_stats.clk_gating_vmin);
564         seq_printf(s, "rail gating count:      %8u\n",
565                 idle_stats.rail_gating_count);
566         seq_printf(s, "rail gating completed:  %8u %7u%%\n",
567                 idle_stats.rail_gating_done_count,
568                 idle_stats.rail_gating_done_count * 100 /
569                         (idle_stats.rail_gating_count ?: 1));
570
571         seq_printf(s, "c0nc gating count:      %8u\n",
572                 idle_stats.c0nc_gating_count);
573         seq_printf(s, "c0nc gating completed:  %8u %7u%%\n",
574                 idle_stats.c0nc_gating_done_count,
575                 idle_stats.c0nc_gating_done_count * 100 /
576                         (idle_stats.c0nc_gating_count ?: 1));
577
578         seq_printf(s, "c1nc gating count:      %8u\n",
579                 idle_stats.c1nc_gating_count);
580         seq_printf(s, "c1nc gating completed:  %8u %7u%%\n",
581                 idle_stats.c1nc_gating_done_count,
582                 idle_stats.c1nc_gating_done_count * 100 /
583                         (idle_stats.c1nc_gating_count ?: 1));
584
585         seq_printf(s, "\n");
586         seq_printf(s, "cpu ready time:                 " \
587                         "%8llu %8llu %8llu %8llu %8llu ms\n",
588                 div64_u64(idle_stats.cpu_wants_pd_time[0], 1000),
589                 div64_u64(idle_stats.cpu_wants_pd_time[1], 1000),
590                 div64_u64(idle_stats.cpu_wants_pd_time[2], 1000),
591                 div64_u64(idle_stats.cpu_wants_pd_time[3], 1000),
592                 div64_u64(idle_stats.cpu_wants_pd_time[4], 1000));
593
594         seq_printf(s, "cpu power gating time:          " \
595                         "%8llu %8llu %8llu %8llu %8llu ms\n",
596                 div64_u64(idle_stats.cpu_pg_time[0], 1000),
597                 div64_u64(idle_stats.cpu_pg_time[1], 1000),
598                 div64_u64(idle_stats.cpu_pg_time[2], 1000),
599                 div64_u64(idle_stats.cpu_pg_time[3], 1000),
600                 div64_u64(idle_stats.cpu_pg_time[4], 1000));
601
602         seq_printf(s, "power gated %%:                 " \
603                         "%7d%% %7d%% %7d%% %7d%% %7d%%\n",
604                 (int)(idle_stats.cpu_wants_pd_time[0] ?
605                         div64_u64(idle_stats.cpu_pg_time[0] * 100,
606                         idle_stats.cpu_wants_pd_time[0]) : 0),
607                 (int)(idle_stats.cpu_wants_pd_time[1] ?
608                         div64_u64(idle_stats.cpu_pg_time[1] * 100,
609                         idle_stats.cpu_wants_pd_time[1]) : 0),
610                 (int)(idle_stats.cpu_wants_pd_time[2] ?
611                         div64_u64(idle_stats.cpu_pg_time[2] * 100,
612                         idle_stats.cpu_wants_pd_time[2]) : 0),
613                 (int)(idle_stats.cpu_wants_pd_time[3] ?
614                         div64_u64(idle_stats.cpu_pg_time[3] * 100,
615                         idle_stats.cpu_wants_pd_time[3]) : 0),
616                 (int)(idle_stats.cpu_wants_pd_time[4] ?
617                         div64_u64(idle_stats.cpu_pg_time[4] * 100,
618                         idle_stats.cpu_wants_pd_time[4]) : 0));
619
620         seq_printf(s, "\n");
621         seq_printf(s, "rail gating time  c0nc gating time  c1nc gating time\n");
622         seq_printf(s, "%8llu ms          %8llu ms          %8llu ms\n",
623                 div64_u64(idle_stats.rail_pd_time, 1000),
624                 div64_u64(idle_stats.c0nc_pg_time, 1000),
625                 div64_u64(idle_stats.c1nc_pg_time, 1000));
626         seq_printf(s, "%8d%%             %8d%%             %8d%%\n",
627                 (int)(idle_stats.cpu_wants_pd_time[0] ?
628                         div64_u64(idle_stats.rail_pd_time * 100,
629                         idle_stats.cpu_wants_pd_time[0]) : 0),
630                 (int)(idle_stats.cpu_wants_pd_time[0] ?
631                         div64_u64(idle_stats.c0nc_pg_time * 100,
632                         idle_stats.cpu_wants_pd_time[0]) : 0),
633                 (int)(idle_stats.cpu_wants_pd_time[4] ?
634                         div64_u64(idle_stats.c1nc_pg_time * 100,
635                         idle_stats.cpu_wants_pd_time[4]) : 0));
636
637         seq_printf(s, "\n");
638
639         seq_printf(s, "%19s %8s %8s %8s\n", "", "rail gating", "comp", "%");
640         seq_printf(s, "-------------------------------------------------\n");
641         for (bin = 0; bin < 32; bin++) {
642                 if (idle_stats.rail_gating_bin[bin] == 0)
643                         continue;
644                 seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
645                         1 << (bin - 1), 1 << bin,
646                         idle_stats.rail_gating_bin[bin],
647                         idle_stats.rail_gating_done_count_bin[bin],
648                         idle_stats.rail_gating_done_count_bin[bin] * 100 /
649                                 idle_stats.rail_gating_bin[bin]);
650         }
651         seq_printf(s, "\n");
652
653         seq_printf(s, "%19s %8s %8s %8s\n", "", "c0nc gating", "comp", "%");
654         seq_printf(s, "-------------------------------------------------\n");
655         for (bin = 0; bin < 32; bin++) {
656                 if (idle_stats.c0nc_gating_bin[bin] == 0)
657                         continue;
658                 seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
659                         1 << (bin - 1), 1 << bin,
660                         idle_stats.c0nc_gating_bin[bin],
661                         idle_stats.c0nc_gating_done_count_bin[bin],
662                         idle_stats.c0nc_gating_done_count_bin[bin] * 100 /
663                                 idle_stats.c0nc_gating_bin[bin]);
664         }
665         seq_printf(s, "\n");
666
667         seq_printf(s, "%19s %8s %8s %8s\n", "", "c1nc gating", "comp", "%");
668         seq_printf(s, "-------------------------------------------------\n");
669         for (bin = 0; bin < 32; bin++) {
670                 if (idle_stats.c1nc_gating_bin[bin] == 0)
671                         continue;
672                 seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
673                         1 << (bin - 1), 1 << bin,
674                         idle_stats.c1nc_gating_bin[bin],
675                         idle_stats.c1nc_gating_done_count_bin[bin],
676                         idle_stats.c1nc_gating_done_count_bin[bin] * 100 /
677                                 idle_stats.c1nc_gating_bin[bin]);
678         }
679
680         seq_printf(s, "\n");
681         seq_printf(s, "%3s %20s %6s %10s\n",
682                 "int", "name", "count", "last count");
683         seq_printf(s, "--------------------------------------------\n");
684         for (i = 0; i < NR_IRQS; i++) {
685                 if (idle_stats.pd_int_count[i] == 0)
686                         continue;
687                 seq_printf(s, "%3d %20s %6d %10d\n",
688                         i, irq_to_desc(i)->action ?
689                                 irq_to_desc(i)->action->name ?: "???" : "???",
690                         idle_stats.pd_int_count[i],
691                         idle_stats.pd_int_count[i] -
692                                 idle_stats.last_pd_int_count[i]);
693                 idle_stats.last_pd_int_count[i] = idle_stats.pd_int_count[i];
694         };
695         return 0;
696 }
697 #endif
698
699 int __init tegra11x_cpuidle_init_soc(struct tegra_cpuidle_ops *idle_ops)
700 {
701         int i;
702         struct tegra_cpuidle_ops ops = {
703                 tegra11x_idle_power_down,
704                 tegra11x_cpu_idle_stats_pd_ready,
705                 tegra11x_cpu_idle_stats_pd_time,
706                 tegra11x_pd_is_allowed,
707 #ifdef CONFIG_DEBUG_FS
708                 tegra11x_pd_debug_show
709 #endif
710         };
711
712         cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");
713
714         for (i = 0; i < ARRAY_SIZE(pd_exit_latencies); i++)
715                 pd_exit_latencies[i] = tegra_pg_exit_latency;
716
717         *idle_ops = ops;
718         return 0;
719 }