/*
 * Source: linux-3.10.git, arch/arm/mach-tegra/cpuidle-t11x.c
 * (snapshot at commit "ARM: tegra12: set CPU rate to 2.2GHz for sku 0x87")
 */
1 /*
2  * arch/arm/mach-tegra/cpuidle-t11x.c
3  *
4  * CPU idle driver for Tegra11x CPUs
5  *
6  * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/cpu.h>
25 #include <linux/cpuidle.h>
26 #include <linux/debugfs.h>
27 #include <linux/delay.h>
28 #include <linux/hrtimer.h>
29 #include <linux/init.h>
30 #include <linux/interrupt.h>
31 #include <linux/irq.h>
32 #include <linux/io.h>
33 #include <linux/ratelimit.h>
34 #include <linux/sched.h>
35 #include <linux/seq_file.h>
36 #include <linux/slab.h>
37 #include <linux/smp.h>
38 #include <linux/suspend.h>
39 #include <linux/tick.h>
40 #include <linux/clk.h>
41 #include <linux/cpu_pm.h>
42 #include <linux/module.h>
43 #include <linux/tegra-soc.h>
44 #include <linux/tegra-timer.h>
45 #include <linux/tegra-cpuidle.h>
46 #include <linux/irqchip/tegra.h>
47
48 #include <asm/cacheflush.h>
49 #include <asm/localtimer.h>
50 #include <asm/suspend.h>
51 #include <asm/cputype.h>
52
53 #include <mach/irqs.h>
54
55 #include <trace/events/nvpower.h>
56
57 #include "clock.h"
58 #include "dvfs.h"
59 #include "iomap.h"
60 #include "pm.h"
61 #include "reset.h"
62 #include "sleep.h"
63 #include "tegra_ptm.h"
64
/* CPU complex reset status register in the clock-and-reset controller */
#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
/* PMC partition power-gate status register */
#define PMC_POWERGATE_STATUS \
	(IO_ADDRESS(TEGRA_PMC_BASE) + 0x038)

/* ARM generic timer CNTP_CTL control bits */
#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)

#ifdef CONFIG_SMP
/*
 * Absolute time (us, ktime-based) each secondary CPU must be awake by;
 * LLONG_MAX means "no pending wake deadline".  Written by a CPU entering
 * core power-down, read by CPU0 before a cluster power-down.
 */
static s64 tegra_cpu_wake_by_time[4] = {
	LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
#endif

/*
 * Bitmask of CPUs allowed to power gate in idle; bits 0-3 are the fast
 * cluster cores, bit 4 is the LP core (see cpu_number()).
 */
static ulong cpu_power_gating_in_idle __read_mostly = 0x1f;
module_param(cpu_power_gating_in_idle, ulong, 0644);

/* Allow gating the non-CPU partition while on the slow (LP) cluster */
static bool slow_cluster_power_gating_noncpu __read_mostly;
module_param(slow_cluster_power_gating_noncpu, bool, 0644);

/*
 * Partition selection for fast-cluster power down; shifted into the
 * TEGRA_POWER_CLUSTER_PART field (0 => CPU-only gating).
 */
static uint fast_cluster_power_down_mode __read_mostly;
module_param(fast_cluster_power_down_mode, uint, 0644);

/* Also stop the memory controller clock in deep idle when residency allows */
static bool stop_mc_clk_in_idle __read_mostly = false;
module_param(stop_mc_clk_in_idle, bool, 0644);

/* "cpu_g" clock handle, used to query DVFS rail-update status */
static struct clk *cpu_clk_for_dvfs;

/* Measured power-down exit latency per CPU (index 4 = LP core), in us */
static int pd_exit_latencies[5];

/*
 * Idle statistics, reported via tegra11x_pd_debug_show().  Per-CPU
 * arrays are indexed 0-3 for fast-cluster cores, 4 for the LP core;
 * *_bin[32] arrays are log2-of-milliseconds histograms (time_to_bin()).
 */
static struct {
	unsigned int cpu_ready_count[5];	/* power-down eligible entries */
	unsigned int tear_down_count[5];	/* actual power-down attempts */
	unsigned long long cpu_wants_pd_time[5];/* us the CPU wanted gating */
	unsigned long long cpu_pg_time[5];	/* us actually power gated */
	unsigned long long rail_pd_time;	/* us with CPU rail off */
	unsigned long long c0nc_pg_time;	/* us fast-cluster non-CPU gated */
	unsigned long long c1nc_pg_time;	/* us LP-cluster non-CPU gated */
	unsigned long long mc_clk_stop_time;	/* us with MC clock stopped */
	unsigned int rail_gating_count;
	unsigned int rail_gating_bin[32];
	unsigned int rail_gating_done_count;
	unsigned int rail_gating_done_count_bin[32];
	unsigned int c0nc_gating_count;
	unsigned int c0nc_gating_bin[32];
	unsigned int c0nc_gating_done_count;
	unsigned int c0nc_gating_done_count_bin[32];
	unsigned int c1nc_gating_count;
	unsigned int c1nc_gating_bin[32];
	unsigned int c1nc_gating_done_count;
	unsigned int c1nc_gating_done_count_bin[32];
	unsigned int mc_clk_stop_count;
	unsigned int mc_clk_stop_bin[32];
	unsigned int mc_clk_stop_done_count;
	unsigned int mc_clk_stop_done_count_bin[32];
	unsigned int pd_int_count[NR_IRQS];	/* IRQs that cut a sleep short */
	unsigned int last_pd_int_count[NR_IRQS];/* snapshot at last debugfs read */
	unsigned int clk_gating_vmin;		/* clock-gate-at-Vmin entries */
} idle_stats;
123
/* Map a duration in ms to its log2 histogram bin (highest set bit). */
static inline unsigned int time_to_bin(unsigned int ms)
{
	return fls(ms);
}
128
129 static inline void tegra_irq_unmask(int irq)
130 {
131         struct irq_data *data = irq_get_irq_data(irq);
132         data->chip->irq_unmask(data);
133 }
134
/*
 * Logical index used for stats and latency tables: the LP core is
 * always slot 4, fast-cluster cores keep their own number.
 */
static inline unsigned int cpu_number(unsigned int n)
{
	if (is_lp_cluster())
		return 4;
	return n;
}
139
140 void tegra11x_cpu_idle_stats_pd_ready(unsigned int cpu)
141 {
142         idle_stats.cpu_ready_count[cpu_number(cpu)]++;
143 }
144
145 void tegra11x_cpu_idle_stats_pd_time(unsigned int cpu, s64 us)
146 {
147         idle_stats.cpu_wants_pd_time[cpu_number(cpu)] += us;
148 }
149
150 /* Allow rail off only if all secondary CPUs are power gated, and no
151    rail update is in progress */
152 static bool tegra_rail_off_is_allowed(void)
153 {
154         u32 rst = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
155         u32 pg = readl(PMC_POWERGATE_STATUS) >> 8;
156
157         if (((rst & 0xE) != 0xE) || ((pg & 0xE) != 0))
158                 return false;
159
160         if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
161                 return false;
162
163         return true;
164 }
165
166 bool tegra11x_pd_is_allowed(struct cpuidle_device *dev,
167         struct cpuidle_state *state)
168 {
169         s64 request;
170
171         if (!cpumask_test_cpu(cpu_number(dev->cpu),
172                                 to_cpumask(&cpu_power_gating_in_idle)))
173                 return false;
174
175         if (tegra_cpu_timer_get_remain(&request))
176                 return false;
177
178         if (state->exit_latency != pd_exit_latencies[cpu_number(dev->cpu)]) {
179                 /* possible on the 1st entry after cluster switch*/
180                 state->exit_latency = pd_exit_latencies[cpu_number(dev->cpu)];
181                 tegra_pd_update_target_residency(state);
182         }
183         if (request < state->target_residency) {
184                 /* Not enough time left to enter LP2 */
185                 return false;
186         }
187
188         return true;
189 }
190
/*
 * Undo the IRQ re-routing done for a multi-CPU cluster power down:
 * with the GIC distributor briefly disabled, restore the saved
 * per-CPU interrupt affinity (everything was routed to CPU0).
 */
static inline void tegra11_irq_restore_affinity(void)
{
#ifdef CONFIG_SMP
	/* Disable the distributor. */
	tegra_gic_dist_disable();

	/* Restore the other CPU's interrupt affinity. */
	tegra_gic_restore_affinity();

	/* Re-enable the distributor. */
	tegra_gic_dist_enable();
#endif
}
204
/*
 * Enter a cluster-wide power-down state: non-CPU partition gating,
 * CPU rail gating (fast cluster only), optionally with the memory
 * controller clock stopped.  @request is the expected idle time in us.
 *
 * Returns true if the cpu_pm path was taken and a power down was
 * attempted; false if it fell back to a plain cpu_do_idle().
 */
static bool tegra_cpu_cluster_power_down(struct cpuidle_device *dev,
			   struct cpuidle_state *state, s64 request)
{
	ktime_t entry_time;
	ktime_t exit_time;
	bool sleep_completed = false;
	bool multi_cpu_entry = false;
	int bin;
	unsigned int flag = 0;
	s64 sleep_time;

	/* LP2 entry time */
	entry_time = ktime_get();

	if (request < state->target_residency) {
		/* Not enough time left to enter LP2 */
		cpu_do_idle();
		return false;
	}

#ifdef CONFIG_SMP
	multi_cpu_entry = !is_lp_cluster() && (num_online_cpus() > 1);
	if (multi_cpu_entry) {
		s64 wake_time;
		unsigned int i;

		/* Disable the distributor -- this is the only way to
		   prevent the other CPUs from responding to interrupts
		   and potentially fiddling with the distributor
		   registers while we're fiddling with them. */
		tegra_gic_dist_disable();

		/* Did an interrupt come in for another CPU before we
		   could disable the distributor? */
		if (!tegra_rail_off_is_allowed()) {
			/* Yes, re-enable the distributor and clock gating. */
			tegra_gic_dist_enable();
			cpu_do_idle();
			return false;
		}

		/* LP2 initial targeted wake time */
		wake_time = ktime_to_us(entry_time) + request;

		/* CPU0 must wake up before any of the other CPUs. */
		smp_rmb();
		/*
		 * NOTE(review): tegra_cpu_wake_by_time[] has 4 entries but
		 * the loop bound is CONFIG_NR_CPUS -- assumes this SoC is
		 * configured with at most 4 CPUs; verify the Kconfig.
		 */
		for (i = 1; i < CONFIG_NR_CPUS; i++)
			wake_time = min_t(s64, wake_time,
				tegra_cpu_wake_by_time[i]);

		/* LP2 actual targeted wake time */
		request = wake_time - ktime_to_us(entry_time);
		/* NOTE(review): asserts on wake_time, not the freshly
		   derived request -- looks like it may have been meant to
		   guard request; confirm before changing. */
		BUG_ON(wake_time < 0LL);

		if (request < state->target_residency) {
			/* Not enough time left to enter LP2 */
			tegra_gic_dist_enable();
			cpu_do_idle();
			return false;
		}

		/* Cancel power gating wake timers for all secondary CPUs */
		tegra_pd_timer_cancel_secondary();

		/* Save and disable the affinity setting for the other
		   CPUs and route all interrupts to CPU0. */
		tegra_gic_disable_affinity();

		/* Re-enable the distributor. */
		tegra_gic_dist_enable();
	}
#endif
	cpu_pm_enter();

	/* Budget the sleep so there is time to exit before the deadline */
	sleep_time = request -
		pd_exit_latencies[cpu_number(dev->cpu)];

	/* Histogram bin is log2 of the request in milliseconds */
	bin = time_to_bin((u32)request / 1000);
	idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	if (is_lp_cluster()) {
		/* here we are not supporting emulation mode, for now */
		flag = TEGRA_POWER_CLUSTER_PART_NONCPU;
		idle_stats.c1nc_gating_count++;
		idle_stats.c1nc_gating_bin[bin]++;
	} else {
		tegra_dvfs_rail_off(tegra_cpu_rail, entry_time);
		flag = (fast_cluster_power_down_mode
			<< TEGRA_POWER_CLUSTER_PART_SHIFT)
			& TEGRA_POWER_CLUSTER_PART_MASK;

		/* Downgrade to non-CPU partition gating when the window is
		   too short for rail gating, unless a mode is forced */
		if (((request < tegra_min_residency_crail()) &&
			(flag != TEGRA_POWER_CLUSTER_PART_MASK)) &&
			((fast_cluster_power_down_mode &
			TEGRA_POWER_CLUSTER_FORCE_MASK) == 0))
			flag = TEGRA_POWER_CLUSTER_PART_NONCPU;

		if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
			idle_stats.rail_gating_count++;
			idle_stats.rail_gating_bin[bin]++;
		} else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
			idle_stats.c0nc_gating_count++;
			idle_stats.c0nc_gating_bin[bin]++;
		}
	}

	/* Optionally stop the MC clock in the deepest state */
	if (stop_mc_clk_in_idle && (state->power_usage == 0) &&
	    (request > tegra_mc_clk_stop_min_residency())) {
		flag |= TEGRA_POWER_STOP_MC_CLK;

		trace_nvmc_clk_stop_rcuidle(NVPOWER_MC_CLK_STOP_ENTRY,
						sleep_time);
		idle_stats.mc_clk_stop_count++;
		idle_stats.mc_clk_stop_bin[bin]++;

		tegra_mc_clk_prepare();
	}

	/* 0 => slept the full budget; otherwise record the waking IRQ */
	if (tegra_idle_power_down_last(sleep_time, flag) == 0)
		sleep_completed = true;
	else {
		int irq = tegra_gic_pending_interrupt();
		idle_stats.pd_int_count[irq]++;
	}

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
	exit_time = ktime_get();

	if (flag & TEGRA_POWER_STOP_MC_CLK)
		tegra_mc_clk_finish();

	if (!is_lp_cluster())
		tegra_dvfs_rail_on(tegra_cpu_rail, exit_time);

	/* Account wall time against the deepest state that was selected */
	if (flag & TEGRA_POWER_STOP_MC_CLK)
		idle_stats.mc_clk_stop_time +=
			ktime_to_us(ktime_sub(exit_time, entry_time));
	else if (flag & TEGRA_POWER_CLUSTER_PART_CRAIL)
		idle_stats.rail_pd_time +=
			ktime_to_us(ktime_sub(exit_time, entry_time));
	else if (flag & TEGRA_POWER_CLUSTER_PART_NONCPU) {
		if (is_lp_cluster())
			idle_stats.c1nc_pg_time +=
				ktime_to_us(ktime_sub(exit_time, entry_time));
		else
			idle_stats.c0nc_pg_time +=
				ktime_to_us(ktime_sub(exit_time, entry_time));
	}

	if (multi_cpu_entry)
		tegra11_irq_restore_affinity();

	if (sleep_completed) {
		/*
		 * Stayed in LP2 for the full time until the next tick,
		 * adjust the exit latency based on measurement
		 */
		int offset = ktime_to_us(ktime_sub(exit_time, entry_time))
			- request;
		/* IIR-style smoothing: move 1/16 of the error per sample */
		int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
			offset / 16;
		latency = clamp(latency, 0, 10000);
		pd_exit_latencies[cpu_number(dev->cpu)] = latency;
		state->exit_latency = latency;		/* for idle governor */
		smp_wmb();

		if (flag & TEGRA_POWER_STOP_MC_CLK) {
			trace_nvmc_clk_stop_rcuidle(NVPOWER_MC_CLK_STOP_EXIT,
							sleep_time);
			idle_stats.mc_clk_stop_done_count++;
			idle_stats.mc_clk_stop_done_count_bin[bin]++;
		} else if (flag & TEGRA_POWER_CLUSTER_PART_CRAIL) {
			idle_stats.rail_gating_done_count++;
			idle_stats.rail_gating_done_count_bin[bin]++;
		} else if (flag & TEGRA_POWER_CLUSTER_PART_NONCPU) {
			if (is_lp_cluster()) {
				idle_stats.c1nc_gating_done_count++;
				idle_stats.c1nc_gating_done_count_bin[bin]++;
			} else {
				idle_stats.c0nc_gating_done_count++;
				idle_stats.c0nc_gating_done_count_bin[bin]++;
			}
		}

		pr_debug("%lld %lld %d %d\n", request,
			ktime_to_us(ktime_sub(exit_time, entry_time)),
			offset, bin);
	}

	cpu_pm_exit();

	return true;
}
399
/*
 * Power down only this core (no cluster-level state change).  The wake
 * deadline is armed either on the ARM generic timer's CNTP_TVAL
 * (CONFIG_TEGRA_LP2_CPU_TIMER) or on the external PD trigger timer.
 *
 * Returns true if cpu_suspend() was entered, false on fallback to
 * cpu_do_idle().
 */
static bool tegra_cpu_core_power_down(struct cpuidle_device *dev,
			   struct cpuidle_state *state, s64 request)
{
#ifdef CONFIG_SMP
	s64 sleep_time;
	u32 cntp_tval;
	u32 cntfrq;
	ktime_t entry_time;
	bool sleep_completed = false;
	struct tick_sched *ts = tick_get_tick_sched(dev->cpu);
	unsigned int cpu = cpu_number(dev->cpu);

	if ((tegra_cpu_timer_get_remain(&request) == -ETIME) ||
		(request <= state->target_residency) || (!ts) ||
		(ts->nohz_mode == NOHZ_MODE_INACTIVE) ||
		!tegra_is_cpu_wake_timer_ready(dev->cpu)) {
		/*
		 * Not enough time left to enter LP2, or wake timer not ready
		 */
		cpu_do_idle();
		return false;
	}

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
	/* Program CNTP_TVAL (CP15 c14 c2 0) to expire after the budget */
	cntfrq = tegra_clk_measure_input_freq();
	cntp_tval = (request - state->exit_latency) * (cntfrq / 1000000);
	asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r"(cntp_tval));
#endif
	cpu_pm_enter();

#if !defined(CONFIG_TEGRA_LP2_CPU_TIMER)
	/* No per-CPU arch timer: arm the platform PD trigger instead */
	sleep_time = request - state->exit_latency;
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	tegra_pd_set_trigger(sleep_time);
#endif
	idle_stats.tear_down_count[cpu]++;

	entry_time = ktime_get();

	/* Save time this CPU must be awakened by. */
	tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(entry_time) + request;
	smp_wmb();

#ifdef CONFIG_TEGRA_USE_SECURE_KERNEL
	/* Tell the secure monitor where the CPU reset handler lives */
	if ((cpu == 0) || (cpu == 4)) {
		tegra_generic_smc(0x84000001, ((1 << 16) | 5),
				(TEGRA_RESET_HANDLER_BASE +
				tegra_cpu_reset_handler_offset));
	}
#endif
	cpu_suspend(0, tegra3_sleep_cpu_secondary_finish);

	/* Awake again: retract our published wake deadline */
	tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
	/* Timer value at/below zero => the full budget elapsed */
	asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (cntp_tval));
	if ((s32)cntp_tval <= 0)
		sleep_completed = true;
#else
	sleep_completed = !tegra_pd_timer_remain();
	tegra_pd_set_trigger(0);
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
#endif
	sleep_time = ktime_to_us(ktime_sub(ktime_get(), entry_time));
	idle_stats.cpu_pg_time[cpu] += sleep_time;
	if (sleep_completed) {
		/*
		 * Stayed in LP2 for the full time until timer expires,
		 * adjust the exit latency based on measurement
		 */
		int offset = sleep_time - request;
		/* IIR-style smoothing: apply 1/16 of the error per sample */
		int latency = pd_exit_latencies[cpu] +
			offset / 16;
		latency = clamp(latency, 0, 10000);
		pd_exit_latencies[cpu] = latency;
		state->exit_latency = latency;		/* for idle governor */
		smp_wmb();
	}
#endif
	cpu_pm_exit();

	return true;
}
483
/*
 * Top-level power-down dispatch for the cpuidle driver.  Decides among:
 *  - clock gating at Vmin (drop cpu_g rate to minimum, then cpu_do_idle),
 *  - cluster power down (tegra_cpu_cluster_power_down),
 *  - core-only power down (tegra_cpu_core_power_down),
 * based on cluster, online CPU count, DVFS state and remaining idle time.
 *
 * Returns whether a power-down path was actually taken.
 */
bool tegra11x_idle_power_down(struct cpuidle_device *dev,
			   struct cpuidle_state *state)
{
	bool power_down;
	bool cpu_gating_only = false;
	bool clkgt_at_vmin = false;
	bool power_gating_cpu_only = true;
	int status = -1;
	unsigned long rate;
	s64 request;

	if (tegra_cpu_timer_get_remain(&request)) {
		cpu_do_idle();
		return false;
	}

	tegra_set_cpu_in_pd(dev->cpu);
	/* CPU-only gating when no cluster partition mode is configured,
	   or when the DFLL bypass is active outside DFLL mode */
	cpu_gating_only = (((fast_cluster_power_down_mode
			<< TEGRA_POWER_CLUSTER_PART_SHIFT)
			& TEGRA_POWER_CLUSTER_PART_MASK) == 0) ||
			(tegra_dvfs_is_dfll_bypass() &&
			 !tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail));

	if (is_lp_cluster()) {
		/* LP cluster: non-CPU gating only if enabled and the idle
		   window is long enough */
		if (slow_cluster_power_gating_noncpu &&
			(request > tegra_min_residency_ncpu()))
				power_gating_cpu_only = false;
		else
			power_gating_cpu_only = true;
	} else {
		if (num_online_cpus() > 1)
			power_gating_cpu_only = true;
		else {
			/* Single fast-cluster CPU online: consider clock
			   gating at Vmin, honoring the force overrides */
			if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
				clkgt_at_vmin = false;
			else if (tegra_force_clkgt_at_vmin ==
					TEGRA_CPUIDLE_FORCE_DO_CLKGT_VMIN)
				clkgt_at_vmin = true;
			else if (tegra_force_clkgt_at_vmin ==
					TEGRA_CPUIDLE_FORCE_NO_CLKGT_VMIN)
				clkgt_at_vmin = false;
			else if ((request >= tegra_min_residency_vmin_fmin()) &&
				 ((request < tegra_min_residency_ncpu()) ||
				   cpu_gating_only))
				clkgt_at_vmin = true;

			if (!cpu_gating_only && tegra_rail_off_is_allowed()) {
				if (fast_cluster_power_down_mode &
						TEGRA_POWER_CLUSTER_FORCE_MASK)
					power_gating_cpu_only = false;
				else if (request >
						tegra_min_residency_ncpu())
					power_gating_cpu_only = false;
				else
					power_gating_cpu_only = true;
			} else
				power_gating_cpu_only = true;
		}
	}

	if (clkgt_at_vmin) {
		/* Swap cpu_g down to its idle rate around cpu_do_idle();
		   on failure fall back to core power down */
		rate = 0;
		status = tegra_cpu_g_idle_rate_exchange(&rate);
		if (!status) {
			idle_stats.clk_gating_vmin++;
			cpu_do_idle();
			tegra_cpu_g_idle_rate_exchange(&rate);
			power_down = true;
		} else
			power_down = tegra_cpu_core_power_down(dev, state,
								request);
	} else if (!power_gating_cpu_only) {
		if (is_lp_cluster()) {
			rate = ULONG_MAX;
			status = tegra_cpu_lp_idle_rate_exchange(&rate);
		}

		power_down = tegra_cpu_cluster_power_down(dev, state, request);

		/* restore cpu clock after cluster power ungating */
		if (status == 0)
			tegra_cpu_lp_idle_rate_exchange(&rate);
	} else
		power_down = tegra_cpu_core_power_down(dev, state, request);

	ptm_power_idle_resume(dev->cpu);
	tegra_clear_cpu_in_pd(dev->cpu);

	return power_down;
}
574
575 #ifdef CONFIG_DEBUG_FS
576 int tegra11x_pd_debug_show(struct seq_file *s, void *data)
577 {
578         int bin;
579         int i;
580         seq_printf(s, "                                    cpu0     cpu1     cpu2     cpu3     cpulp\n");
581         seq_printf(s, "-----------------------------------------------------------------------------\n");
582         seq_printf(s, "cpu ready:                      %8u %8u %8u %8u %8u\n",
583                 idle_stats.cpu_ready_count[0],
584                 idle_stats.cpu_ready_count[1],
585                 idle_stats.cpu_ready_count[2],
586                 idle_stats.cpu_ready_count[3],
587                 idle_stats.cpu_ready_count[4]);
588         seq_printf(s, "tear down:                      %8u %8u %8u %8u %8u\n",
589                 idle_stats.tear_down_count[0],
590                 idle_stats.tear_down_count[1],
591                 idle_stats.tear_down_count[2],
592                 idle_stats.tear_down_count[3],
593                 idle_stats.tear_down_count[4]);
594         seq_printf(s, "clk gating @ Vmin count:      %8u\n",
595                 idle_stats.clk_gating_vmin);
596         seq_printf(s, "rail gating count:      %8u\n",
597                 idle_stats.rail_gating_count);
598         seq_printf(s, "rail gating completed:  %8u %7u%%\n",
599                 idle_stats.rail_gating_done_count,
600                 idle_stats.rail_gating_done_count * 100 /
601                         (idle_stats.rail_gating_count ?: 1));
602
603         seq_printf(s, "c0nc gating count:      %8u\n",
604                 idle_stats.c0nc_gating_count);
605         seq_printf(s, "c0nc gating completed:  %8u %7u%%\n",
606                 idle_stats.c0nc_gating_done_count,
607                 idle_stats.c0nc_gating_done_count * 100 /
608                         (idle_stats.c0nc_gating_count ?: 1));
609
610         seq_printf(s, "c1nc gating count:      %8u\n",
611                 idle_stats.c1nc_gating_count);
612         seq_printf(s, "c1nc gating completed:  %8u %7u%%\n",
613                 idle_stats.c1nc_gating_done_count,
614                 idle_stats.c1nc_gating_done_count * 100 /
615                         (idle_stats.c1nc_gating_count ?: 1));
616
617         seq_printf(s, "\n");
618         seq_printf(s, "cpu ready time:                 " \
619                         "%8llu %8llu %8llu %8llu %8llu ms\n",
620                 div64_u64(idle_stats.cpu_wants_pd_time[0], 1000),
621                 div64_u64(idle_stats.cpu_wants_pd_time[1], 1000),
622                 div64_u64(idle_stats.cpu_wants_pd_time[2], 1000),
623                 div64_u64(idle_stats.cpu_wants_pd_time[3], 1000),
624                 div64_u64(idle_stats.cpu_wants_pd_time[4], 1000));
625
626         seq_printf(s, "cpu power gating time:          " \
627                         "%8llu %8llu %8llu %8llu %8llu ms\n",
628                 div64_u64(idle_stats.cpu_pg_time[0], 1000),
629                 div64_u64(idle_stats.cpu_pg_time[1], 1000),
630                 div64_u64(idle_stats.cpu_pg_time[2], 1000),
631                 div64_u64(idle_stats.cpu_pg_time[3], 1000),
632                 div64_u64(idle_stats.cpu_pg_time[4], 1000));
633
634         seq_printf(s, "power gated %%:                 " \
635                         "%7d%% %7d%% %7d%% %7d%% %7d%%\n",
636                 (int)(idle_stats.cpu_wants_pd_time[0] ?
637                         div64_u64(idle_stats.cpu_pg_time[0] * 100,
638                         idle_stats.cpu_wants_pd_time[0]) : 0),
639                 (int)(idle_stats.cpu_wants_pd_time[1] ?
640                         div64_u64(idle_stats.cpu_pg_time[1] * 100,
641                         idle_stats.cpu_wants_pd_time[1]) : 0),
642                 (int)(idle_stats.cpu_wants_pd_time[2] ?
643                         div64_u64(idle_stats.cpu_pg_time[2] * 100,
644                         idle_stats.cpu_wants_pd_time[2]) : 0),
645                 (int)(idle_stats.cpu_wants_pd_time[3] ?
646                         div64_u64(idle_stats.cpu_pg_time[3] * 100,
647                         idle_stats.cpu_wants_pd_time[3]) : 0),
648                 (int)(idle_stats.cpu_wants_pd_time[4] ?
649                         div64_u64(idle_stats.cpu_pg_time[4] * 100,
650                         idle_stats.cpu_wants_pd_time[4]) : 0));
651
652         seq_printf(s, "\n");
653         seq_printf(s, "rail gating time  c0nc gating time  " \
654                         "c1nc gating time  mc_clk gating time\n");
655         seq_printf(s, "%8llu ms          %8llu ms          " \
656                         "%8llu ms          %8llu ms\n",
657                 div64_u64(idle_stats.rail_pd_time, 1000),
658                 div64_u64(idle_stats.c0nc_pg_time, 1000),
659                 div64_u64(idle_stats.c1nc_pg_time, 1000),
660                 div64_u64(idle_stats.mc_clk_stop_time, 1000));
661         seq_printf(s, "%8d%%             %8d%%             " \
662                         "%8d%%             %8d%%\n",
663                 (int)(idle_stats.cpu_wants_pd_time[0] ?
664                         div64_u64(idle_stats.rail_pd_time * 100,
665                         idle_stats.cpu_wants_pd_time[0]) : 0),
666                 (int)(idle_stats.cpu_wants_pd_time[0] ?
667                         div64_u64(idle_stats.c0nc_pg_time * 100,
668                         idle_stats.cpu_wants_pd_time[0]) : 0),
669                 (int)(idle_stats.cpu_wants_pd_time[4] ?
670                         div64_u64(idle_stats.c1nc_pg_time * 100,
671                         idle_stats.cpu_wants_pd_time[4]) : 0),
672                 (int)(idle_stats.cpu_wants_pd_time[4] ?
673                         div64_u64(idle_stats.mc_clk_stop_time * 100,
674                         idle_stats.cpu_wants_pd_time[4]) : 0));
675
676         seq_printf(s, "\n");
677
678         seq_printf(s, "%19s %8s %8s %8s\n", "", "rail gating", "comp", "%");
679         seq_printf(s, "-------------------------------------------------\n");
680         for (bin = 0; bin < 32; bin++) {
681                 if (idle_stats.rail_gating_bin[bin] == 0)
682                         continue;
683                 seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
684                         1 << (bin - 1), 1 << bin,
685                         idle_stats.rail_gating_bin[bin],
686                         idle_stats.rail_gating_done_count_bin[bin],
687                         idle_stats.rail_gating_done_count_bin[bin] * 100 /
688                                 idle_stats.rail_gating_bin[bin]);
689         }
690         seq_printf(s, "\n");
691
692         seq_printf(s, "%19s %8s %8s %8s\n", "", "c0nc gating", "comp", "%");
693         seq_printf(s, "-------------------------------------------------\n");
694         for (bin = 0; bin < 32; bin++) {
695                 if (idle_stats.c0nc_gating_bin[bin] == 0)
696                         continue;
697                 seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
698                         1 << (bin - 1), 1 << bin,
699                         idle_stats.c0nc_gating_bin[bin],
700                         idle_stats.c0nc_gating_done_count_bin[bin],
701                         idle_stats.c0nc_gating_done_count_bin[bin] * 100 /
702                                 idle_stats.c0nc_gating_bin[bin]);
703         }
704         seq_printf(s, "\n");
705
706         seq_printf(s, "%19s %8s %8s %8s\n", "", "c1nc gating", "comp", "%");
707         seq_printf(s, "-------------------------------------------------\n");
708         for (bin = 0; bin < 32; bin++) {
709                 if (idle_stats.c1nc_gating_bin[bin] == 0)
710                         continue;
711                 seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
712                         1 << (bin - 1), 1 << bin,
713                         idle_stats.c1nc_gating_bin[bin],
714                         idle_stats.c1nc_gating_done_count_bin[bin],
715                         idle_stats.c1nc_gating_done_count_bin[bin] * 100 /
716                                 idle_stats.c1nc_gating_bin[bin]);
717         }
718         seq_printf(s, "\n");
719
720         seq_printf(s, "%19s %8s %8s %8s\n", "", "mc clk stop", "comp", "%");
721         seq_printf(s, "-------------------------------------------------\n");
722         for (bin = 0; bin < 32; bin++) {
723                 if (idle_stats.mc_clk_stop_bin[bin] == 0)
724                         continue;
725                 seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
726                         1 << (bin - 1), 1 << bin,
727                         idle_stats.mc_clk_stop_bin[bin],
728                         idle_stats.mc_clk_stop_done_count_bin[bin],
729                         idle_stats.mc_clk_stop_done_count_bin[bin] * 100 /
730                                 idle_stats.mc_clk_stop_bin[bin]);
731         }
732
733         seq_printf(s, "\n");
734         seq_printf(s, "%3s %20s %6s %10s\n",
735                 "int", "name", "count", "last count");
736         seq_printf(s, "--------------------------------------------\n");
737         for (i = 0; i < NR_IRQS; i++) {
738                 if (idle_stats.pd_int_count[i] == 0)
739                         continue;
740                 seq_printf(s, "%3d %20s %6d %10d\n",
741                         i, irq_to_desc(i)->action ?
742                                 irq_to_desc(i)->action->name ?: "???" : "???",
743                         idle_stats.pd_int_count[i],
744                         idle_stats.pd_int_count[i] -
745                                 idle_stats.last_pd_int_count[i]);
746                 idle_stats.last_pd_int_count[i] = idle_stats.pd_int_count[i];
747         };
748         return 0;
749 }
750 #endif
751
/*
 * SoC-specific cpuidle setup: fill @idle_ops with the Tegra11x
 * callbacks, cache the "cpu_g" clock handle for DVFS checks, and seed
 * the per-CPU exit-latency table (refined at runtime by measurement).
 * Always returns 0.
 */
int __init tegra11x_cpuidle_init_soc(struct tegra_cpuidle_ops *idle_ops)
{
	int i;
	/* Positional init: order must match struct tegra_cpuidle_ops */
	struct tegra_cpuidle_ops ops = {
		tegra11x_idle_power_down,
		tegra11x_cpu_idle_stats_pd_ready,
		tegra11x_cpu_idle_stats_pd_time,
		tegra11x_pd_is_allowed,
#ifdef CONFIG_DEBUG_FS
		tegra11x_pd_debug_show
#endif
	};

	cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");

	for (i = 0; i < ARRAY_SIZE(pd_exit_latencies); i++)
		pd_exit_latencies[i] = tegra_pg_exit_latency;

	*idle_ops = ops;
	return 0;
}