/*
 * arch/arm/mach-tegra/cpuidle-t14x.c
 *
 * Copyright (c) 2012-2013 NVIDIA Corporation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/tegra-soc.h>
#include <linux/irqchip/tegra.h>

#include <asm/cacheflush.h>
#include <asm/localtimer.h>
#include <asm/suspend.h>
#include <asm/smp_twd.h>
#include <asm/cputype.h>

#include <mach/irqs.h>

#include <trace/events/nvpower.h>

#include "clock.h"
#include "cpuidle.h"
#include "dvfs.h"
#include "iomap.h"
#include "pm.h"
#include "reset.h"
#include "sleep.h"
#include "timer.h"

#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
#define PMC_POWERGATE_STATUS \
        (IO_ADDRESS(TEGRA_PMC_BASE) + 0x038)

#ifdef CONFIG_SMP
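/*
 * Earliest pending wake-up deadline per CPU, in microseconds of
 * monotonic time; LLONG_MAX means no wake timer is armed. Before a
 * cluster power down, CPU0 clips its own sleep to the earliest of
 * these so that it wakes before any secondary CPU.
 */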
static s64 tegra_cpu_wake_by_time[4] = {
        LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
#endif

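/*
 * Bitmask of CPUs allowed to power gate in idle: bits 0-3 are the fast
 * (G) cluster cores, bit 4 is the LP CPU (see cpu_number() below).
 */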
static ulong cpu_power_gating_in_idle __read_mostly = 0x1f;
module_param(cpu_power_gating_in_idle, ulong, 0644);

static bool slow_cluster_power_gating_noncpu __read_mostly;
module_param(slow_cluster_power_gating_noncpu, bool, 0644);

static uint fast_cluster_power_down_mode __read_mostly;
module_param(fast_cluster_power_down_mode, uint, 0644);

static bool stop_mc_clk_in_idle __read_mostly;
module_param(stop_mc_clk_in_idle, bool, 0644);

static struct clk *cpu_clk_for_dvfs;
#ifdef CONFIG_HAVE_ARM_TWD
static struct clk *twd_clk;
#endif

static int pd_exit_latencies[5];

static struct {
        unsigned int cpu_ready_count[5];
        unsigned int tear_down_count[5];
        unsigned long long cpu_wants_pd_time[5];
        unsigned long long cpu_pg_time[5];
        unsigned long long rail_pd_time;
        unsigned long long c0nc_pg_time;
        unsigned long long c1nc_pg_time;
        unsigned long long mc_clk_stop_time;
        unsigned int rail_gating_count;
        unsigned int rail_gating_bin[32];
        unsigned int rail_gating_done_count;
        unsigned int rail_gating_done_count_bin[32];
        unsigned int c0nc_gating_count;
        unsigned int c0nc_gating_bin[32];
        unsigned int c0nc_gating_done_count;
        unsigned int c0nc_gating_done_count_bin[32];
        unsigned int c1nc_gating_count;
        unsigned int c1nc_gating_bin[32];
        unsigned int c1nc_gating_done_count;
        unsigned int c1nc_gating_done_count_bin[32];
        unsigned int mc_clk_stop_count;
        unsigned int mc_clk_stop_bin[32];
        unsigned int mc_clk_stop_done_count;
        unsigned int mc_clk_stop_done_count_bin[32];
        unsigned int pd_int_count[NR_IRQS];
        unsigned int last_pd_int_count[NR_IRQS];
        unsigned int clk_gating_vmin;
} idle_stats;

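/*
 * Log2 histogram bucketing for the *_bin[] counters above: fls()
 * returns the index of the highest set bit, so a duration of t ms
 * lands in bin n with 2^(n-1) <= t < 2^n (t == 0 lands in bin 0).
 */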
static inline unsigned int time_to_bin(unsigned int time)
{
        return fls(time);
}

static inline void tegra_irq_unmask(int irq)
{
        struct irq_data *data = irq_get_irq_data(irq);
        data->chip->irq_unmask(data);
}

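/*
 * Per-CPU tables have five slots: 0-3 for the G cluster cores, 4 for
 * the LP CPU. cpu_number() selects slot 4 whenever we are running on
 * the LP cluster.
 */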
static inline unsigned int cpu_number(unsigned int n)
{
        return is_lp_cluster() ? 4 : n;
}

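/*
 * Bookkeeping hooks, exported to the common Tegra cpuidle driver
 * through tegra_cpuidle_ops (see tegra14x_cpuidle_init_soc() below).
 */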
void tegra14x_cpu_idle_stats_pd_ready(unsigned int cpu)
{
        idle_stats.cpu_ready_count[cpu_number(cpu)]++;
}

void tegra14x_cpu_idle_stats_pd_time(unsigned int cpu, s64 us)
{
        idle_stats.cpu_wants_pd_time[cpu_number(cpu)] += us;
}

/*
 * Allow rail off only if all secondary CPUs are power gated, and no
 * rail update is in progress.
 */
static bool tegra_rail_off_is_allowed(void)
{
        u32 rst = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
        u32 pg = readl(PMC_POWERGATE_STATUS) >> 8;

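        /*
         * CPUs 1-3 (mask 0xE) must be held in reset and their PMC
         * partitions must read as power gated before the rail can go off.
         */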
        if (((rst & 0xE) != 0xE) || ((pg & 0xE) != 0))
                return false;

        if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
                return false;

        return true;
}

bool tegra14x_pd_is_allowed(struct cpuidle_device *dev,
        struct cpuidle_state *state)
{
        s64 request;

        if (!cpumask_test_cpu(cpu_number(dev->cpu),
                                to_cpumask(&cpu_power_gating_in_idle)))
                return false;

        request = ktime_to_us(tick_nohz_get_sleep_length());
        if (state->exit_latency != pd_exit_latencies[cpu_number(dev->cpu)]) {
                /* possible on the 1st entry after cluster switch */
                state->exit_latency = pd_exit_latencies[cpu_number(dev->cpu)];
                tegra_pd_update_target_residency(state);
        }
        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                return false;
        }

        return true;
}

static inline void tegra14_irq_restore_affinity(void)
{
#ifdef CONFIG_SMP
        /* Disable the distributor. */
        tegra_gic_dist_disable();

        /* Restore the other CPUs' interrupt affinity. */
        tegra_gic_restore_affinity();

        /* Re-enable the distributor. */
        tegra_gic_dist_enable();
#endif
}

static bool tegra_cpu_cluster_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
        ktime_t entry_time;
        ktime_t exit_time;
        bool sleep_completed = false;
        bool multi_cpu_entry = false;
        int bin;
        unsigned int flag = 0;
        s64 sleep_time;

        /* LP2 entry time */
        entry_time = ktime_get();

        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                cpu_do_idle();
                return false;
        }

#ifdef CONFIG_SMP
        multi_cpu_entry = !is_lp_cluster() && (num_online_cpus() > 1);
        if (multi_cpu_entry) {
                s64 wake_time;
                unsigned int i;

                /* Disable the distributor -- this is the only way to
                   prevent the other CPUs from responding to interrupts
                   and potentially fiddling with the distributor
                   registers while we're fiddling with them. */
                tegra_gic_dist_disable();

                /* Did an interrupt come in for another CPU before we
                   could disable the distributor? */
                if (!tegra_rail_off_is_allowed()) {
                        /* Yes, re-enable the distributor and clock gating. */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* LP2 initial targeted wake time */
                wake_time = ktime_to_us(entry_time) + request;

                /* CPU0 must wake up before any of the other CPUs. */
                smp_rmb();
                for (i = 1; i < CONFIG_NR_CPUS; i++)
                        wake_time = min_t(s64, wake_time,
                                tegra_cpu_wake_by_time[i]);

                /* LP2 actual targeted wake time */
                request = wake_time - ktime_to_us(entry_time);
                BUG_ON(wake_time < 0LL);

                if (request < state->target_residency) {
                        /* Not enough time left to enter LP2 */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* Cancel power gating wake timers for all secondary CPUs */
                tegra_pd_timer_cancel_secondary();

                /* Save and disable the affinity setting for the other
                   CPUs and route all interrupts to CPU0. */
                tegra_gic_disable_affinity();

                /* Re-enable the distributor. */
                tegra_gic_dist_enable();
        }
#endif
        cpu_pm_enter();

        sleep_time = request -
                pd_exit_latencies[cpu_number(dev->cpu)];

        bin = time_to_bin((u32)request / 1000);
        idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

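        /*
         * Decide how much of the cluster to gate along with CPU0: on the
         * LP cluster only the non-CPU partition; on the G cluster either
         * the non-CPU partition or the whole CPU rail, depending on
         * fast_cluster_power_down_mode and the expected residency.
         */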
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        if (is_lp_cluster()) {
                /* here we are not supporting emulation mode, for now */
                flag = TEGRA_POWER_CLUSTER_PART_NONCPU;
                idle_stats.c1nc_gating_count++;
                idle_stats.c1nc_gating_bin[bin]++;
        } else {
                tegra_dvfs_rail_off(tegra_cpu_rail, entry_time);
                flag = (fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK;
                if ((request < tegra_min_residency_crail()) &&
                        (flag != TEGRA_POWER_CLUSTER_PART_MASK))
                        flag = TEGRA_POWER_CLUSTER_PART_NONCPU;

                if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_count++;
                        idle_stats.rail_gating_bin[bin]++;
                } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        idle_stats.c0nc_gating_count++;
                        idle_stats.c0nc_gating_bin[bin]++;
                }
        }

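        /*
         * Optionally stop the memory controller clock as well;
         * state->power_usage == 0 appears to tag the deepest state in
         * this table, and the sleep must be long enough to amortize it.
         */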
        if (stop_mc_clk_in_idle && (state->power_usage == 0) &&
                (request > tegra_mc_clk_stop_min_residency())) {
                flag |= TEGRA_POWER_STOP_MC_CLK;

                trace_nvmc_clk_stop_rcuidle(NVPOWER_MC_CLK_STOP_ENTRY,
                                                sleep_time);
                idle_stats.mc_clk_stop_count++;
                idle_stats.mc_clk_stop_bin[bin]++;

                tegra_mc_clk_prepare();
        }

        if (tegra_idle_power_down_last(sleep_time, flag) == 0)
                sleep_completed = true;
        else {
                int irq = tegra_gic_pending_interrupt();
                idle_stats.pd_int_count[irq]++;
        }

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
        exit_time = ktime_get();

        if (flag & TEGRA_POWER_STOP_MC_CLK)
                tegra_mc_clk_finish();

        if (!is_lp_cluster())
                tegra_dvfs_rail_on(tegra_cpu_rail, exit_time);

        if (flag & TEGRA_POWER_STOP_MC_CLK)
                idle_stats.mc_clk_stop_time +=
                        ktime_to_us(ktime_sub(exit_time, entry_time));
        else if (flag & TEGRA_POWER_CLUSTER_PART_CRAIL)
                idle_stats.rail_pd_time +=
                        ktime_to_us(ktime_sub(exit_time, entry_time));
        else if (flag & TEGRA_POWER_CLUSTER_PART_NONCPU) {
                if (is_lp_cluster())
                        idle_stats.c1nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
                else
                        idle_stats.c0nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
        }

        if (multi_cpu_entry)
                tegra14_irq_restore_affinity();

        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until the next tick,
                 * adjust the exit latency based on measurement
                 */
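                /*
                 * Simple IIR filter: the latency estimate moves toward
                 * the measured overshoot by 1/16 per completed sleep,
                 * clamped to [0, 10000] us.
                 */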
                int offset = ktime_to_us(ktime_sub(exit_time, entry_time))
                        - request;
                int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu_number(dev->cpu)] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();

                if (flag & TEGRA_POWER_STOP_MC_CLK) {
                        trace_nvmc_clk_stop_rcuidle(NVPOWER_MC_CLK_STOP_EXIT,
                                                        sleep_time);
                        idle_stats.mc_clk_stop_done_count++;
                        idle_stats.mc_clk_stop_done_count_bin[bin]++;
                } else if (flag & TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_done_count++;
                        idle_stats.rail_gating_done_count_bin[bin]++;
                } else if (flag & TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        if (is_lp_cluster()) {
                                idle_stats.c1nc_gating_done_count++;
                                idle_stats.c1nc_gating_done_count_bin[bin]++;
                        } else {
                                idle_stats.c0nc_gating_done_count++;
                                idle_stats.c0nc_gating_done_count_bin[bin]++;
                        }
                }

                pr_debug("%lld %lld %d %d\n", request,
                        ktime_to_us(ktime_sub(exit_time, entry_time)),
                        offset, bin);
        }

        cpu_pm_exit();

        return true;
}

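/*
 * Power gate only this CPU core. With CONFIG_TEGRA_LP2_CPU_TIMER the
 * local TWD timer stays the wakeup source (its count is advanced to
 * hide the exit latency); otherwise the TWD state is saved and the
 * broadcast timer plus the tegra_pd trigger take over.
 */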
static bool tegra_cpu_core_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
#ifdef CONFIG_SMP
        s64 sleep_time;
        ktime_t entry_time;
#ifdef CONFIG_HAVE_ARM_TWD
        struct tegra_twd_context twd_context;
#endif
        bool sleep_completed = false;
        struct tick_sched *ts = tick_get_tick_sched(dev->cpu);
#ifdef CONFIG_HAVE_ARM_TWD
#if defined(CONFIG_TEGRA_LP2_CPU_TIMER)
        void __iomem *twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600);
#endif

        if (!tegra_twd_get_state(&twd_context)) {
                unsigned long twd_rate = clk_get_rate(twd_clk);

                if ((twd_context.twd_ctrl & TWD_TIMER_CONTROL_ENABLE) &&
                    (twd_context.twd_ctrl & TWD_TIMER_CONTROL_IT_ENABLE)) {
                        request = div_u64((u64)twd_context.twd_cnt * 1000000,
                                          twd_rate);
#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
                        if (request >= state->target_residency) {
                                twd_context.twd_cnt -= state->exit_latency *
                                        (twd_rate / 1000000);
                                writel(twd_context.twd_cnt,
                                        twd_base + TWD_TIMER_COUNTER);
                        }
#endif
                }
        }
#endif

        if (!tegra_is_cpu_wake_timer_ready(dev->cpu) ||
            (request < state->target_residency) ||
            (!ts) || (ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
                /*
                 * Not enough time left to enter LP2, or wake timer not ready
                 */
                cpu_do_idle();
                return false;
        }

        cpu_pm_enter();

#if !defined(CONFIG_TEGRA_LP2_CPU_TIMER)
        sleep_time = request - state->exit_latency;
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
#ifdef CONFIG_HAVE_ARM_TWD
        tegra_twd_suspend(&twd_context);
#endif
        tegra_pd_set_trigger(sleep_time);
#endif
        idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

        entry_time = ktime_get();

        /* Save time this CPU must be awakened by. */
        tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(entry_time) + request;
        smp_wmb();

#ifdef CONFIG_TRUSTED_LITTLE_KERNEL
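        /*
         * Hand the warm-boot reset handler address to the secure
         * monitor (TLK) before CPU0 is power gated.
         */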
        if (dev->cpu == 0) {
                tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE4,
                                (TEGRA_RESET_HANDLER_BASE +
                                tegra_cpu_reset_handler_offset));
        }
#endif
        cpu_suspend(0, tegra3_sleep_cpu_secondary_finish);

        tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
#ifdef CONFIG_HAVE_ARM_TWD
        if (!tegra_twd_get_state(&twd_context))
                sleep_completed = (twd_context.twd_cnt == 0);
#endif
#else
        sleep_completed = !tegra_pd_timer_remain();
        tegra_pd_set_trigger(0);
#ifdef CONFIG_HAVE_ARM_TWD
        tegra_twd_resume(&twd_context);
#endif
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
#endif
        sleep_time = ktime_to_us(ktime_sub(ktime_get(), entry_time));
        idle_stats.cpu_pg_time[cpu_number(dev->cpu)] += sleep_time;
        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until timer expires,
                 * adjust the exit latency based on measurement
                 */
                int offset = sleep_time - request;
                int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu_number(dev->cpu)] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();
        }
#endif
        cpu_pm_exit();

        return true;
}

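/*
 * Top-level LP2 entry point: chooses between clock gating at Vmin,
 * power gating only this core, or powering down the whole cluster
 * (optionally with the non-CPU partition or the CPU rail), based on
 * the active cluster, the number of online CPUs and the expected
 * sleep length.
 */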
bool tegra14x_idle_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state)
{
        bool power_down;
        bool cpu_gating_only = false;
        bool clkgt_at_vmin = false;
        bool power_gating_cpu_only = true;
        unsigned long rate;
        int status = -1;
        s64 request = ktime_to_us(tick_nohz_get_sleep_length());

        tegra_set_cpu_in_pd(dev->cpu);
        cpu_gating_only = (((fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK) == 0);

        if (is_lp_cluster()) {
                if (slow_cluster_power_gating_noncpu &&
                        (request > tegra_min_residency_ncpu()))
                        power_gating_cpu_only = false;
                else
                        power_gating_cpu_only = true;
        } else {
                if (num_online_cpus() > 1)
                        power_gating_cpu_only = true;
                else {
                        if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
                                clkgt_at_vmin = false;
                        else if (tegra_force_clkgt_at_vmin ==
                                        TEGRA_CPUIDLE_FORCE_DO_CLKGT_VMIN)
                                clkgt_at_vmin = true;
                        else if (tegra_force_clkgt_at_vmin ==
                                        TEGRA_CPUIDLE_FORCE_NO_CLKGT_VMIN)
                                clkgt_at_vmin = false;
                        else if ((request >= tegra_min_residency_vmin_fmin()) &&
                                 ((request < tegra_min_residency_ncpu()) ||
                                   cpu_gating_only))
                                clkgt_at_vmin = true;

                        if (!cpu_gating_only && tegra_rail_off_is_allowed()) {
                                if (fast_cluster_power_down_mode &
                                                TEGRA_POWER_CLUSTER_FORCE_MASK)
                                        power_gating_cpu_only = false;
                                else if (request >
                                                tegra_min_residency_ncpu())
                                        power_gating_cpu_only = false;
                                else
                                        power_gating_cpu_only = true;
                        } else
                                power_gating_cpu_only = true;
                }
        }

        if (clkgt_at_vmin) {
                rate = 0;
                status = tegra_cpu_g_idle_rate_exchange(&rate);
                if (!status) {
                        idle_stats.clk_gating_vmin++;
                        cpu_do_idle();
                        tegra_cpu_g_idle_rate_exchange(&rate);
                        power_down = true;
                } else {
                        power_down = tegra_cpu_core_power_down(dev, state,
                                                               request);
                }
        } else if (power_gating_cpu_only)
                power_down = tegra_cpu_core_power_down(dev, state, request);
        else {
                if (is_lp_cluster()) {
                        rate = ULONG_MAX;
                        status = tegra_cpu_lp_idle_rate_exchange(&rate);
                }

                power_down = tegra_cpu_cluster_power_down(dev, state, request);

                /* restore cpu clock after cluster power ungating */
                if (status == 0)
                        tegra_cpu_lp_idle_rate_exchange(&rate);
        }

        tegra_clear_cpu_in_pd(dev->cpu);

        return power_down;
}

#ifdef CONFIG_DEBUG_FS
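/*
 * debugfs dump of the counters above: per-CPU ready/tear-down counts,
 * gating success rates, log2 residency histograms, and the interrupts
 * that cut power gating short.
 */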
int tegra14x_pd_debug_show(struct seq_file *s, void *data)
{
        int bin;
        int i;
        unsigned long long total_c0cpu0_pg_time = 0;
        unsigned long long total_c1cpu0_pg_time = 0;

        seq_printf(s, "                                    cpu0     cpu1     cpu2     cpu3     cpulp\n");
        seq_printf(s, "-----------------------------------------------------------------------------\n");
        seq_printf(s, "cpu ready:                      %8u %8u %8u %8u %8u\n",
                idle_stats.cpu_ready_count[0],
                idle_stats.cpu_ready_count[1],
                idle_stats.cpu_ready_count[2],
                idle_stats.cpu_ready_count[3],
                idle_stats.cpu_ready_count[4]);
        seq_printf(s, "tear down:                      %8u %8u %8u %8u %8u\n",
                idle_stats.tear_down_count[0],
                idle_stats.tear_down_count[1],
                idle_stats.tear_down_count[2],
                idle_stats.tear_down_count[3],
                idle_stats.tear_down_count[4]);
        seq_printf(s, "clk gating @ Vmin count:      %8u\n",
                idle_stats.clk_gating_vmin);
        seq_printf(s, "rail gating count:      %8u\n",
                idle_stats.rail_gating_count);
        seq_printf(s, "rail gating completed:  %8u %7u%%\n",
                idle_stats.rail_gating_done_count,
                idle_stats.rail_gating_done_count * 100 /
                        (idle_stats.rail_gating_count ?: 1));

        seq_printf(s, "c0nc gating count:      %8u\n",
                idle_stats.c0nc_gating_count);
        seq_printf(s, "c0nc gating completed:  %8u %7u%%\n",
                idle_stats.c0nc_gating_done_count,
                idle_stats.c0nc_gating_done_count * 100 /
                        (idle_stats.c0nc_gating_count ?: 1));

        seq_printf(s, "c1nc gating count:      %8u\n",
                idle_stats.c1nc_gating_count);
        seq_printf(s, "c1nc gating completed:  %8u %7u%%\n",
                idle_stats.c1nc_gating_done_count,
                idle_stats.c1nc_gating_done_count * 100 /
                        (idle_stats.c1nc_gating_count ?: 1));

        seq_printf(s, "mc clk stop count:      %8u\n",
                idle_stats.mc_clk_stop_count);
        seq_printf(s, "mc_clk_stop completed:  %8u %7u%%\n",
                idle_stats.mc_clk_stop_done_count,
                idle_stats.mc_clk_stop_done_count * 100 /
                        (idle_stats.mc_clk_stop_count ?: 1));

        seq_printf(s, "\n");
        seq_printf(s, "cpu ready time:                 "
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_wants_pd_time[0], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[1], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[2], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[3], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[4], 1000));

        total_c0cpu0_pg_time = idle_stats.cpu_pg_time[0] +
                                idle_stats.c0nc_pg_time +
                                idle_stats.rail_pd_time;
        total_c1cpu0_pg_time = idle_stats.cpu_pg_time[4] +
                                idle_stats.c1nc_pg_time;

        seq_printf(s, "cpu power gating time:          "
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(total_c0cpu0_pg_time, 1000),
                div64_u64(idle_stats.cpu_pg_time[1], 1000),
                div64_u64(idle_stats.cpu_pg_time[2], 1000),
                div64_u64(idle_stats.cpu_pg_time[3], 1000),
                div64_u64(total_c1cpu0_pg_time, 1000));

        seq_printf(s, "power gated %%:                 "
                        "%7d%% %7d%% %7d%% %7d%% %7d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(total_c0cpu0_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[1] ?
                        div64_u64(idle_stats.cpu_pg_time[1] * 100,
                        idle_stats.cpu_wants_pd_time[1]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[2] ?
                        div64_u64(idle_stats.cpu_pg_time[2] * 100,
                        idle_stats.cpu_wants_pd_time[2]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[3] ?
                        div64_u64(idle_stats.cpu_pg_time[3] * 100,
                        idle_stats.cpu_wants_pd_time[3]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(total_c1cpu0_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");
        seq_printf(s, "rail gating time  c0nc gating time  c1nc gating time\n");
        seq_printf(s, "%8llu ms          %8llu ms          %8llu ms\n",
                div64_u64(idle_stats.rail_pd_time, 1000),
                div64_u64(idle_stats.c0nc_pg_time, 1000),
                div64_u64(idle_stats.c1nc_pg_time, 1000));
        seq_printf(s, "%8d%%             %8d%%             %8d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.rail_pd_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.c0nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(idle_stats.c1nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "rail gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.rail_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.rail_gating_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin] * 100 /
                                idle_stats.rail_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c0nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c0nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c0nc_gating_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c0nc_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c1nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c1nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c1nc_gating_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c1nc_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "mc clk stop", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.mc_clk_stop_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.mc_clk_stop_bin[bin],
                        idle_stats.mc_clk_stop_done_count_bin[bin],
                        idle_stats.mc_clk_stop_done_count_bin[bin] * 100 /
                                idle_stats.mc_clk_stop_bin[bin]);
        }

        seq_printf(s, "\n");
        seq_printf(s, "%3s %20s %6s %10s\n",
                "int", "name", "count", "last count");
        seq_printf(s, "--------------------------------------------\n");
        for (i = 0; i < NR_IRQS; i++) {
                if (idle_stats.pd_int_count[i] == 0)
                        continue;
                seq_printf(s, "%3d %20s %6d %10d\n",
                        i, irq_to_desc(i)->action ?
                                irq_to_desc(i)->action->name ?: "???" : "???",
                        idle_stats.pd_int_count[i],
                        idle_stats.pd_int_count[i] -
                                idle_stats.last_pd_int_count[i]);
                idle_stats.last_pd_int_count[i] = idle_stats.pd_int_count[i];
        }
        return 0;
}
#endif

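/*
 * One-time SoC hook-up: hands the t14x callbacks to the common Tegra
 * cpuidle driver and seeds every per-CPU exit latency with the generic
 * power-gating exit latency. Note the initializer is positional, so
 * the order must match the declaration of struct tegra_cpuidle_ops.
 */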
int __init tegra14x_cpuidle_init_soc(struct tegra_cpuidle_ops *idle_ops)
{
        int i;
        struct tegra_cpuidle_ops ops = {
                tegra14x_idle_power_down,
                tegra14x_cpu_idle_stats_pd_ready,
                tegra14x_cpu_idle_stats_pd_time,
                tegra14x_pd_is_allowed,
#ifdef CONFIG_DEBUG_FS
                tegra14x_pd_debug_show
#endif
        };

        cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");
#ifdef CONFIG_HAVE_ARM_TWD
        twd_clk = tegra_get_clock_by_name("twd");
#endif

        for (i = 0; i < ARRAY_SIZE(pd_exit_latencies); i++)
                pd_exit_latencies[i] = tegra_pg_exit_latency;

        *idle_ops = ops;

        return 0;
}