ARM: tegra11x: Support min residency per platform
/*
 * arch/arm/mach-tegra/cpuidle-t11x.c
 *
 * CPU idle driver for Tegra11x CPUs
 *
 * Copyright (c) 2012-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/localtimer.h>
#include <asm/suspend.h>
#include <asm/cputype.h>

#include <mach/irqs.h>
#include <mach/hardware.h>

#include <trace/events/power.h>

#include "clock.h"
#include "cpuidle.h"
#include "dvfs.h"
#include "fuse.h"
#include "gic.h"
#include "iomap.h"
#include "pm.h"
#include "reset.h"
#include "sleep.h"
#include "timer.h"

#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
#define PMC_POWERGATE_STATUS \
        (IO_ADDRESS(TEGRA_PMC_BASE) + 0x038)

#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)

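/*
 * Earliest deadline (in microseconds since boot) by which each CPU must be
 * awakened; LLONG_MAX means "no pending wake timer".  The cluster power-down
 * path below takes the minimum across all secondary CPUs so that CPU0 is
 * always the first core to wake.
 */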
#ifdef CONFIG_SMP
static s64 tegra_cpu_wake_by_time[4] = {
        LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
#endif

static ulong cpu_power_gating_in_idle __read_mostly = 0x1f;
module_param(cpu_power_gating_in_idle, ulong, 0644);

static bool slow_cluster_power_gating_noncpu __read_mostly;
module_param(slow_cluster_power_gating_noncpu, bool, 0644);

static uint fast_cluster_power_down_mode __read_mostly;
module_param(fast_cluster_power_down_mode, uint, 0644);

static struct clk *cpu_clk_for_dvfs;

static int pd_exit_latencies[5];

static struct {
        unsigned int cpu_ready_count[5];
        unsigned int tear_down_count[5];
        unsigned long long cpu_wants_pd_time[5];
        unsigned long long cpu_pg_time[5];
        unsigned long long rail_pd_time;
        unsigned long long c0nc_pg_time;
        unsigned long long c1nc_pg_time;
        unsigned int rail_gating_count;
        unsigned int rail_gating_bin[32];
        unsigned int rail_gating_done_count;
        unsigned int rail_gating_done_count_bin[32];
        unsigned int c0nc_gating_count;
        unsigned int c0nc_gating_bin[32];
        unsigned int c0nc_gating_done_count;
        unsigned int c0nc_gating_done_count_bin[32];
        unsigned int c1nc_gating_count;
        unsigned int c1nc_gating_bin[32];
        unsigned int c1nc_gating_done_count;
        unsigned int c1nc_gating_done_count_bin[32];
        unsigned int pd_int_count[NR_IRQS];
        unsigned int last_pd_int_count[NR_IRQS];
        unsigned int clk_gating_vmin;
} idle_stats;

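/*
 * Power-of-two histogram bucketing for the debugfs statistics: fls() maps a
 * time of t ms into bin n such that 2^(n-1) <= t < 2^n, which matches the
 * "1 << (bin - 1) .. 1 << bin ms" ranges printed by tegra11x_pd_debug_show().
 */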
static inline unsigned int time_to_bin(unsigned int time)
{
        return fls(time);
}

static inline void tegra_irq_unmask(int irq)
{
        struct irq_data *data = irq_get_irq_data(irq);
        data->chip->irq_unmask(data);
}

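/*
 * Statistics slot for a CPU: indices 0-3 are the fast (G) cluster cores,
 * while the LP cluster's single core always maps to slot 4, whatever
 * logical CPU number it is running as.
 */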
static inline unsigned int cpu_number(unsigned int n)
{
        return is_lp_cluster() ? 4 : n;
}

void tegra11x_cpu_idle_stats_pd_ready(unsigned int cpu)
{
        idle_stats.cpu_ready_count[cpu_number(cpu)]++;
}

void tegra11x_cpu_idle_stats_pd_time(unsigned int cpu, s64 us)
{
        idle_stats.cpu_wants_pd_time[cpu_number(cpu)] += us;
}

/* Allow rail off only if all secondary CPUs are power gated, and no
   rail update is in progress */
static bool tegra_rail_off_is_allowed(void)
{
        u32 rst = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
        u32 pg = readl(PMC_POWERGATE_STATUS) >> 8;

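        /*
         * Bits 1-3 of both words correspond to CPU1-CPU3: each secondary
         * must report as held in reset, and its PMC partition (bits 9-11
         * before the shift above) must read as power gated.
         */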
        if (((rst & 0xE) != 0xE) || ((pg & 0xE) != 0))
                return false;

        if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
                return false;

        return true;
}

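/*
 * Governor hook: power down is refused if this CPU is masked out of
 * cpu_power_gating_in_idle, if the wake timer cannot report the remaining
 * time, or if the projected idle period is shorter than the state's target
 * residency.  The exit latency is re-read on the first entry after a
 * cluster switch, since the G and LP cores settle on different measured
 * latencies.
 */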
bool tegra11x_pd_is_allowed(struct cpuidle_device *dev,
        struct cpuidle_state *state)
{
        s64 request;

        if (!cpumask_test_cpu(cpu_number(dev->cpu),
                                to_cpumask(&cpu_power_gating_in_idle)))
                return false;

        if (tegra_cpu_timer_get_remain(&request))
                return false;

        if (state->exit_latency != pd_exit_latencies[cpu_number(dev->cpu)]) {
                /* possible on the 1st entry after cluster switch */
                state->exit_latency = pd_exit_latencies[cpu_number(dev->cpu)];
                tegra_pd_update_target_residency(state);
        }
        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                return false;
        }

        return true;
}

static inline void tegra11_irq_restore_affinity(void)
{
#ifdef CONFIG_SMP
        /* Disable the distributor. */
        tegra_gic_dist_disable();

        /* Restore the other CPUs' interrupt affinity. */
        tegra_gic_restore_affinity();

        /* Re-enable the distributor. */
        tegra_gic_dist_enable();
#endif
}

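/*
 * Cluster-wide power down.  On the fast (G) cluster this can gate the
 * non-CPU partition or drop the whole CPU rail, depending on the requested
 * sleep length and fast_cluster_power_down_mode; on the LP cluster only
 * the non-CPU partition is gated.  Returns true if power down was
 * attempted (the cpu_pm notifiers ran), false if we fell back to a plain
 * WFI via cpu_do_idle().
 */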
static bool tegra_cpu_cluster_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
        ktime_t entry_time;
        ktime_t exit_time;
        bool sleep_completed = false;
        bool multi_cpu_entry = false;
        int bin;
        unsigned int flag = 0;
        s64 sleep_time;

        /* LP2 entry time */
        entry_time = ktime_get();

        if (request < state->target_residency) {
                /* Not enough time left to enter LP2 */
                cpu_do_idle();
                return false;
        }

#ifdef CONFIG_SMP
        multi_cpu_entry = !is_lp_cluster() && (num_online_cpus() > 1);
        if (multi_cpu_entry) {
                s64 wake_time;
                unsigned int i;

                /* Disable the distributor -- this is the only way to
                   prevent the other CPUs from responding to interrupts
                   and potentially fiddling with the distributor
                   registers while we're fiddling with them. */
                tegra_gic_dist_disable();

                /* Did an interrupt come in for another CPU before we
                   could disable the distributor? */
                if (!tegra_rail_off_is_allowed()) {
                        /* Yes, re-enable the distributor and clock gating. */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* LP2 initial targeted wake time */
                wake_time = ktime_to_us(entry_time) + request;

                /* CPU0 must wake up before any of the other CPUs. */
                smp_rmb();
                for (i = 1; i < CONFIG_NR_CPUS; i++)
                        wake_time = min_t(s64, wake_time,
                                tegra_cpu_wake_by_time[i]);

                /* LP2 actual targeted wake time */
                request = wake_time - ktime_to_us(entry_time);
                BUG_ON(wake_time < 0LL);

                if (request < state->target_residency) {
                        /* Not enough time left to enter LP2 */
                        tegra_gic_dist_enable();
                        cpu_do_idle();
                        return false;
                }

                /* Cancel power gating wake timers for all secondary CPUs */
                tegra_pd_timer_cancel_secondary();

                /* Save and disable the affinity setting for the other
                   CPUs and route all interrupts to CPU0. */
                tegra_gic_disable_affinity();

                /* Re-enable the distributor. */
                tegra_gic_dist_enable();
        }
#endif
        cpu_pm_enter();

        sleep_time = request -
                pd_exit_latencies[cpu_number(dev->cpu)];

        bin = time_to_bin((u32)request / 1000);
        idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

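        /*
         * The local timer stops in this state; hand timekeeping duty to
         * the broadcast clockevent device for the duration of the power
         * down.
         */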
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        if (is_lp_cluster()) {
                /* emulation mode is not supported here, for now */
                flag = TEGRA_POWER_CLUSTER_PART_NONCPU;
                idle_stats.c1nc_gating_count++;
                idle_stats.c1nc_gating_bin[bin]++;
        } else {
                tegra_dvfs_rail_off(tegra_cpu_rail, entry_time);
                flag = (fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK;

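                /*
                 * Honor the platform's minimum CRAIL residency: if the
                 * projected sleep is too short for rail gating and the
                 * cluster state is not forced, fall back to gating the
                 * non-CPU partition only.
                 */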
                if (((request < tegra_min_residency_crail()) &&
                        (flag != TEGRA_POWER_CLUSTER_PART_MASK)) &&
                        ((fast_cluster_power_down_mode &
                        TEGRA_POWER_CLUSTER_FORCE_MASK) == 0))
                        flag = TEGRA_POWER_CLUSTER_PART_NONCPU;

                if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_count++;
                        idle_stats.rail_gating_bin[bin]++;
                } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        idle_stats.c0nc_gating_count++;
                        idle_stats.c0nc_gating_bin[bin]++;
                }
        }

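        /*
         * A zero return means the last CPU actually completed the cluster
         * state; otherwise the attempt was aborted, and we record which
         * pending GIC interrupt cut it short.
         */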
        if (tegra_idle_power_down_last(sleep_time, flag) == 0)
                sleep_completed = true;
        else {
                int irq = tegra_gic_pending_interrupt();
                idle_stats.pd_int_count[irq]++;
        }

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
        exit_time = ktime_get();
        if (!is_lp_cluster())
                tegra_dvfs_rail_on(tegra_cpu_rail, exit_time);

        if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL)
                idle_stats.rail_pd_time +=
                        ktime_to_us(ktime_sub(exit_time, entry_time));
        else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                if (is_lp_cluster())
                        idle_stats.c1nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
                else
                        idle_stats.c0nc_pg_time +=
                                ktime_to_us(ktime_sub(exit_time, entry_time));
        }

        if (multi_cpu_entry)
                tegra11_irq_restore_affinity();

        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until the next tick,
                 * adjust the exit latency based on measurement
                 */
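                /*
                 * One-pole filter: nudge the stored latency by 1/16 of the
                 * measurement error, clamped to [0, 10000] us.
                 */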
                int offset = ktime_to_us(ktime_sub(exit_time, entry_time))
                        - request;
                int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu_number(dev->cpu)] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();

                if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
                        idle_stats.rail_gating_done_count++;
                        idle_stats.rail_gating_done_count_bin[bin]++;
                } else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
                        if (is_lp_cluster()) {
                                idle_stats.c1nc_gating_done_count++;
                                idle_stats.c1nc_gating_done_count_bin[bin]++;
                        } else {
                                idle_stats.c0nc_gating_done_count++;
                                idle_stats.c0nc_gating_done_count_bin[bin]++;
                        }
                }

                pr_debug("%lld %lld %d %d\n", request,
                        ktime_to_us(ktime_sub(exit_time, entry_time)),
                        offset, bin);
        }

        cpu_pm_exit();

        return true;
}

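/*
 * Per-core power gating ("CPU LP2"): only the calling core is gated, the
 * cluster stays up.  With CONFIG_TEGRA_LP2_CPU_TIMER the core arms its own
 * architected timer via cp15 (CNTFRQ is read to convert microseconds to
 * ticks, then CNTP_TVAL is programmed) to wake itself; otherwise an
 * external power-down trigger timer plus the broadcast clockevent device
 * are used.
 */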
static bool tegra_cpu_core_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state, s64 request)
{
#ifdef CONFIG_SMP
        s64 sleep_time;
        u32 cntp_tval;
        u32 cntfrq;
        ktime_t entry_time;
        bool sleep_completed = false;
        struct tick_sched *ts = tick_get_tick_sched(dev->cpu);
        unsigned int cpu = cpu_number(dev->cpu);

        if ((tegra_cpu_timer_get_remain(&request) == -ETIME) ||
                (request <= state->target_residency) || (!ts) ||
                (ts->nohz_mode == NOHZ_MODE_INACTIVE) ||
                !tegra_is_cpu_wake_timer_ready(dev->cpu)) {
                /*
                 * Not enough time left to enter LP2, or wake timer not ready
                 */
                cpu_do_idle();
                return false;
        }

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
        asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (cntfrq));
        cntp_tval = (request - state->exit_latency) * (cntfrq / 1000000);
        asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r"(cntp_tval));
#endif
        cpu_pm_enter();

#if !defined(CONFIG_TEGRA_LP2_CPU_TIMER)
        sleep_time = request - state->exit_latency;
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
        tegra_pd_set_trigger(sleep_time);
#endif
        idle_stats.tear_down_count[cpu]++;

        entry_time = ktime_get();

        /* Save time this CPU must be awakened by. */
        tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(entry_time) + request;
        smp_wmb();

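        /*
         * On Trusted Foundations builds the reset vector is owned by the
         * secure monitor, so before the boot CPU of either cluster (slot 0
         * or 4) powers down, an SMC points the monitor at the kernel's
         * reset handler (the 0xFFFFFFFC/0xFFFFFFE7 arguments are
         * TF-specific magic numbers).
         */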
#ifdef CONFIG_TRUSTED_FOUNDATIONS
        if ((cpu == 0) || (cpu == 4)) {
                tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE7,
                                (TEGRA_RESET_HANDLER_BASE +
                                tegra_cpu_reset_handler_offset));
        }
#endif
        cpu_suspend(0, tegra3_sleep_cpu_secondary_finish);

        tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
        asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (cntp_tval));
        if ((s32)cntp_tval <= 0)
                sleep_completed = true;
#else
        sleep_completed = !tegra_pd_timer_remain();
        tegra_pd_set_trigger(0);
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
#endif
        sleep_time = ktime_to_us(ktime_sub(ktime_get(), entry_time));
        idle_stats.cpu_pg_time[cpu] += sleep_time;
        if (sleep_completed) {
                /*
                 * Stayed in LP2 for the full time until timer expires,
                 * adjust the exit latency based on measurement
                 */
                int offset = sleep_time - request;
                int latency = pd_exit_latencies[cpu] +
                        offset / 16;
                latency = clamp(latency, 0, 10000);
                pd_exit_latencies[cpu] = latency;
                state->exit_latency = latency;          /* for idle governor */
                smp_wmb();
        }
#endif
        cpu_pm_exit();

        return true;
}

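/*
 * Top-level idle power-down entry.  Chooses between three strategies:
 * clock gating at Vmin for medium idle windows on a single online fast
 * core, cluster-wide power down when the rail or non-CPU partition may go
 * off, and plain per-core power gating otherwise.
 */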
bool tegra11x_idle_power_down(struct cpuidle_device *dev,
                           struct cpuidle_state *state)
{
        bool power_down;
        bool cpu_gating_only = false;
        bool clkgt_at_vmin = false;
        bool power_gating_cpu_only = true;
        int status = -1;
        unsigned long rate;
        s64 request;

        if (tegra_cpu_timer_get_remain(&request)) {
                cpu_do_idle();
                return false;
        }

        tegra_set_cpu_in_pd(dev->cpu);
        cpu_gating_only = (((fast_cluster_power_down_mode
                        << TEGRA_POWER_CLUSTER_PART_SHIFT)
                        & TEGRA_POWER_CLUSTER_PART_MASK) == 0);

        if (is_lp_cluster()) {
                if (slow_cluster_power_gating_noncpu &&
                        (request > tegra_min_residency_ncpu()))
                        power_gating_cpu_only = false;
                else
                        power_gating_cpu_only = true;
        } else {
                if (num_online_cpus() > 1)
                        power_gating_cpu_only = true;
                else {
                        if (tegra_force_clkgt_at_vmin ==
                                        TEGRA_CPUIDLE_FORCE_DO_CLKGT_VMIN)
                                clkgt_at_vmin = true;
                        else if (tegra_force_clkgt_at_vmin ==
                                        TEGRA_CPUIDLE_FORCE_NO_CLKGT_VMIN)
                                clkgt_at_vmin = false;
                        else if ((request >= tegra_min_residency_vmin_fmin()) &&
                                 (request < tegra_min_residency_ncpu()))
                                clkgt_at_vmin = true;

                        if (!cpu_gating_only && tegra_rail_off_is_allowed()) {
                                if (fast_cluster_power_down_mode &
                                                TEGRA_POWER_CLUSTER_FORCE_MASK)
                                        power_gating_cpu_only = false;
                                else if (request >
                                                tegra_min_residency_ncpu())
                                        power_gating_cpu_only = false;
                                else
                                        power_gating_cpu_only = true;
                        } else
                                power_gating_cpu_only = true;
                }
        }

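        /*
         * Clock gating at Vmin: swap the DFLL to its floor rate (passing
         * rate 0 appears to request the minimum; the exchange returns the
         * previous rate in "rate"), do a plain WFI, then swap the saved
         * rate back in on wakeup.
         */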
        if (clkgt_at_vmin) {
                rate = 0;
                status = tegra11_cpu_dfll_rate_exchange(&rate);
                if (!status) {
                        idle_stats.clk_gating_vmin++;
                        cpu_do_idle();
                        tegra11_cpu_dfll_rate_exchange(&rate);
                        power_down = false;
                } else
                        power_down = tegra_cpu_core_power_down(dev, state,
                                                                request);
        } else if (!power_gating_cpu_only) {
                if (is_lp_cluster()) {
                        rate = ULONG_MAX;
                        status = tegra_cpu_backup_rate_exchange(&rate);
                }

                power_down = tegra_cpu_cluster_power_down(dev, state, request);

                /* restore cpu clock after cluster power ungating */
                if (status == 0)
                        tegra_cpu_backup_rate_exchange(&rate);
        } else
                power_down = tegra_cpu_core_power_down(dev, state, request);

        tegra_clear_cpu_in_pd(dev->cpu);

        return power_down;
}

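/*
 * debugfs dump of the counters collected above: per-slot ready/tear-down
 * counts, gating success rates, residency histograms (one power-of-two bin
 * per row), and which interrupts aborted a power down since the last read.
 */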
#ifdef CONFIG_DEBUG_FS
int tegra11x_pd_debug_show(struct seq_file *s, void *data)
{
        int bin;
        int i;
        seq_printf(s, "                                    cpu0     cpu1     cpu2     cpu3     cpulp\n");
        seq_printf(s, "-----------------------------------------------------------------------------\n");
        seq_printf(s, "cpu ready:                      %8u %8u %8u %8u %8u\n",
                idle_stats.cpu_ready_count[0],
                idle_stats.cpu_ready_count[1],
                idle_stats.cpu_ready_count[2],
                idle_stats.cpu_ready_count[3],
                idle_stats.cpu_ready_count[4]);
        seq_printf(s, "tear down:                      %8u %8u %8u %8u %8u\n",
                idle_stats.tear_down_count[0],
                idle_stats.tear_down_count[1],
                idle_stats.tear_down_count[2],
                idle_stats.tear_down_count[3],
                idle_stats.tear_down_count[4]);
        seq_printf(s, "clk gating @ Vmin count:      %8u\n",
                idle_stats.clk_gating_vmin);
        seq_printf(s, "rail gating count:      %8u\n",
                idle_stats.rail_gating_count);
        seq_printf(s, "rail gating completed:  %8u %7u%%\n",
                idle_stats.rail_gating_done_count,
                idle_stats.rail_gating_done_count * 100 /
                        (idle_stats.rail_gating_count ?: 1));

        seq_printf(s, "c0nc gating count:      %8u\n",
                idle_stats.c0nc_gating_count);
        seq_printf(s, "c0nc gating completed:  %8u %7u%%\n",
                idle_stats.c0nc_gating_done_count,
                idle_stats.c0nc_gating_done_count * 100 /
                        (idle_stats.c0nc_gating_count ?: 1));

        seq_printf(s, "c1nc gating count:      %8u\n",
                idle_stats.c1nc_gating_count);
        seq_printf(s, "c1nc gating completed:  %8u %7u%%\n",
                idle_stats.c1nc_gating_done_count,
                idle_stats.c1nc_gating_done_count * 100 /
                        (idle_stats.c1nc_gating_count ?: 1));

        seq_printf(s, "\n");
        seq_printf(s, "cpu ready time:                 " \
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_wants_pd_time[0], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[1], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[2], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[3], 1000),
                div64_u64(idle_stats.cpu_wants_pd_time[4], 1000));

        seq_printf(s, "cpu power gating time:          " \
                        "%8llu %8llu %8llu %8llu %8llu ms\n",
                div64_u64(idle_stats.cpu_pg_time[0], 1000),
                div64_u64(idle_stats.cpu_pg_time[1], 1000),
                div64_u64(idle_stats.cpu_pg_time[2], 1000),
                div64_u64(idle_stats.cpu_pg_time[3], 1000),
                div64_u64(idle_stats.cpu_pg_time[4], 1000));

        seq_printf(s, "power gated %%:                 " \
                        "%7d%% %7d%% %7d%% %7d%% %7d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.cpu_pg_time[0] * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[1] ?
                        div64_u64(idle_stats.cpu_pg_time[1] * 100,
                        idle_stats.cpu_wants_pd_time[1]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[2] ?
                        div64_u64(idle_stats.cpu_pg_time[2] * 100,
                        idle_stats.cpu_wants_pd_time[2]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[3] ?
                        div64_u64(idle_stats.cpu_pg_time[3] * 100,
                        idle_stats.cpu_wants_pd_time[3]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(idle_stats.cpu_pg_time[4] * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");
        seq_printf(s, "rail gating time  c0nc gating time  c1nc gating time\n");
        seq_printf(s, "%8llu ms          %8llu ms          %8llu ms\n",
                div64_u64(idle_stats.rail_pd_time, 1000),
                div64_u64(idle_stats.c0nc_pg_time, 1000),
                div64_u64(idle_stats.c1nc_pg_time, 1000));
        seq_printf(s, "%8d%%             %8d%%             %8d%%\n",
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.rail_pd_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[0] ?
                        div64_u64(idle_stats.c0nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[0]) : 0),
                (int)(idle_stats.cpu_wants_pd_time[4] ?
                        div64_u64(idle_stats.c1nc_pg_time * 100,
                        idle_stats.cpu_wants_pd_time[4]) : 0));

        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "rail gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.rail_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.rail_gating_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin],
                        idle_stats.rail_gating_done_count_bin[bin] * 100 /
                                idle_stats.rail_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c0nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c0nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c0nc_gating_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin],
                        idle_stats.c0nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c0nc_gating_bin[bin]);
        }
        seq_printf(s, "\n");

        seq_printf(s, "%19s %8s %8s %8s\n", "", "c1nc gating", "comp", "%");
        seq_printf(s, "-------------------------------------------------\n");
        for (bin = 0; bin < 32; bin++) {
                if (idle_stats.c1nc_gating_bin[bin] == 0)
                        continue;
                seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
                        1 << (bin - 1), 1 << bin,
                        idle_stats.c1nc_gating_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin],
                        idle_stats.c1nc_gating_done_count_bin[bin] * 100 /
                                idle_stats.c1nc_gating_bin[bin]);
        }

        seq_printf(s, "\n");
        seq_printf(s, "%3s %20s %6s %10s\n",
                "int", "name", "count", "last count");
        seq_printf(s, "--------------------------------------------\n");
        for (i = 0; i < NR_IRQS; i++) {
                if (idle_stats.pd_int_count[i] == 0)
                        continue;
                seq_printf(s, "%3d %20s %6d %10d\n",
                        i, irq_to_desc(i)->action ?
                                irq_to_desc(i)->action->name ?: "???" : "???",
                        idle_stats.pd_int_count[i],
                        idle_stats.pd_int_count[i] -
                                idle_stats.last_pd_int_count[i]);
                idle_stats.last_pd_int_count[i] = idle_stats.pd_int_count[i];
        }
        return 0;
}
#endif

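/*
 * SoC hook-up: registers the Tegra11x callbacks with the generic Tegra
 * cpuidle driver and seeds every per-slot exit latency with the platform
 * default (tegra_pg_exit_latency) until runtime measurements adjust it.
 */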
int __init tegra11x_cpuidle_init_soc(struct tegra_cpuidle_ops *idle_ops)
{
        int i;
        struct tegra_cpuidle_ops ops = {
                tegra11x_idle_power_down,
                tegra11x_cpu_idle_stats_pd_ready,
                tegra11x_cpu_idle_stats_pd_time,
                tegra11x_pd_is_allowed,
#ifdef CONFIG_DEBUG_FS
                tegra11x_pd_debug_show
#endif
        };

        cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");

        for (i = 0; i < ARRAY_SIZE(pd_exit_latencies); i++)
                pd_exit_latencies[i] = tegra_pg_exit_latency;

        *idle_ops = ops;
        return 0;
}