/*
 * arch/arm/mach-tegra/cpuidle-t14x.c
 *
 * CPU idle driver for Tegra14x CPUs
 *
 * Copyright (c) 2012-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/localtimer.h>
#include <asm/suspend.h>
#include <asm/smp_twd.h>
#include <asm/cputype.h>

#include <mach/irqs.h>
#include <mach/hardware.h>

#include <trace/events/power.h>
#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x470)
#define PMC_POWERGATE_STATUS \
	(IO_ADDRESS(TEGRA_PMC_BASE) + 0x038)
static s64 tegra_cpu_wake_by_time[4] = {
	LLONG_MAX, LLONG_MAX, LLONG_MAX, LLONG_MAX };
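
/*
 * Runtime tunables (module parameters):
 *  - cpu_power_gating_in_idle: per-CPU enable bitmask for power gating in
 *    idle (bit 4 covers the LP/slow-cluster CPU, see cpu_number() below).
 *  - slow_cluster_power_gating_noncpu: allow gating the non-CPU partition
 *    while running on the slow cluster.
 *  - fast_cluster_power_down_mode: selects which fast-cluster partition
 *    (non-CPU vs. CPU rail) is powered down on a cluster idle entry.
 */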
static ulong cpu_power_gating_in_idle __read_mostly = 0x1f;
module_param(cpu_power_gating_in_idle, ulong, 0644);

static bool slow_cluster_power_gating_noncpu __read_mostly;
module_param(slow_cluster_power_gating_noncpu, bool, 0644);

static uint fast_cluster_power_down_mode __read_mostly;
module_param(fast_cluster_power_down_mode, uint, 0644);

static struct clk *cpu_clk_for_dvfs;
static struct clk *twd_clk;

static int pd_exit_latencies[5];
static struct {
	unsigned int cpu_ready_count[5];
	unsigned int tear_down_count[5];
	unsigned long long cpu_wants_pd_time[5];
	unsigned long long cpu_pg_time[5];
	unsigned long long rail_pd_time;
	unsigned long long c0nc_pg_time;
	unsigned long long c1nc_pg_time;
	unsigned int rail_gating_count;
	unsigned int rail_gating_bin[32];
	unsigned int rail_gating_done_count;
	unsigned int rail_gating_done_count_bin[32];
	unsigned int c0nc_gating_count;
	unsigned int c0nc_gating_bin[32];
	unsigned int c0nc_gating_done_count;
	unsigned int c0nc_gating_done_count_bin[32];
	unsigned int c1nc_gating_count;
	unsigned int c1nc_gating_bin[32];
	unsigned int c1nc_gating_done_count;
	unsigned int c1nc_gating_done_count_bin[32];
	unsigned int pd_int_count[NR_IRQS];
	unsigned int last_pd_int_count[NR_IRQS];
} idle_stats;
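
/* Map a residency time in ms to a log2 histogram bin (see the *_bin[32]
   counters above and the "1 << (bin - 1) .. 1 << bin ms" debugfs rows). */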
static inline unsigned int time_to_bin(unsigned int time)
{
	return fls(time);
}
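
/* Unmask an interrupt directly at its irq_chip. */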
static inline void tegra_irq_unmask(int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);

	data->chip->irq_unmask(data);
}
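
/*
 * Statistics slot for a CPU: 0-3 for the fast-cluster cores, 4 for the
 * LP (slow) cluster CPU.
 */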
static inline unsigned int cpu_number(unsigned int n)
{
	return is_lp_cluster() ? 4 : n;
}
void tegra14x_cpu_idle_stats_pd_ready(unsigned int cpu)
{
	idle_stats.cpu_ready_count[cpu_number(cpu)]++;
}
void tegra14x_cpu_idle_stats_pd_time(unsigned int cpu, s64 us)
{
	idle_stats.cpu_wants_pd_time[cpu_number(cpu)] += us;
}
/* Allow rail off only if all secondary CPUs are power gated, and no
   rail update is in progress */
static bool tegra_rail_off_is_allowed(void)
{
	u32 rst = readl(CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
	u32 pg = readl(PMC_POWERGATE_STATUS) >> 8;

	if (((rst & 0xE) != 0xE) || ((pg & 0xE) != 0))
		return false;

	if (tegra_dvfs_rail_updating(cpu_clk_for_dvfs))
		return false;

	return true;
}
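
/*
 * Governor hook: power down (LP2) is allowed only if this CPU is enabled in
 * cpu_power_gating_in_idle and the expected idle time covers the state's
 * target residency. The exit latency is refreshed here because it may be
 * stale on the first entry after a cluster switch.
 */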
bool tegra14x_pd_is_allowed(struct cpuidle_device *dev,
	struct cpuidle_state *state)
{
	s64 request;

	if (!cpumask_test_cpu(cpu_number(dev->cpu),
			      to_cpumask(&cpu_power_gating_in_idle)))
		return false;

	request = ktime_to_us(tick_nohz_get_sleep_length());
	if (state->exit_latency != pd_exit_latencies[cpu_number(dev->cpu)]) {
		/* possible on the 1st entry after cluster switch */
		state->exit_latency = pd_exit_latencies[cpu_number(dev->cpu)];
		tegra_pd_update_target_residency(state);
	}
	if (request < state->target_residency) {
		/* Not enough time left to enter LP2 */
		return false;
	}

	return true;
}
static inline void tegra14_irq_restore_affinity(void)
{
	/* Disable the distributor. */
	tegra_gic_dist_disable();

	/* Restore the other CPU's interrupt affinity. */
	tegra_gic_restore_affinity();

	/* Re-enable the distributor. */
	tegra_gic_dist_enable();
}
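
/*
 * Enter the deepest state for the whole cluster from CPU0. For a multi-CPU
 * entry the GIC distributor is disabled while interrupt affinity is rerouted
 * to CPU0, and the wake time is clamped to the earliest secondary-CPU wake.
 * On the slow cluster only the non-CPU partition is gated; on the fast
 * cluster the gated partition (non-CPU vs. CPU rail) depends on
 * fast_cluster_power_down_mode and the requested residency. The measured
 * exit latency is fed back to the idle governor.
 */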
static bool tegra_cpu_cluster_power_down(struct cpuidle_device *dev,
			   struct cpuidle_state *state, s64 request)
{
	ktime_t entry_time;
	ktime_t exit_time;
	bool sleep_completed = false;
	bool multi_cpu_entry = false;
	int bin;
	unsigned int flag = 0;
	s64 sleep_time;

	/* LP2 entry time */
	entry_time = ktime_get();

	if (request < state->target_residency) {
		/* Not enough time left to enter LP2 */
		cpu_do_idle();
		return false;
	}

	multi_cpu_entry = !is_lp_cluster() && (num_online_cpus() > 1);
	if (multi_cpu_entry) {
		s64 wake_time;
		unsigned int i;

		/* Disable the distributor -- this is the only way to
		   prevent the other CPUs from responding to interrupts
		   and potentially fiddling with the distributor
		   registers while we're fiddling with them. */
		tegra_gic_dist_disable();

		/* Did an interrupt come in for another CPU before we
		   could disable the distributor? */
		if (!tegra_rail_off_is_allowed()) {
			/* Yes, re-enable the distributor and clock gating. */
			tegra_gic_dist_enable();
			return false;
		}

		/* LP2 initial targeted wake time */
		wake_time = ktime_to_us(entry_time) + request;

		/* CPU0 must wake up before any of the other CPUs. */
		smp_rmb();
		for (i = 1; i < CONFIG_NR_CPUS; i++)
			wake_time = min_t(s64, wake_time,
				tegra_cpu_wake_by_time[i]);

		/* LP2 actual targeted wake time */
		request = wake_time - ktime_to_us(entry_time);
		BUG_ON(wake_time < 0LL);

		if (request < state->target_residency) {
			/* Not enough time left to enter LP2 */
			tegra_gic_dist_enable();
			return false;
		}

		/* Cancel power gating wake timers for all secondary CPUs */
		tegra_pd_timer_cancel_secondary();

		/* Save and disable the affinity setting for the other
		   CPUs and route all interrupts to CPU0. */
		tegra_gic_disable_affinity();

		/* Re-enable the distributor. */
		tegra_gic_dist_enable();
	}

	sleep_time = request -
		pd_exit_latencies[cpu_number(dev->cpu)];

	bin = time_to_bin((u32)request / 1000);
	idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	if (is_lp_cluster()) {
		/* here we are not supporting emulation mode, for now */
		flag = TEGRA_POWER_CLUSTER_PART_NONCPU;
		idle_stats.c1nc_gating_count++;
		idle_stats.c1nc_gating_bin[bin]++;
	} else {
		tegra_dvfs_rail_off(tegra_cpu_rail, entry_time);
		flag = (fast_cluster_power_down_mode
			<< TEGRA_POWER_CLUSTER_PART_SHIFT)
			& TEGRA_POWER_CLUSTER_PART_MASK;
		if ((request < tegra_min_residency_crail()) &&
		    (flag != TEGRA_POWER_CLUSTER_PART_MASK))
			flag = TEGRA_POWER_CLUSTER_PART_NONCPU;

		if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
			idle_stats.rail_gating_count++;
			idle_stats.rail_gating_bin[bin]++;
		} else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
			idle_stats.c0nc_gating_count++;
			idle_stats.c0nc_gating_bin[bin]++;
		}
	}

	if (tegra_idle_power_down_last(sleep_time, flag) == 0)
		sleep_completed = true;
	else {
		int irq = tegra_gic_pending_interrupt();
		idle_stats.pd_int_count[irq]++;
	}

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
	exit_time = ktime_get();
	if (!is_lp_cluster())
		tegra_dvfs_rail_on(tegra_cpu_rail, exit_time);

	if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL)
		idle_stats.rail_pd_time +=
			ktime_to_us(ktime_sub(exit_time, entry_time));
	else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
		if (is_lp_cluster())
			idle_stats.c1nc_pg_time +=
				ktime_to_us(ktime_sub(exit_time, entry_time));
		else
			idle_stats.c0nc_pg_time +=
				ktime_to_us(ktime_sub(exit_time, entry_time));
	}

	if (multi_cpu_entry)
		tegra14_irq_restore_affinity();

	if (sleep_completed) {
		/*
		 * Stayed in LP2 for the full time until the next tick,
		 * adjust the exit latency based on measurement
		 */
		int offset = ktime_to_us(ktime_sub(exit_time, entry_time))
			- request;
		int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
			offset / 16;
		latency = clamp(latency, 0, 10000);
		pd_exit_latencies[cpu_number(dev->cpu)] = latency;
		state->exit_latency = latency;	/* for idle governor */
		smp_wmb();

		if (flag == TEGRA_POWER_CLUSTER_PART_CRAIL) {
			idle_stats.rail_gating_done_count++;
			idle_stats.rail_gating_done_count_bin[bin]++;
		} else if (flag == TEGRA_POWER_CLUSTER_PART_NONCPU) {
			if (is_lp_cluster()) {
				idle_stats.c1nc_gating_done_count++;
				idle_stats.c1nc_gating_done_count_bin[bin]++;
			} else {
				idle_stats.c0nc_gating_done_count++;
				idle_stats.c0nc_gating_done_count_bin[bin]++;
			}
		}

		pr_debug("%lld %lld %d %d\n", request,
			ktime_to_us(ktime_sub(exit_time, entry_time)),
			offset, bin);
	}

	return true;
}
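
/*
 * Power gate an individual CPU core. The wake-up deadline is derived from
 * the local TWD timer (or a platform wake timer when
 * CONFIG_TEGRA_LP2_CPU_TIMER is not set), published in
 * tegra_cpu_wake_by_time[] so CPU0 can honour it on a cluster power-down,
 * and the measured sleep time is used to adapt the exit latency.
 */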
static bool tegra_cpu_core_power_down(struct cpuidle_device *dev,
			   struct cpuidle_state *state, s64 request)
{
	ktime_t entry_time;
	s64 sleep_time;
	struct tegra_twd_context twd_context;
	bool sleep_completed = false;
	struct tick_sched *ts = tick_get_tick_sched(dev->cpu);
#if defined(CONFIG_TEGRA_LP2_CPU_TIMER)
	void __iomem *twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600);
#endif

	if (!tegra_twd_get_state(&twd_context)) {
		unsigned long twd_rate = clk_get_rate(twd_clk);

		if ((twd_context.twd_ctrl & TWD_TIMER_CONTROL_ENABLE) &&
		    (twd_context.twd_ctrl & TWD_TIMER_CONTROL_IT_ENABLE)) {
			request = div_u64((u64)twd_context.twd_cnt * 1000000,
					  twd_rate);
#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
			if (request >= state->target_residency) {
				twd_context.twd_cnt -= state->exit_latency *
					(twd_rate / 1000000);
				writel(twd_context.twd_cnt,
					twd_base + TWD_TIMER_COUNTER);
			}
#endif
		}
	}

	if (!tegra_is_cpu_wake_timer_ready(dev->cpu) ||
	    (request < state->target_residency) ||
	    (!ts) || (ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
		/*
		 * Not enough time left to enter LP2, or wake timer not ready
		 */
		cpu_do_idle();
		return false;
	}

#if !defined(CONFIG_TEGRA_LP2_CPU_TIMER)
	sleep_time = request - state->exit_latency;
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	tegra_twd_suspend(&twd_context);
	tegra_pd_set_trigger(sleep_time);
#endif
	idle_stats.tear_down_count[cpu_number(dev->cpu)]++;

	entry_time = ktime_get();

	/* Save time this CPU must be awakened by. */
	tegra_cpu_wake_by_time[dev->cpu] = ktime_to_us(entry_time) + request;
	smp_wmb();

	cpu_suspend(0, tegra3_sleep_cpu_secondary_finish);

	tegra_cpu_wake_by_time[dev->cpu] = LLONG_MAX;

#ifdef CONFIG_TEGRA_LP2_CPU_TIMER
	if (!tegra_twd_get_state(&twd_context))
		sleep_completed = (twd_context.twd_cnt == 0);
#else
	sleep_completed = !tegra_pd_timer_remain();
	tegra_pd_set_trigger(0);
	tegra_twd_resume(&twd_context);
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
#endif

	sleep_time = ktime_to_us(ktime_sub(ktime_get(), entry_time));
	idle_stats.cpu_pg_time[cpu_number(dev->cpu)] += sleep_time;
	if (sleep_completed) {
		/*
		 * Stayed in LP2 for the full time until timer expires,
		 * adjust the exit latency based on measurement
		 */
		int offset = sleep_time - request;
		int latency = pd_exit_latencies[cpu_number(dev->cpu)] +
			offset / 16;
		latency = clamp(latency, 0, 10000);
		pd_exit_latencies[cpu_number(dev->cpu)] = latency;
		state->exit_latency = latency;	/* for idle governor */
	}

	return true;
}
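
/*
 * Top-level idle power-down entry point: decide between gating only this
 * CPU core and powering down the whole cluster, based on the current
 * cluster, the configured power-down mode, the number of online CPUs and
 * the expected idle time.
 */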
bool tegra14x_idle_power_down(struct cpuidle_device *dev,
	struct cpuidle_state *state)
{
	bool power_down;
	bool cpu_gating_only = false;
	bool power_gating_cpu_only = true;
	s64 request = ktime_to_us(tick_nohz_get_sleep_length());

	tegra_set_cpu_in_pd(dev->cpu);
	cpu_gating_only = (((fast_cluster_power_down_mode
			<< TEGRA_POWER_CLUSTER_PART_SHIFT)
			& TEGRA_POWER_CLUSTER_PART_MASK) == 0);

	if (is_lp_cluster()) {
		if (slow_cluster_power_gating_noncpu &&
		    (request > tegra_min_residency_ncpu()))
			power_gating_cpu_only = false;
		else
			power_gating_cpu_only = true;
	} else if (!cpu_gating_only &&
		   (num_online_cpus() == 1) &&
		   tegra_rail_off_is_allowed() &&
		   (request > tegra_min_residency_ncpu()))
		power_gating_cpu_only = false;
	else
		power_gating_cpu_only = true;

	if (power_gating_cpu_only)
		power_down = tegra_cpu_core_power_down(dev, state, request);
	else
		power_down = tegra_cpu_cluster_power_down(dev, state, request);

	tegra_clear_cpu_in_pd(dev->cpu);

	return power_down;
}
#ifdef CONFIG_DEBUG_FS
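/* Dump the idle/power-gating statistics collected above to debugfs. */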
int tegra14x_pd_debug_show(struct seq_file *s, void *data)
{
	int bin;
	int i;
	unsigned long long total_c0cpu0_pg_time = 0;
	unsigned long long total_c1cpu0_pg_time = 0;

	seq_printf(s, "                                    cpu0     cpu1     cpu2     cpu3     cpulp\n");
	seq_printf(s, "-----------------------------------------------------------------------------\n");
	seq_printf(s, "cpu ready:                    %8u %8u %8u %8u %8u\n",
		idle_stats.cpu_ready_count[0],
		idle_stats.cpu_ready_count[1],
		idle_stats.cpu_ready_count[2],
		idle_stats.cpu_ready_count[3],
		idle_stats.cpu_ready_count[4]);
	seq_printf(s, "tear down:                    %8u %8u %8u %8u %8u\n",
		idle_stats.tear_down_count[0],
		idle_stats.tear_down_count[1],
		idle_stats.tear_down_count[2],
		idle_stats.tear_down_count[3],
		idle_stats.tear_down_count[4]);
	seq_printf(s, "rail gating count:            %8u\n",
		idle_stats.rail_gating_count);
	seq_printf(s, "rail gating completed:        %8u %7u%%\n",
		idle_stats.rail_gating_done_count,
		idle_stats.rail_gating_done_count * 100 /
			(idle_stats.rail_gating_count ?: 1));

	seq_printf(s, "c0nc gating count:            %8u\n",
		idle_stats.c0nc_gating_count);
	seq_printf(s, "c0nc gating completed:        %8u %7u%%\n",
		idle_stats.c0nc_gating_done_count,
		idle_stats.c0nc_gating_done_count * 100 /
			(idle_stats.c0nc_gating_count ?: 1));

	seq_printf(s, "c1nc gating count:            %8u\n",
		idle_stats.c1nc_gating_count);
	seq_printf(s, "c1nc gating completed:        %8u %7u%%\n",
		idle_stats.c1nc_gating_done_count,
		idle_stats.c1nc_gating_done_count * 100 /
			(idle_stats.c1nc_gating_count ?: 1));

	seq_printf(s, "cpu ready time:               "
		"%8llu %8llu %8llu %8llu %8llu ms\n",
		div64_u64(idle_stats.cpu_wants_pd_time[0], 1000),
		div64_u64(idle_stats.cpu_wants_pd_time[1], 1000),
		div64_u64(idle_stats.cpu_wants_pd_time[2], 1000),
		div64_u64(idle_stats.cpu_wants_pd_time[3], 1000),
		div64_u64(idle_stats.cpu_wants_pd_time[4], 1000));

	total_c0cpu0_pg_time = idle_stats.cpu_pg_time[0] +
		idle_stats.c0nc_pg_time +
		idle_stats.rail_pd_time;
	total_c1cpu0_pg_time = idle_stats.cpu_pg_time[4] +
		idle_stats.c1nc_pg_time;

	seq_printf(s, "cpu power gating time:        "
		"%8llu %8llu %8llu %8llu %8llu ms\n",
		div64_u64(total_c0cpu0_pg_time, 1000),
		div64_u64(idle_stats.cpu_pg_time[1], 1000),
		div64_u64(idle_stats.cpu_pg_time[2], 1000),
		div64_u64(idle_stats.cpu_pg_time[3], 1000),
		div64_u64(total_c1cpu0_pg_time, 1000));

	seq_printf(s, "power gated %%:                "
		"%7d%% %7d%% %7d%% %7d%% %7d%%\n",
		(int)(idle_stats.cpu_wants_pd_time[0] ?
			div64_u64(total_c0cpu0_pg_time * 100,
				idle_stats.cpu_wants_pd_time[0]) : 0),
		(int)(idle_stats.cpu_wants_pd_time[1] ?
			div64_u64(idle_stats.cpu_pg_time[1] * 100,
				idle_stats.cpu_wants_pd_time[1]) : 0),
		(int)(idle_stats.cpu_wants_pd_time[2] ?
			div64_u64(idle_stats.cpu_pg_time[2] * 100,
				idle_stats.cpu_wants_pd_time[2]) : 0),
		(int)(idle_stats.cpu_wants_pd_time[3] ?
			div64_u64(idle_stats.cpu_pg_time[3] * 100,
				idle_stats.cpu_wants_pd_time[3]) : 0),
		(int)(idle_stats.cpu_wants_pd_time[4] ?
			div64_u64(total_c1cpu0_pg_time * 100,
				idle_stats.cpu_wants_pd_time[4]) : 0));

	seq_printf(s, "rail gating time  c0nc gating time  c1nc gating time\n");
	seq_printf(s, "%8llu ms       %8llu ms        %8llu ms\n",
		div64_u64(idle_stats.rail_pd_time, 1000),
		div64_u64(idle_stats.c0nc_pg_time, 1000),
		div64_u64(idle_stats.c1nc_pg_time, 1000));
	seq_printf(s, "%8d%%          %8d%%          %8d%%\n",
		(int)(idle_stats.cpu_wants_pd_time[0] ?
			div64_u64(idle_stats.rail_pd_time * 100,
				idle_stats.cpu_wants_pd_time[0]) : 0),
		(int)(idle_stats.cpu_wants_pd_time[0] ?
			div64_u64(idle_stats.c0nc_pg_time * 100,
				idle_stats.cpu_wants_pd_time[0]) : 0),
		(int)(idle_stats.cpu_wants_pd_time[4] ?
			div64_u64(idle_stats.c1nc_pg_time * 100,
				idle_stats.cpu_wants_pd_time[4]) : 0));

	seq_printf(s, "%19s %8s %8s %8s\n", "", "rail gating", "comp", "%");
	seq_printf(s, "-------------------------------------------------\n");
	for (bin = 0; bin < 32; bin++) {
		if (idle_stats.rail_gating_bin[bin] == 0)
			continue;
		seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
			1 << (bin - 1), 1 << bin,
			idle_stats.rail_gating_bin[bin],
			idle_stats.rail_gating_done_count_bin[bin],
			idle_stats.rail_gating_done_count_bin[bin] * 100 /
				idle_stats.rail_gating_bin[bin]);
	}

	seq_printf(s, "%19s %8s %8s %8s\n", "", "c0nc gating", "comp", "%");
	seq_printf(s, "-------------------------------------------------\n");
	for (bin = 0; bin < 32; bin++) {
		if (idle_stats.c0nc_gating_bin[bin] == 0)
			continue;
		seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
			1 << (bin - 1), 1 << bin,
			idle_stats.c0nc_gating_bin[bin],
			idle_stats.c0nc_gating_done_count_bin[bin],
			idle_stats.c0nc_gating_done_count_bin[bin] * 100 /
				idle_stats.c0nc_gating_bin[bin]);
	}

	seq_printf(s, "%19s %8s %8s %8s\n", "", "c1nc gating", "comp", "%");
	seq_printf(s, "-------------------------------------------------\n");
	for (bin = 0; bin < 32; bin++) {
		if (idle_stats.c1nc_gating_bin[bin] == 0)
			continue;
		seq_printf(s, "%6u - %6u ms: %8u %8u %7u%%\n",
			1 << (bin - 1), 1 << bin,
			idle_stats.c1nc_gating_bin[bin],
			idle_stats.c1nc_gating_done_count_bin[bin],
			idle_stats.c1nc_gating_done_count_bin[bin] * 100 /
				idle_stats.c1nc_gating_bin[bin]);
	}

	seq_printf(s, "%3s %20s %6s %10s\n",
		"int", "name", "count", "last count");
	seq_printf(s, "--------------------------------------------\n");
	for (i = 0; i < NR_IRQS; i++) {
		if (idle_stats.pd_int_count[i] == 0)
			continue;
		seq_printf(s, "%3d %20s %6d %10d\n",
			i, irq_to_desc(i)->action ?
				irq_to_desc(i)->action->name ?: "???" : "???",
			idle_stats.pd_int_count[i],
			idle_stats.pd_int_count[i] -
				idle_stats.last_pd_int_count[i]);
		idle_stats.last_pd_int_count[i] = idle_stats.pd_int_count[i];
	}

	return 0;
}
#endif
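
/*
 * Fill in the SoC-specific cpuidle callbacks and initialize the per-CPU
 * exit latencies to the platform default power-gating exit latency.
 */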
int __init tegra14x_cpuidle_init_soc(struct tegra_cpuidle_ops *idle_ops)
{
	int i;
	struct tegra_cpuidle_ops ops = {
		tegra14x_idle_power_down,
		tegra14x_cpu_idle_stats_pd_ready,
		tegra14x_cpu_idle_stats_pd_time,
		tegra14x_pd_is_allowed,
#ifdef CONFIG_DEBUG_FS
		tegra14x_pd_debug_show
#endif
	};

	cpu_clk_for_dvfs = tegra_get_clock_by_name("cpu_g");
	twd_clk = tegra_get_clock_by_name("twd");

	for (i = 0; i < ARRAY_SIZE(pd_exit_latencies); i++)
		pd_exit_latencies[i] = tegra_pg_exit_latency;

	*idle_ops = ops;

	return 0;
}