ARM: tegra: pcie: Remove dock detect variable
[linux-3.10.git] / arch / arm / mach-tegra / pm-t3.c
1 /*
2  * arch/arm/mach-tegra/pm-t3.c
3  *
4  * Tegra3 SOC-specific power and cluster management
5  *
6  * Copyright (c) 2009-2013, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/io.h>
22 #include <linux/smp.h>
23 #include <linux/interrupt.h>
24 #include <linux/clk.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/device.h>
28 #include <linux/module.h>
29 #include <linux/clockchips.h>
30 #include <linux/cpu_pm.h>
31 #include <linux/irqchip/arm-gic.h>
32 #include <linux/sched.h>
33 #include <linux/tegra-powergate.h>
34 #include <linux/tegra-soc.h>
35 #include <linux/tegra-cpuidle.h>
36
37 #include <mach/irqs.h>
38 #include <mach/io_dpd.h>
39 #include <mach/edp.h>
40
41 #include <asm/smp_plat.h>
42 #include <asm/cputype.h>
43
44 #include "clock.h"
45 #include "iomap.h"
46 #include "pm.h"
47 #include "sleep.h"
48 #include "tegra3_emc.h"
49 #include "dvfs.h"
50 #include "tegra11_soctherm.h"
51
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/nvpower.h>
54
55 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
56 #define CAR_CCLK_BURST_POLICY \
57         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)
58
59 #define CAR_SUPER_CCLK_DIVIDER \
60         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)
61
62 #define CAR_CCLKG_BURST_POLICY \
63         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)
64
65 #define CAR_SUPER_CCLKG_DIVIDER \
66         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)
67
68 #define CAR_CCLKLP_BURST_POLICY \
69         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
70 #define PLLX_DIV2_BYPASS_LP     (1<<16)
71
72 #define CAR_SUPER_CCLKLP_DIVIDER \
73         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)
74
75 #define CAR_BOND_OUT_V \
76         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
77 #define CAR_BOND_OUT_V_CPU_G    (1<<0)
78 #define CAR_BOND_OUT_V_CPU_LP   (1<<1)
79
80 #define CAR_CLK_ENB_V_SET \
81         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
82 #define CAR_CLK_ENB_V_CPU_G     (1<<0)
83 #define CAR_CLK_ENB_V_CPU_LP    (1<<1)
84
85 #define CAR_RST_CPUG_CMPLX_SET \
86         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)
87
88 #define CAR_RST_CPUG_CMPLX_CLR \
89         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)
90
91 #define CAR_RST_CPULP_CMPLX_SET \
92         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)
93
94 #define CAR_RST_CPULP_CMPLX_CLR \
95         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)
96
97 #define CAR_CLK_CPUG_CMPLX_SET \
98         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)
99
100 #define CAR_CLK_CPUG_CMPLX_CLR \
101         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)
102
103 #define CAR_CLK_CPULP_CMPLX_SET \
104         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)
105
106 #define CAR_CLK_CPULP_CMPLX_CLR \
107         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)
108
109 #define CPU_CLOCK(cpu)  (0x1<<(8+cpu))
110 #define CPU_RESET(cpu)  (0x1111ul<<(cpu))
111
112 #define PLLX_FO_G (1<<28)
113 #define PLLX_FO_LP (1<<29)
114
115 #define CLK_RST_CONTROLLER_PLLX_MISC_0 \
116         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0xE4)
117
118 static struct clk *cclk_lp;
119
120 static int cluster_switch_prolog_clock(unsigned int flags)
121 {
122         u32 reg;
123         u32 CclkBurstPolicy;
124         u32 SuperCclkDivier;
125
126         /* Read the bond out register containing the G and LP CPUs. */
127         reg = readl(CAR_BOND_OUT_V);
128
129         /* Sync G-PLLX divider bypass with LP (no effect on G, just to prevent
130            LP settings overwrite by save/restore code */
131         CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
132         CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
133         writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
134
135         /* Switching to G? */
136         if (flags & TEGRA_POWER_CLUSTER_G) {
137                 /* Do the G CPUs exist? */
138                 if (reg & CAR_BOND_OUT_V_CPU_G)
139                         return -ENXIO;
140
141                 /* Keep G CPU clock policy set by upper laayer, with the
142                    exception of the transition via LP1 */
143                 if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
144                         /* In LP1 power mode come up on CLKM (oscillator) */
145                         CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
146                         CclkBurstPolicy &= ~0xF;
147                         SuperCclkDivier = 0;
148
149                         writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
150                         writel(SuperCclkDivier, CAR_SUPER_CCLKG_DIVIDER);
151                 }
152
153 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
154                 /* Hold G CPUs 1-3 in reset after the switch */
155                 reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
156                 writel(reg, CAR_RST_CPUG_CMPLX_SET);
157
158                 /* Take G CPU 0 out of reset after the switch */
159                 reg = CPU_RESET(0);
160                 writel(reg, CAR_RST_CPUG_CMPLX_CLR);
161
162                 /* Disable the clocks on G CPUs 1-3 after the switch */
163                 reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
164                 writel(reg, CAR_CLK_CPUG_CMPLX_SET);
165
166                 /* Enable the clock on G CPU 0 after the switch */
167                 reg = CPU_CLOCK(0);
168                 writel(reg, CAR_CLK_CPUG_CMPLX_CLR);
169
170                 /* Enable the G CPU complex clock after the switch */
171                 reg = CAR_CLK_ENB_V_CPU_G;
172                 writel(reg, CAR_CLK_ENB_V_SET);
173 #endif
174         }
175         /* Switching to LP? */
176         else if (flags & TEGRA_POWER_CLUSTER_LP) {
177                 /* Does the LP CPU exist? */
178                 if (reg & CAR_BOND_OUT_V_CPU_LP)
179                         return -ENXIO;
180
181                 /* Keep LP CPU clock policy set by upper layer, with the
182                    exception of the transition via LP1 */
183                 if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
184                         /* In LP1 power mode come up on CLKM (oscillator) */
185                         CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
186                         CclkBurstPolicy &= ~0xF;
187                         SuperCclkDivier = 0;
188
189                         writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
190                         writel(SuperCclkDivier, CAR_SUPER_CCLKLP_DIVIDER);
191                 }
192
193 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
194                 /* Take the LP CPU ut of reset after the switch */
195                 reg = CPU_RESET(0);
196                 writel(reg, CAR_RST_CPULP_CMPLX_CLR);
197
198                 /* Enable the clock on the LP CPU after the switch */
199                 reg = CPU_CLOCK(0);
200                 writel(reg, CAR_CLK_CPULP_CMPLX_CLR);
201
202                 /* Enable the LP CPU complex clock after the switch */
203                 reg = CAR_CLK_ENB_V_CPU_LP;
204                 writel(reg, CAR_CLK_ENB_V_SET);
205 #endif
206         }
207
208         return 0;
209 }
210
211 static inline void enable_pllx_cluster_port(void)
212 {
213         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
214         val &= (is_lp_cluster()?(~PLLX_FO_G):(~PLLX_FO_LP));
215         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
216 }
217
218 static inline void disable_pllx_cluster_port(void)
219 {
220         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
221         val |= (is_lp_cluster()?PLLX_FO_G:PLLX_FO_LP);
222         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
223 }
224
/*
 * Program the flow controller and CAR for an upcoming cluster switch.
 *
 * @flags: TEGRA_POWER_CLUSTER_* target selection plus modifiers
 *         (IMMEDIATE, FORCE, partition hints, SDRAM_SELFREFRESH).
 *
 * Always rewrites this CPU's flow controller CSR on exit; only arms the
 * actual switch (and the target PLLX port) when a target cluster is
 * requested and differs from the current one (or FORCE is set).
 */
void tegra_cluster_switch_prolog(unsigned int flags)
{
	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Read the flow controller CSR register and clear the CPU switch
	   and immediate flags. If an actual CPU switch is to be performed,
	   re-write the CSR register with the desired values. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);

	/* Program flow controller for immediate wake if requested */
	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		reg |= FLOW_CTRL_CSR_IMMEDIATE_WAKE;

	/* Do nothing if no switch actions requested */
	if (!target_cluster)
		goto done;

#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	/* Select which power partition(s) the flow controller may gate. */
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;

	/* CRAIL and non-CPU partition requests are mutually exclusive. */
	if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
	    (flags & TEGRA_POWER_CLUSTER_PART_NONCPU))
		WARN(1, "CRAIL & CxNC flags must not be set together\n");

	if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
	    (current_cluster == TEGRA_POWER_CLUSTER_LP))
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
	else if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL) {
		/* If s/w can bring CRAIL up early, gate only the non-CPU
		   partition and pre-adjust the thermal zone; otherwise let
		   h/w gate the whole CPU rail. */
		if (tegra_crail_can_start_early()) {
			reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
			tegra_soctherm_adjust_cpu_zone(false);
		} else {
			reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
		}
	} else if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
#endif

	if ((current_cluster != target_cluster) ||
		(flags & TEGRA_POWER_CLUSTER_FORCE)) {
		if (current_cluster != target_cluster) {
			/* Set up the clocks for the target CPU. */
			if (cluster_switch_prolog_clock(flags)) {
				/* The target CPU does not exist */
				goto done;
			}

			/* Set up the flow controller to switch CPUs. */
			reg |= FLOW_CTRL_CSR_SWITCH_CLUSTER;

			/* Enable target port of PLL_X */
			enable_pllx_cluster_port();
		}
	}

done:
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));
}
292
293
/*
 * Re-synchronize the ACTLR FW bit with the SMP bit after an LP=>G switch.
 *
 * This is only needed for Cortex-A9; for Cortex-A15, do nothing!
 *
 * TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads
 * as zero, writes ignored). Hence, it is not preserved across G=>LP=>G
 * switch by CPU save/restore code, but SMP bit is restored correctly.
 * Synchronize these two bits here after LP=>G transition. Note that
 * only CPU0 core is powered on before and after the switch. See also
 * bug 807595.
 */
static void cluster_switch_epilog_actlr(void)
{
	u32 actlr;

	/* Part number 0xC0F is Cortex-A15 — nothing to do there. */
	if (((read_cpuid_id() >> 4) & 0xFFF) == 0xC0F)
		return;

	/* Read the Auxiliary Control Register (CP15 c1, op2=1). */
	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	/* If SMP (bit 6) is set, make FW (bit 0) match it. */
	if (actlr & (0x1 << 6)) {
		actlr |= 0x1;
		__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
	}
}
318
/*
 * Restore GIC distributor interrupt affinity after returning to the G CPU.
 */
static void cluster_switch_epilog_gic(void)
{
	unsigned int max_irq, i;
	void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);

	/* Reprogram the interrupt affinity because on the LP CPU,
	   the interrupt distributor affinity registers are stubbed out
	   by ARM (reads as zero, writes ignored). So when the LP CPU
	   context save code runs, the affinity registers will read
	   as all zero. This causes all interrupts to be effectively
	   disabled when back on the G CPU because they aren't routable
	   to any CPU. See bug 667720 for details. */

	/* GIC_DIST_CTR[4:0] encodes the number of 32-irq blocks - 1. */
	max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/* SPIs start at 32; one 32-bit target register covers 4 irqs. */
	for (i = 32; i < max_irq; i += 4) {
		u32 val = 0x01010101;	/* default: route all 4 irqs to CPU0 */
#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
		unsigned int irq;
		for (irq = i; irq < (i + 4); irq++) {
			struct cpumask mask;
			struct irq_desc *desc = irq_to_desc(irq);

			/* Merge any driver-provided affinity hint into the
			   per-irq byte of the target register. */
			if (desc && desc->affinity_hint) {
				if (cpumask_and(&mask, desc->affinity_hint,
						desc->irq_data.affinity))
					val |= (*cpumask_bits(&mask) & 0xff) <<
						((irq & 3) * 8);
			}
		}
#endif
		/* i * 4 / 4 == i: byte offset of irq i in the target regs */
		writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
	}
}
354
/*
 * Clean up flow controller and per-cluster state after a cluster switch.
 *
 * @flags: the same TEGRA_POWER_CLUSTER_* flags passed to the prolog.
 */
void tegra_cluster_switch_epilog(unsigned int flags)
{
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Make sure the switch and immediate flags are cleared in
	   the flow controller to prevent undesirable side-effects
	   for future users of the flow controller. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
#endif
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));

	/* Perform post-switch LP=>G clean-up */
	if (!is_lp_cluster()) {
		cluster_switch_epilog_actlr();
		cluster_switch_epilog_gic();
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
		if (tegra_crail_can_early())
			tegra_soctherm_adjust_cpu_zone(true);
	} else  if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
		    tegra_crail_can_start_early()) {
		/* On LP with early-CRAIL support, gate CRAIL in s/w. */
		tegra_powergate_partition(TEGRA_POWERGATE_CRAIL);
#endif
	}

	/* Disable unused port of PLL_X */
	disable_pllx_cluster_port();

	#if DEBUG_CLUSTER_SWITCH
	{
		/* FIXME: clock functions below are taking mutex */
		struct clk *c = tegra_get_clock_by_name(
			is_lp_cluster() ? "cpu_lp" : "cpu_g");
		DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
			is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
	}
	#endif
}
399
/*
 * Bring the CPU rail (CRAIL) partition up early in software, before the
 * flow controller would do it, and trigger RAM repair if required.
 *
 * Returns 0 on success (or if CRAIL is already powered / support is
 * compiled out), -ETIMEDOUT if power-up or RAM repair does not complete.
 */
static int tegra_crail_startup_early(void)
{
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	u32 reg;
	int us = tegra_cpu_power_good_time();

	if (tegra_powergate_is_powered(TEGRA_POWERGATE_CRAIL))
		return 0;

	/*
	 * Toggle CRAIL, insert s/w  power good delay (load h/w power good
	 * timer with very small settings so it expires for sure within power
	 * gate toggle timeout).
	 */
	tegra_limit_cpu_power_timers(1, 1);
	tegra_unpowergate_partition(TEGRA_POWERGATE_CRAIL);
	/* usleep_range() needs timekeeping; busy-wait during suspend exit */
	if (timekeeping_suspended)
		udelay(us);			/* suspend exit */
	else
		usleep_range(us, us + 10);	/* regular scheduling */

	if (!tegra_powergate_is_powered(TEGRA_POWERGATE_CRAIL)) {
		WARN(1, "Failed to turn CRAIL ON in %d us\n", us);
		return -ETIMEDOUT;
	}

	/* If needed trigger RAM repair request in s/w (auto-clear in h/w) */
	#define RAM_REPAIR_TIMEOUT 500

	reg = readl(FLOW_CTRL_RAM_REPAIR) | FLOW_CTRL_RAM_REPAIR_REQ;
	if (!(reg & FLOW_CTRL_RAM_REPAIR_BYPASS_EN)) {
		int ram_repair_time = RAM_REPAIR_TIMEOUT;
		flowctrl_writel(reg, FLOW_CTRL_RAM_REPAIR);
		/* Poll until h/w clears the request bit, up to the timeout */
		while (readl(FLOW_CTRL_RAM_REPAIR) & FLOW_CTRL_RAM_REPAIR_REQ) {
			udelay(1);
			if (!(ram_repair_time--)) {
				WARN(1, "Failed to repair RAM in %d us\n",
				     RAM_REPAIR_TIMEOUT);
				return -ETIMEDOUT;
			}
		}
	}
#endif
	return 0;
}
445
/*
 * Perform a full cluster switch (G<->LP), optionally via LP1 (SDRAM
 * self-refresh) or a power-down of the last CPU.
 *
 * @us:    wake-up trigger interval in microseconds (0 with IMMEDIATE).
 * @flags: TEGRA_POWER_CLUSTER_* target plus modifier bits.
 *
 * Returns 0 on success; -EINVAL for a bad target, -EBUSY if more than
 * one CPU is online, -EEXIST if already on the target cluster (and not
 * forced), -EPERM if the G cluster is absent, or the error from the
 * early CRAIL startup.
 *
 * Must be called with only one CPU online; runs with IRQs disabled
 * around the actual switch.
 */
int tegra_cluster_control(unsigned int us, unsigned int flags)
{
	/* Timestamp of the last G=>LP switch, used to pace LP=>G. */
	static ktime_t last_g2lp;

	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	unsigned long irq_flags;

	/* Exactly one target cluster must be selected. */
	if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
		return -EINVAL;

	if (num_online_cpus() > 1)
		return -EBUSY;

	if ((current_cluster == target_cluster)
	&& !(flags & TEGRA_POWER_CLUSTER_FORCE))
		return -EEXIST;

	if (target_cluster == TEGRA_POWER_CLUSTER_G)
		if (!is_g_cluster_present())
			return -EPERM;

	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		us = 0;

	DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
		(flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
		is_lp_cluster() ? "LP" : "G",
		(target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
		(flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
		(flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
		us));

	if ((current_cluster == TEGRA_POWER_CLUSTER_LP) &&
	    (target_cluster == TEGRA_POWER_CLUSTER_G)) {
		/* Ensure the minimum power-off time has elapsed since the
		   last G=>LP switch before powering the G cluster back up. */
		if (!timekeeping_suspended) {
			ktime_t now = ktime_get();
			s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
			s64 t_off = tegra_cpu_power_off_time();
			if (t_off > t)
				udelay((unsigned int)(t_off - t));
		}

		/* Start CPU rail transition up early - before disabling irq */
		if (tegra_crail_can_start_early()) {
			int ret = tegra_crail_startup_early();
			if (ret)
				return ret;
		}
	}

	local_irq_save(irq_flags);
	/* Use the RCU-idle trace variant from the idle task context. */
	if (is_idle_task(current))
		trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_START,
					    current_cluster,
					    target_cluster);
	else
		trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_START,
				    current_cluster,
				    target_cluster);
	tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);

#ifdef CONFIG_TEGRA_VIRTUAL_CPUID
	/* Record the logical CPU id for the flow controller before
	   switching down to LP. */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		if (target_cluster == TEGRA_POWER_CLUSTER_LP) {
			u32 cpu;

			cpu = cpu_logical_map(smp_processor_id());
			writel(cpu, FLOW_CTRL_MPID);
		}
	}
#endif

	if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
		/* Switch through LP1: prolog, suspend DRAM, epilog. */
		if (us)
			tegra_pd_set_trigger(us);

		tegra_cluster_switch_prolog(flags);
		tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
		tegra_cluster_switch_epilog(flags);

		if (us)
			tegra_pd_set_trigger(0);
	} else {
		/* Switch through a power-down of the last CPU. */
		int cpu;

		cpu = cpu_logical_map(smp_processor_id());

		tegra_set_cpu_in_pd(cpu);
		cpu_pm_enter();
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					   &cpu);
		tegra_idle_power_down_last(0, flags);
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
					   &cpu);
		cpu_pm_exit();
		tegra_clear_cpu_in_pd(cpu);
	}

	/* Update DVFS rail bookkeeping for the cluster we ended up on. */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		ktime_t now = ktime_get();
		if (target_cluster == TEGRA_POWER_CLUSTER_G) {
			tegra_dvfs_rail_on(tegra_cpu_rail, now);
		} else {
			last_g2lp = now;
			tegra_dvfs_rail_off(tegra_cpu_rail, now);
		}
	}
	tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_end);
	if (is_idle_task(current))
		trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_DONE,
					    current_cluster,
					    target_cluster);
	else
		trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_DONE,
				    current_cluster,
				    target_cluster);
	local_irq_restore(irq_flags);

	DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));

	return 0;
}
573
574 int tegra_switch_to_lp_cluster()
575 {
576         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
577         struct clk *cpu_lp_clk = tegra_get_clock_by_name("cpu_lp");
578         int rate = clk_get_rate(cpu_clk);
579         int e;
580
581         if (is_lp_cluster())
582                 return 0;
583
584         /* Change the Clock Rate to desired LP CPU's clock rate */
585
586         if (rate > cpu_lp_clk->max_rate) {
587                 e = clk_set_rate(cpu_clk, cpu_lp_clk->max_rate);
588                 if (e) {
589                         pr_err("cluster_swtich: Failed to set clock %d", e);
590                         return e;
591                 }
592         }
593
594         e = clk_set_parent(cpu_clk, cpu_lp_clk);
595         if (e) {
596                 pr_err("cluster switching request failed (%d)\n", e);
597                 return e;
598         }
599         return e;
600 }
601
602 int tegra_switch_to_g_cluster()
603 {
604         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
605         struct clk *cpu_g_clk = tegra_get_clock_by_name("cpu_g");
606         int e;
607
608         if (!is_lp_cluster())
609                 return 0;
610
611         e = clk_set_parent(cpu_clk, cpu_g_clk);
612         if (e) {
613                 pr_err("cluster switching request failed (%d)\n", e);
614                 return e;
615         }
616
617         /* Switch back to G Cluster Cpu Max Clock rate */
618
619         e = clk_set_rate(cpu_clk, cpu_g_clk->max_rate);
620         if (e) {
621                 pr_err("cluster_swtich: Failed to increase the clock %d\n", e);
622                 return e;
623         }
624         return e;
625 }
626
627 int tegra_cluster_switch(struct clk *cpu_clk, struct clk *new_cluster_clk)
628 {
629         int ret;
630         bool is_target_lp = is_lp_cluster() ^
631                 (clk_get_parent(cpu_clk) != new_cluster_clk);
632
633         /* Update core edp limits before switch to LP cluster; abort on error */
634         if (is_target_lp) {
635                 ret = tegra_core_edp_cpu_state_update(is_target_lp);
636                 if (ret)
637                         return ret;
638         }
639
640         ret = clk_set_parent(cpu_clk, new_cluster_clk);
641         if (ret)
642                 return ret;
643
644         /* Update core edp limits after switch to G cluster; ignore error */
645         if (!is_target_lp)
646                 tegra_core_edp_cpu_state_update(is_target_lp);
647
648         return 0;
649 }
650 #endif
651
652 #ifdef CONFIG_PM_SLEEP
653
/* Hook invoked on entry to LP0; intentionally empty (see comment). */
void tegra_lp0_suspend_mc(void)
{
	/* Since memory frequency after LP0 is restored to boot rate
	   mc timing is saved during init, not on entry to LP0. Keep
	   this hook just in case, anyway */
}
660
/* Hook invoked on exit from LP0: restore memory controller timing. */
void tegra_lp0_resume_mc(void)
{
	tegra_mc_timing_restore();
}
665
666 static int __init get_clock_cclk_lp(void)
667 {
668         if (!cclk_lp)
669                 cclk_lp = tegra_get_clock_by_name("cclk_lp");
670         return 0;
671 }
672
673 subsys_initcall(get_clock_cclk_lp);
674
/*
 * Move to the LP cluster around LP0 entry and back to G on exit, but only
 * when LP0 was entered from the G cluster.
 *
 * @enter: true on the way into LP0, false on the way out.
 */
void tegra_lp0_cpu_mode(bool enter)
{
	/* Remembers across the enter/exit pair whether LP0 was entered
	   while running on the G cluster. */
	static bool entered_on_g = false;
	unsigned int flags;

	if (enter)
		entered_on_g = !is_lp_cluster();

	if (entered_on_g) {
		if (enter)
			tegra_clk_prepare_enable(cclk_lp);

		flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
		flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
		flags |= TEGRA_POWER_CLUSTER_PART_DEFAULT;
#endif
		/* NOTE(review): if the switch fails on exit, cclk_lp is left
		   prepared/enabled — looks like a deliberate best-effort,
		   confirm before changing. */
		if (!tegra_cluster_control(0, flags)) {
			if (!enter)
				tegra_clk_disable_unprepare(cclk_lp);
			pr_info("Tegra: switched to %s cluster\n",
				enter ? "LP" : "G");
		}
	}
}
700
701 #define IO_DPD_INFO(_name, _index, _bit) \
702         { \
703                 .name = _name, \
704                 .io_dpd_reg_index = _index, \
705                 .io_dpd_bit = _bit, \
706         }
707
708 /* PMC IO DPD register offsets */
709 #define APBDEV_PMC_IO_DPD_REQ_0         0x1b8
710 #define APBDEV_PMC_IO_DPD_STATUS_0      0x1bc
711 #define APBDEV_PMC_SEL_DPD_TIM_0        0x1c8
712 #define APBDEV_DPD_ENABLE_LSB           30
713 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
714 #define APBDEV_DPD2_ENABLE_LSB          5
715 #else
716 #define APBDEV_DPD2_ENABLE_LSB          30
717 #endif
718 #define PMC_DPD_SAMPLE                  0x20
719
/* Table of IO pads managed via DPD; intentionally empty in this version,
   so lookups in tegra_io_dpd_get() never match. */
static struct tegra_io_dpd tegra_list_io_dpd[] = {
};
722 #endif
723
724 /* we want to cleanup bootloader io dpd setting in kernel */
725 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
726
727 #if defined CONFIG_PM_SLEEP
/*
 * Look up the IO DPD handle for @dev by name in tegra_list_io_dpd.
 *
 * Returns a pointer to the matching table entry, or NULL when IO DPD is
 * not supported for the device (or CONFIG_TEGRA_IO_DPD is off). Note the
 * table is empty in this file, so the loop currently never matches.
 */
struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
#ifdef CONFIG_TEGRA_IO_DPD
	int i;
	const char *name = dev ? dev_name(dev) : NULL;
	if (name) {
		/* Prefix match of the table entry name against dev name. */
		for (i = 0; i < ARRAY_SIZE(tegra_list_io_dpd); i++) {
			if (!(strncmp(tegra_list_io_dpd[i].name, name,
				strlen(name)))) {
				return &tegra_list_io_dpd[i];
			}
		}
	}
	dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
		((name) ? name : "NULL"));
#endif
	return NULL;
}
746
747 static DEFINE_SPINLOCK(tegra_io_dpd_lock);
748
749 void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
750 {
751         unsigned int enable_mask;
752         unsigned int dpd_status;
753         unsigned int dpd_enable_lsb;
754
755         if (!hnd)
756                 return;
757
758         spin_lock(&tegra_io_dpd_lock);
759         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
760                                                 APBDEV_DPD_ENABLE_LSB;
761         writel(0x1, pmc + PMC_DPD_SAMPLE);
762         writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
763         enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
764         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
765                                         hnd->io_dpd_reg_index * 8));
766         /* delay pclk * (reset APBDEV_PMC_SEL_DPD_TIM_0 value 127 + 5) */
767         udelay(7);
768         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
769                                         hnd->io_dpd_reg_index * 8));
770         if (!(dpd_status & (1 << hnd->io_dpd_bit))) {
771                 if (!tegra_platform_is_fpga()) {
772                         pr_info("Error: dpd%d enable failed, status=%#x\n",
773                         (hnd->io_dpd_reg_index + 1), dpd_status);
774                 }
775         }
776         /* Sample register must be reset before next sample operation */
777         writel(0x0, pmc + PMC_DPD_SAMPLE);
778         spin_unlock(&tegra_io_dpd_lock);
779         return;
780 }
781
782 void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
783 {
784         unsigned int enable_mask;
785         unsigned int dpd_status;
786         unsigned int dpd_enable_lsb;
787
788         if (!hnd)
789                 return;
790
791         spin_lock(&tegra_io_dpd_lock);
792         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
793                                                 APBDEV_DPD_ENABLE_LSB;
794         enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
795         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
796                                         hnd->io_dpd_reg_index * 8));
797         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
798                                         hnd->io_dpd_reg_index * 8));
799         if (dpd_status & (1 << hnd->io_dpd_bit)) {
800                 if (!tegra_platform_is_fpga()) {
801                         pr_info("Error: dpd%d disable failed, status=%#x\n",
802                         (hnd->io_dpd_reg_index + 1), dpd_status);
803                 }
804         }
805         spin_unlock(&tegra_io_dpd_lock);
806         return;
807 }
808
809 static void tegra_io_dpd_delayed_disable(struct work_struct *work)
810 {
811         struct tegra_io_dpd *hnd = container_of(
812                 to_delayed_work(work), struct tegra_io_dpd, delay_dpd);
813         tegra_io_dpd_disable(hnd);
814         hnd->need_delay_dpd = 0;
815 }
816
817 int tegra_io_dpd_init(void)
818 {
819         int i;
820         for (i = 0;
821                 i < (sizeof(tegra_list_io_dpd) / sizeof(struct tegra_io_dpd));
822                 i++) {
823                         INIT_DELAYED_WORK(&(tegra_list_io_dpd[i].delay_dpd),
824                                 tegra_io_dpd_delayed_disable);
825                         mutex_init(&(tegra_list_io_dpd[i].delay_lock));
826                         tegra_list_io_dpd[i].need_delay_dpd = 0;
827         }
828         return 0;
829 }
830
831 #else
832
/* Stub: IO DPD support compiled out (no CONFIG_PM_SLEEP). */
int tegra_io_dpd_init(void)
{
	return 0;
}

/* Stub: no-op when IO DPD support is compiled out. */
void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
{
}

/* Stub: no-op when IO DPD support is compiled out. */
void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
{
}

/* Stub: always reports "unsupported" when IO DPD is compiled out. */
struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
	return NULL;
}
850
851 #endif
852
853 EXPORT_SYMBOL(tegra_io_dpd_get);
854 EXPORT_SYMBOL(tegra_io_dpd_enable);
855 EXPORT_SYMBOL(tegra_io_dpd_disable);
856 EXPORT_SYMBOL(tegra_io_dpd_init);
857
/* Description of one PMC IO DPD request register. */
struct io_dpd_reg_info {
	u32 req_reg_off;	/* PMC offset of the DPD request register */
	u8 dpd_code_lsb;	/* LSB position of the request code field */
};

/* Tegra3 DPD request registers, cleared at boot by
   tegra_bl_io_dpd_cleanup(). */
static struct io_dpd_reg_info t3_io_dpd_req_regs[] = {
	{0x1b8, 30},
	{0x1c0, 5},
};
867
868 /* io dpd off request code */
869 #define IO_DPD_CODE_OFF         1
870
871 /* cleans io dpd settings from bootloader during kernel init */
872 void tegra_bl_io_dpd_cleanup()
873 {
874         int i;
875         unsigned int dpd_mask;
876         unsigned int dpd_status;
877
878         pr_info("Clear bootloader IO dpd settings\n");
879         /* clear all dpd requests from bootloader */
880         for (i = 0; i < ARRAY_SIZE(t3_io_dpd_req_regs); i++) {
881                 dpd_mask = ((1 << t3_io_dpd_req_regs[i].dpd_code_lsb) - 1);
882                 dpd_mask |= (IO_DPD_CODE_OFF <<
883                         t3_io_dpd_req_regs[i].dpd_code_lsb);
884                 writel(dpd_mask, pmc + t3_io_dpd_req_regs[i].req_reg_off);
885                 /* dpd status register is next to req reg in tegra3 */
886                 dpd_status = readl(pmc +
887                         (t3_io_dpd_req_regs[i].req_reg_off + 4));
888         }
889         return;
890 }
891 EXPORT_SYMBOL(tegra_bl_io_dpd_cleanup);
892