ARM: tegra: fix warning during LP0
[linux-3.10.git] / arch / arm / mach-tegra / pm-t3.c
1 /*
2  * arch/arm/mach-tegra/pm-t3.c
3  *
4  * Tegra3 SOC-specific power and cluster management
5  *
6  * Copyright (c) 2009-2013, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/io.h>
22 #include <linux/smp.h>
23 #include <linux/interrupt.h>
24 #include <linux/clk.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/device.h>
28 #include <linux/module.h>
29 #include <linux/clockchips.h>
30 #include <linux/cpu_pm.h>
31 #include <linux/irqchip/arm-gic.h>
32 #include <linux/sched.h>
33 #include <linux/tegra-powergate.h>
34 #include <linux/tegra-soc.h>
35
36 #include <mach/irqs.h>
37 #include <mach/io_dpd.h>
38 #include <mach/edp.h>
39
40 #include <asm/smp_plat.h>
41 #include <asm/cputype.h>
42
43 #include "clock.h"
44 #include "cpuidle.h"
45 #include "iomap.h"
46 #include "pm.h"
47 #include "sleep.h"
48 #include "tegra3_emc.h"
49 #include "dvfs.h"
50 #include "tegra11_soctherm.h"
51
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/nvpower.h>
54
55 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
56 #define CAR_CCLK_BURST_POLICY \
57         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)
58
59 #define CAR_SUPER_CCLK_DIVIDER \
60         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)
61
62 #define CAR_CCLKG_BURST_POLICY \
63         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)
64
65 #define CAR_SUPER_CCLKG_DIVIDER \
66         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)
67
68 #define CAR_CCLKLP_BURST_POLICY \
69         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
70 #define PLLX_DIV2_BYPASS_LP     (1<<16)
71
72 #define CAR_SUPER_CCLKLP_DIVIDER \
73         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)
74
75 #define CAR_BOND_OUT_V \
76         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
77 #define CAR_BOND_OUT_V_CPU_G    (1<<0)
78 #define CAR_BOND_OUT_V_CPU_LP   (1<<1)
79
80 #define CAR_CLK_ENB_V_SET \
81         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
82 #define CAR_CLK_ENB_V_CPU_G     (1<<0)
83 #define CAR_CLK_ENB_V_CPU_LP    (1<<1)
84
85 #define CAR_RST_CPUG_CMPLX_SET \
86         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)
87
88 #define CAR_RST_CPUG_CMPLX_CLR \
89         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)
90
91 #define CAR_RST_CPULP_CMPLX_SET \
92         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)
93
94 #define CAR_RST_CPULP_CMPLX_CLR \
95         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)
96
97 #define CAR_CLK_CPUG_CMPLX_SET \
98         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)
99
100 #define CAR_CLK_CPUG_CMPLX_CLR \
101         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)
102
103 #define CAR_CLK_CPULP_CMPLX_SET \
104         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)
105
106 #define CAR_CLK_CPULP_CMPLX_CLR \
107         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)
108
109 #define CPU_CLOCK(cpu)  (0x1<<(8+cpu))
110 #define CPU_RESET(cpu)  (0x1111ul<<(cpu))
111
112 #define PLLX_FO_G (1<<28)
113 #define PLLX_FO_LP (1<<29)
114
115 #define CLK_RST_CONTROLLER_PLLX_MISC_0 \
116         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0xE4)
117
118 static struct clk *cclk_lp;
119
/*
 * cluster_switch_prolog_clock - prepare CAR clocks for a cluster switch.
 * @flags: TEGRA_POWER_CLUSTER_* request bits.
 *
 * Programs the clock-reset controller so the target CPU complex (G or LP)
 * comes up in a sane clock state after the flow controller performs the
 * switch. Returns 0 on success, or -ENXIO if the requested cluster is
 * bonded out (not present on this chip).
 */
static int cluster_switch_prolog_clock(unsigned int flags)
{
	u32 reg;
	u32 CclkBurstPolicy;
	u32 SuperCclkDivier;

	/* Read the bond out register containing the G and LP CPUs. */
	reg = readl(CAR_BOND_OUT_V);

	/* Sync G-PLLX divider bypass with LP (no effect on G, just to prevent
	   LP settings overwrite by save/restore code) */
	CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
	CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
	writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);

	/* Switching to G? */
	if (flags & TEGRA_POWER_CLUSTER_G) {
		/* Do the G CPUs exist? (bond-out bit set means absent) */
		if (reg & CAR_BOND_OUT_V_CPU_G)
			return -ENXIO;

		/* Keep G CPU clock policy set by upper layer, with the
		   exception of the transition via LP1 */
		if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
			/* In LP1 power mode come up on CLKM (oscillator) */
			CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
			CclkBurstPolicy &= ~0xF;
			SuperCclkDivier = 0;

			writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
			writel(SuperCclkDivier, CAR_SUPER_CCLKG_DIVIDER);
		}

#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
		/* Hold G CPUs 1-3 in reset after the switch */
		reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
		writel(reg, CAR_RST_CPUG_CMPLX_SET);

		/* Take G CPU 0 out of reset after the switch */
		reg = CPU_RESET(0);
		writel(reg, CAR_RST_CPUG_CMPLX_CLR);

		/* Disable the clocks on G CPUs 1-3 after the switch */
		reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
		writel(reg, CAR_CLK_CPUG_CMPLX_SET);

		/* Enable the clock on G CPU 0 after the switch */
		reg = CPU_CLOCK(0);
		writel(reg, CAR_CLK_CPUG_CMPLX_CLR);

		/* Enable the G CPU complex clock after the switch */
		reg = CAR_CLK_ENB_V_CPU_G;
		writel(reg, CAR_CLK_ENB_V_SET);
#endif
	}
	/* Switching to LP? */
	else if (flags & TEGRA_POWER_CLUSTER_LP) {
		/* Does the LP CPU exist? (bond-out bit set means absent) */
		if (reg & CAR_BOND_OUT_V_CPU_LP)
			return -ENXIO;

		/* Keep LP CPU clock policy set by upper layer, with the
		   exception of the transition via LP1 */
		if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
			/* In LP1 power mode come up on CLKM (oscillator) */
			CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
			CclkBurstPolicy &= ~0xF;
			SuperCclkDivier = 0;

			writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
			writel(SuperCclkDivier, CAR_SUPER_CCLKLP_DIVIDER);
		}

#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
		/* Take the LP CPU out of reset after the switch */
		reg = CPU_RESET(0);
		writel(reg, CAR_RST_CPULP_CMPLX_CLR);

		/* Enable the clock on the LP CPU after the switch */
		reg = CPU_CLOCK(0);
		writel(reg, CAR_CLK_CPULP_CMPLX_CLR);

		/* Enable the LP CPU complex clock after the switch */
		reg = CAR_CLK_ENB_V_CPU_LP;
		writel(reg, CAR_CLK_ENB_V_SET);
#endif
	}

	return 0;
}
210
211 static inline void enable_pllx_cluster_port(void)
212 {
213         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
214         val &= (is_lp_cluster()?(~PLLX_FO_G):(~PLLX_FO_LP));
215         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
216 }
217
218 static inline void disable_pllx_cluster_port(void)
219 {
220         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
221         val |= (is_lp_cluster()?PLLX_FO_G:PLLX_FO_LP);
222         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
223 }
224
/*
 * tegra_cluster_switch_prolog - arm the flow controller for a cluster switch.
 * @flags: TEGRA_POWER_CLUSTER_* and TEGRA_POWER_CLUSTER_PART_* request bits.
 *
 * Clears any stale switch/immediate-wake bits in this CPU's flow controller
 * CSR; if a switch to a different (or forced) cluster is requested, sets up
 * the target-cluster clocks, enables the matching PLL_X port and re-arms
 * the CSR with the switch request. The CSR is always written back, even
 * when no switch ends up being performed.
 */
void tegra_cluster_switch_prolog(unsigned int flags)
{
	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Read the flow controller CSR register and clear the CPU switch
	   and immediate flags. If an actual CPU switch is to be performed,
	   re-write the CSR register with the desired values. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);

	/* Program flow controller for immediate wake if requested */
	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		reg |= FLOW_CTRL_CSR_IMMEDIATE_WAKE;

	/* Do nothing if no switch actions requested */
	if (!target_cluster)
		goto done;

#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
	/* CRAIL requested without a non-CPU partition while on LP: gate only
	   the non-CPU partition rather than the whole rail */
	if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
	    ((flags & TEGRA_POWER_CLUSTER_PART_NONCPU) == 0) &&
	    (current_cluster == TEGRA_POWER_CLUSTER_LP))
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
	else if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL) {
		if (tegra_crail_can_start_early()) {
			/* Rail will be brought up early in s/w; gate only
			   the non-CPU partition here */
			reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
			tegra_soctherm_adjust_cpu_zone(false);
		} else {
			reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
		}
	}

	if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
#endif

	if ((current_cluster != target_cluster) ||
		(flags & TEGRA_POWER_CLUSTER_FORCE)) {
		if (current_cluster != target_cluster) {
			/* Set up the clocks for the target CPU. */
			if (cluster_switch_prolog_clock(flags)) {
				/* The target CPU does not exist */
				goto done;
			}

			/* Set up the flow controller to switch CPUs. */
			reg |= FLOW_CTRL_CSR_SWITCH_CLUSTER;

			/* Enable target port of PLL_X */
			enable_pllx_cluster_port();
		}
	}

done:
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));
}
290
291
static void cluster_switch_epilog_actlr(void)
{
	u32 actlr;

	/*
	 * This is only needed for Cortex-A9; for Cortex-A15, do nothing!
	 *
	 * TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads
	 * as zero, writes ignored). Hence, it is not preserved across G=>LP=>G
	 * switch by CPU save/restore code, but SMP bit is restored correctly.
	 * Synchronize these two bits here after LP=>G transition. Note that
	 * only CPU0 core is powered on before and after the switch. See also
	 * bug 807595.
	 */

	/* Cortex-A15 primary part number is 0xC0F: nothing to fix up */
	if (((read_cpuid_id() >> 4) & 0xFFF) == 0xC0F)
		return;

	/* Read the Auxiliary Control Register */
	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	/* If the SMP bit (bit 6) survived the switch, restore FW (bit 0) */
	if (actlr & (0x1 << 6)) {
		actlr |= 0x1;
		__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
	}
}
316
/*
 * cluster_switch_epilog_gic - restore GIC interrupt routing after LP=>G.
 *
 * Reprogram the interrupt affinity because on the LP CPU the interrupt
 * distributor affinity registers are stubbed out by ARM (reads as zero,
 * writes ignored). So when the LP CPU context save code runs, the affinity
 * registers will read as all zero. This causes all interrupts to be
 * effectively disabled when back on the G CPU because they aren't routable
 * to any CPU. See bug 667720 for details.
 */
static void cluster_switch_epilog_gic(void)
{
	unsigned int max_irq, i;
	void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);

	/* GIC type register [4:0] holds (number of 32-line blocks - 1) */
	max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/* SPIs start at 32; each target register covers 4 IRQs, 1 byte each */
	for (i = 32; i < max_irq; i += 4) {
		u32 val = 0x01010101;	/* default: route everything to CPU0 */
#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
		unsigned int irq;
		for (irq = i; irq < (i + 4); irq++) {
			struct cpumask mask;
			struct irq_desc *desc = irq_to_desc(irq);

			/* Honor any affinity hint intersected with the
			   current affinity for this IRQ's byte lane */
			if (desc && desc->affinity_hint) {
				if (cpumask_and(&mask, desc->affinity_hint,
						desc->irq_data.affinity))
					val |= (*cpumask_bits(&mask) & 0xff) <<
						((irq & 3) * 8);
			}
		}
#endif
		writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
	}
}
352
/*
 * tegra_cluster_switch_epilog - clean up after a cluster switch.
 * @flags: the TEGRA_POWER_CLUSTER_* bits used for the switch.
 *
 * Clears the switch/immediate-wake flags in this CPU's flow controller CSR
 * (so later flow-controller users see a clean state), performs LP=>G fixups
 * (ACTLR and GIC affinity restore), and disables the PLL_X port of the
 * now-inactive cluster.
 */
void tegra_cluster_switch_epilog(unsigned int flags)
{
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Make sure the switch and immediate flags are cleared in
	   the flow controller to prevent undesirable side-effects
	   for future users of the flow controller. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
#endif
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));

	/* Perform post-switch LP=>G clean-up */
	if (!is_lp_cluster()) {
		cluster_switch_epilog_actlr();
		cluster_switch_epilog_gic();
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
		if (tegra_crail_can_start_early())
			tegra_soctherm_adjust_cpu_zone(true);
	/* Landed on LP with early-start CRAIL: power-gate the rail now */
	} else  if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
		    tegra_crail_can_start_early()) {
		tegra_powergate_partition(TEGRA_POWERGATE_CRAIL);
#endif
	}

	/* Disable unused port of PLL_X */
	disable_pllx_cluster_port();

	#if DEBUG_CLUSTER_SWITCH
	{
		/* FIXME: clock functions below are taking mutex */
		struct clk *c = tegra_get_clock_by_name(
			is_lp_cluster() ? "cpu_lp" : "cpu_g");
		DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
			is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
	}
	#endif
}
397
/*
 * tegra_crail_startup_early - power the CPU rail (CRAIL) up ahead of switch.
 *
 * Un-gates the CRAIL partition using a minimal h/w power-good timer plus an
 * explicit s/w power-good delay, then (unless bypassed) triggers the RAM
 * repair request and busy-waits for h/w to auto-clear it.
 *
 * Returns 0 on success, -ETIMEDOUT if the rail did not power up or RAM
 * repair did not complete in time. Compiles to a no-op returning 0 when
 * symmetric CPU power-gate support is not configured.
 */
static int tegra_crail_startup_early(void)
{
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	u32 reg;
	int us = tegra_cpu_power_good_time();

	if (tegra_powergate_is_powered(TEGRA_POWERGATE_CRAIL))
		return 0;

	/*
	 * Toggle CRAIL, insert s/w power good delay (load h/w power good
	 * timer with very small settings so it expires for sure within power
	 * gate toggle timeout).
	 */
	tegra_limit_cpu_power_timers(1, 1);
	tegra_unpowergate_partition(TEGRA_POWERGATE_CRAIL);
	if (timekeeping_suspended)
		udelay(us);			/* suspend exit */
	else
		usleep_range(us, us + 10);	/* regular scheduling */

	if (!tegra_powergate_is_powered(TEGRA_POWERGATE_CRAIL)) {
		WARN(1, "Failed to turn CRAIL ON in %d us\n", us);
		return -ETIMEDOUT;
	}

	/* If needed trigger RAM repair request in s/w (auto-clear in h/w) */
	#define RAM_REPAIR_TIMEOUT 500

	reg = readl(FLOW_CTRL_RAM_REPAIR) | FLOW_CTRL_RAM_REPAIR_REQ;
	if (!(reg & FLOW_CTRL_RAM_REPAIR_BYPASS_EN)) {
		int ram_repair_time = RAM_REPAIR_TIMEOUT;
		flowctrl_writel(reg, FLOW_CTRL_RAM_REPAIR);
		while (readl(FLOW_CTRL_RAM_REPAIR) & FLOW_CTRL_RAM_REPAIR_REQ) {
			udelay(1);
			if (!(ram_repair_time--)) {
				WARN(1, "Failed to repair RAM in %d us\n",
				     RAM_REPAIR_TIMEOUT);
				return -ETIMEDOUT;
			}
		}
	}
#endif
	return 0;
}
443
/*
 * tegra_cluster_control - perform a G<->LP cluster switch.
 * @us:    wake trigger interval in microseconds (forced to 0 for
 *         immediate-wake requests).
 * @flags: TEGRA_POWER_CLUSTER_* request bits; must select exactly one
 *         target cluster.
 *
 * Returns 0 on success; -EINVAL for a malformed target, -EBUSY when more
 * than one CPU is online, -EEXIST when already on the target (and not
 * forced), -EPERM when the G cluster is not present. The switch itself
 * runs with local interrupts disabled, either through LP1 (SDRAM in
 * self-refresh) or through a powered-down idle transition.
 */
int tegra_cluster_control(unsigned int us, unsigned int flags)
{
	static ktime_t last_g2lp;	/* timestamp of the last G=>LP switch */

	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	unsigned long irq_flags;

	if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
		return -EINVAL;

	if (num_online_cpus() > 1)
		return -EBUSY;

	if ((current_cluster == target_cluster)
	&& !(flags & TEGRA_POWER_CLUSTER_FORCE))
		return -EEXIST;

	if (target_cluster == TEGRA_POWER_CLUSTER_G)
		if (!is_g_cluster_present())
			return -EPERM;

	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		us = 0;

	DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
		(flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
		is_lp_cluster() ? "LP" : "G",
		(target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
		(flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
		(flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
		us));

	if ((current_cluster == TEGRA_POWER_CLUSTER_LP) &&
	    (target_cluster == TEGRA_POWER_CLUSTER_G)) {
		/* Enforce the minimum rail power-off time since the last
		   G=>LP switch before bringing the G cluster back up */
		if (!timekeeping_suspended) {
			ktime_t now = ktime_get();
			s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
			s64 t_off = tegra_cpu_power_off_time();
			if (t_off > t)
				udelay((unsigned int)(t_off - t));
		}

		/* Start CPU rail transition up early - before disabling irq */
		if (tegra_crail_can_start_early()) {
			int ret = tegra_crail_startup_early();
			if (ret)
				return ret;
		}
	}

	local_irq_save(irq_flags);
	if (is_idle_task(current))
		trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_START);
	else
		trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_START);
	tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);

#ifdef CONFIG_TEGRA_VIRTUAL_CPUID
	/* Record the logical CPU id so the LP CPU can present it */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		if (target_cluster == TEGRA_POWER_CLUSTER_LP) {
			u32 cpu;

			cpu = cpu_logical_map(smp_processor_id());
			writel(cpu, FLOW_CTRL_MPID);
		}
	}
#endif

	if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
		/* Switch through LP1: prolog, dram suspend, epilog */
		if (us)
			tegra_pd_set_trigger(us);

		tegra_cluster_switch_prolog(flags);
		tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
		tegra_cluster_switch_epilog(flags);

		if (us)
			tegra_pd_set_trigger(0);
	} else {
		/* Switch through a powered-down idle transition */
		int cpu;

		cpu = cpu_logical_map(smp_processor_id());

		tegra_set_cpu_in_pd(cpu);
		cpu_pm_enter();
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					   &cpu);
		tegra_idle_power_down_last(0, flags);
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
					   &cpu);
		cpu_pm_exit();
		tegra_clear_cpu_in_pd(cpu);
	}

	if (current_cluster != target_cluster && !timekeeping_suspended) {
		ktime_t now = ktime_get();
		/* Tell DVFS about the rail state; remember G=>LP time */
		if (target_cluster == TEGRA_POWER_CLUSTER_G) {
			tegra_dvfs_rail_on(tegra_cpu_rail, now);
		} else {
			last_g2lp = now;
			tegra_dvfs_rail_off(tegra_cpu_rail, now);
		}
	}
	tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_end);
	if (is_idle_task(current))
		trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_DONE);
	else
		trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_DONE);
	local_irq_restore(irq_flags);

	DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));

	return 0;
}
563
564 int tegra_switch_to_lp_cluster()
565 {
566         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
567         struct clk *cpu_lp_clk = tegra_get_clock_by_name("cpu_lp");
568         int rate = clk_get_rate(cpu_clk);
569         int e;
570
571         if (is_lp_cluster())
572                 return 0;
573
574         /* Change the Clock Rate to desired LP CPU's clock rate */
575
576         if (rate > cpu_lp_clk->max_rate) {
577                 e = clk_set_rate(cpu_clk, cpu_lp_clk->max_rate);
578                 if (e) {
579                         pr_err("cluster_swtich: Failed to set clock %d", e);
580                         return e;
581                 }
582         }
583
584         e = clk_set_parent(cpu_clk, cpu_lp_clk);
585         if (e) {
586                 pr_err("cluster switching request failed (%d)\n", e);
587                 return e;
588         }
589         return e;
590 }
591
592 int tegra_switch_to_g_cluster()
593 {
594         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
595         struct clk *cpu_g_clk = tegra_get_clock_by_name("cpu_g");
596         int e;
597
598         if (!is_lp_cluster())
599                 return 0;
600
601         e = clk_set_parent(cpu_clk, cpu_g_clk);
602         if (e) {
603                 pr_err("cluster switching request failed (%d)\n", e);
604                 return e;
605         }
606
607         /* Switch back to G Cluster Cpu Max Clock rate */
608
609         e = clk_set_rate(cpu_clk, cpu_g_clk->max_rate);
610         if (e) {
611                 pr_err("cluster_swtich: Failed to increase the clock %d\n", e);
612                 return e;
613         }
614         return e;
615 }
616
617 int tegra_cluster_switch(struct clk *cpu_clk, struct clk *new_cluster_clk)
618 {
619         int ret;
620         bool is_target_lp = is_lp_cluster() ^
621                 (clk_get_parent(cpu_clk) != new_cluster_clk);
622
623         /* Update core edp limits before switch to LP cluster; abort on error */
624         if (is_target_lp) {
625                 ret = tegra_core_edp_cpu_state_update(is_target_lp);
626                 if (ret)
627                         return ret;
628         }
629
630         ret = clk_set_parent(cpu_clk, new_cluster_clk);
631         if (ret)
632                 return ret;
633
634         /* Update core edp limits after switch to G cluster; ignore error */
635         if (!is_target_lp)
636                 tegra_core_edp_cpu_state_update(is_target_lp);
637
638         return 0;
639 }
640 #endif
641
642 #ifdef CONFIG_PM_SLEEP
643
/* LP0-entry hook for the memory controller; intentionally empty. */
void tegra_lp0_suspend_mc(void)
{
	/* Since memory frequency after LP0 is restored to boot rate
	   mc timing is saved during init, not on entry to LP0. Keep
	   this hook just in case, anyway */
}
650
/* LP0-exit hook: restore memory controller timings saved at init. */
void tegra_lp0_resume_mc(void)
{
	tegra_mc_timing_restore();
}
655
656 static int __init get_clock_cclk_lp(void)
657 {
658         if (!cclk_lp)
659                 cclk_lp = tegra_get_clock_by_name("cclk_lp");
660         return 0;
661 }
662
663 subsys_initcall(get_clock_cclk_lp);
664
/*
 * tegra_lp0_cpu_mode - move to/from the LP cluster around LP0 suspend.
 * @enter: true on suspend entry, false on resume.
 *
 * If LP0 is entered while running on the G cluster, switch to LP before
 * suspending and back to G on resume, keeping cclk_lp enabled across the
 * LP residency. A static flag remembers the entry-time cluster so the
 * resume call knows whether a switch back is needed.
 */
void tegra_lp0_cpu_mode(bool enter)
{
	static bool entered_on_g = false;	/* were we on G at entry? */
	unsigned int flags;

	if (enter)
		entered_on_g = !is_lp_cluster();

	if (entered_on_g) {
		if (enter)
			tegra_clk_prepare_enable(cclk_lp);

		flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
		flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
		flags |= TEGRA_POWER_CLUSTER_PART_DEFAULT;
#endif
		if (!tegra_cluster_control(0, flags)) {
			/* Drop the cclk_lp reference only after a
			   successful switch back to G */
			if (!enter)
				tegra_clk_disable_unprepare(cclk_lp);
			pr_info("Tegra: switched to %s cluster\n",
				enter ? "LP" : "G");
		}
	}
}
690
691 #define IO_DPD_INFO(_name, _index, _bit) \
692         { \
693                 .name = _name, \
694                 .io_dpd_reg_index = _index, \
695                 .io_dpd_bit = _bit, \
696         }
697
698 /* PMC IO DPD register offsets */
699 #define APBDEV_PMC_IO_DPD_REQ_0         0x1b8
700 #define APBDEV_PMC_IO_DPD_STATUS_0      0x1bc
701 #define APBDEV_PMC_SEL_DPD_TIM_0        0x1c8
702 #define APBDEV_DPD_ENABLE_LSB           30
703 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
704 #define APBDEV_DPD2_ENABLE_LSB          5
705 #else
706 #define APBDEV_DPD2_ENABLE_LSB          30
707 #endif
708 #define PMC_DPD_SAMPLE                  0x20
709
710 static struct tegra_io_dpd tegra_list_io_dpd[] = {
711 };
712 #endif
713
714 /* we want to cleanup bootloader io dpd setting in kernel */
715 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
716
717 #if defined CONFIG_PM_SLEEP
/*
 * tegra_io_dpd_get - look up the IO deep-power-down handle for a device.
 * @dev: device whose name is matched against the IO DPD table (may be NULL).
 *
 * Returns a pointer into tegra_list_io_dpd[] on a match, NULL otherwise
 * (always NULL when CONFIG_TEGRA_IO_DPD is disabled).
 *
 * NOTE(review): strncmp() is bounded by strlen(name), so a table entry
 * matches when the device name is a prefix of it — presumably intentional;
 * confirm if entries are ever added to the (currently empty) table.
 */
struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
#ifdef CONFIG_TEGRA_IO_DPD
	int i;
	const char *name = dev ? dev_name(dev) : NULL;
	if (name) {
		for (i = 0; i < ARRAY_SIZE(tegra_list_io_dpd); i++) {
			if (!(strncmp(tegra_list_io_dpd[i].name, name,
				strlen(name)))) {
				return &tegra_list_io_dpd[i];
			}
		}
	}
	dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
		((name) ? name : "NULL"));
#endif
	return NULL;
}
736
737 static DEFINE_SPINLOCK(tegra_io_dpd_lock);
738
739 void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
740 {
741         unsigned int enable_mask;
742         unsigned int dpd_status;
743         unsigned int dpd_enable_lsb;
744
745         if (!hnd)
746                 return;
747
748         spin_lock(&tegra_io_dpd_lock);
749         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
750                                                 APBDEV_DPD_ENABLE_LSB;
751         writel(0x1, pmc + PMC_DPD_SAMPLE);
752         writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
753         enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
754         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
755                                         hnd->io_dpd_reg_index * 8));
756         udelay(1);
757         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
758                                         hnd->io_dpd_reg_index * 8));
759         if (!(dpd_status & (1 << hnd->io_dpd_bit))) {
760                 if (!tegra_platform_is_fpga()) {
761                         pr_info("Error: dpd%d enable failed, status=%#x\n",
762                         (hnd->io_dpd_reg_index + 1), dpd_status);
763                 }
764         }
765         /* Sample register must be reset before next sample operation */
766         writel(0x0, pmc + PMC_DPD_SAMPLE);
767         spin_unlock(&tegra_io_dpd_lock);
768         return;
769 }
770
771 void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
772 {
773         unsigned int enable_mask;
774         unsigned int dpd_status;
775         unsigned int dpd_enable_lsb;
776
777         if (!hnd)
778                 return;
779
780         spin_lock(&tegra_io_dpd_lock);
781         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
782                                                 APBDEV_DPD_ENABLE_LSB;
783         enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
784         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
785                                         hnd->io_dpd_reg_index * 8));
786         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
787                                         hnd->io_dpd_reg_index * 8));
788         if (dpd_status & (1 << hnd->io_dpd_bit)) {
789                 if (!tegra_platform_is_fpga()) {
790                         pr_info("Error: dpd%d disable failed, status=%#x\n",
791                         (hnd->io_dpd_reg_index + 1), dpd_status);
792                 }
793         }
794         spin_unlock(&tegra_io_dpd_lock);
795         return;
796 }
797
798 static void tegra_io_dpd_delayed_disable(struct work_struct *work)
799 {
800         struct tegra_io_dpd *hnd = container_of(
801                 to_delayed_work(work), struct tegra_io_dpd, delay_dpd);
802         tegra_io_dpd_disable(hnd);
803         hnd->need_delay_dpd = 0;
804 }
805
806 int tegra_io_dpd_init(void)
807 {
808         int i;
809         for (i = 0;
810                 i < (sizeof(tegra_list_io_dpd) / sizeof(struct tegra_io_dpd));
811                 i++) {
812                         INIT_DELAYED_WORK(&(tegra_list_io_dpd[i].delay_dpd),
813                                 tegra_io_dpd_delayed_disable);
814                         mutex_init(&(tegra_list_io_dpd[i].delay_lock));
815                         tegra_list_io_dpd[i].need_delay_dpd = 0;
816         }
817         return 0;
818 }
819
#else

/* CONFIG_PM_SLEEP disabled: IO DPD support compiles out; provide no-op
   stubs so callers link unchanged. */

int tegra_io_dpd_init(void)
{
	return 0;
}

void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
{
}

void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
{
}

struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
	return NULL;
}

#endif
841
842 EXPORT_SYMBOL(tegra_io_dpd_get);
843 EXPORT_SYMBOL(tegra_io_dpd_enable);
844 EXPORT_SYMBOL(tegra_io_dpd_disable);
845 EXPORT_SYMBOL(tegra_io_dpd_init);
846
/* Layout of one PMC IO DPD request register. */
struct io_dpd_reg_info {
	u32 req_reg_off;	/* PMC offset of the DPD request register */
	u8 dpd_code_lsb;	/* bit position of the request-code field */
};

/* Tegra3 IO DPD request registers (the status register follows at +4). */
static struct io_dpd_reg_info t3_io_dpd_req_regs[] = {
	{0x1b8, 30},
	{0x1c0, 5},
};
856
857 /* io dpd off request code */
858 #define IO_DPD_CODE_OFF         1
859
860 /* cleans io dpd settings from bootloader during kernel init */
861 void tegra_bl_io_dpd_cleanup()
862 {
863         int i;
864         unsigned int dpd_mask;
865         unsigned int dpd_status;
866
867         pr_info("Clear bootloader IO dpd settings\n");
868         /* clear all dpd requests from bootloader */
869         for (i = 0; i < ARRAY_SIZE(t3_io_dpd_req_regs); i++) {
870                 dpd_mask = ((1 << t3_io_dpd_req_regs[i].dpd_code_lsb) - 1);
871                 dpd_mask |= (IO_DPD_CODE_OFF <<
872                         t3_io_dpd_req_regs[i].dpd_code_lsb);
873                 writel(dpd_mask, pmc + t3_io_dpd_req_regs[i].req_reg_off);
874                 /* dpd status register is next to req reg in tegra3 */
875                 dpd_status = readl(pmc +
876                         (t3_io_dpd_req_regs[i].req_reg_off + 4));
877         }
878         return;
879 }
880 EXPORT_SYMBOL(tegra_bl_io_dpd_cleanup);
881