a6918766f97c220315e062fcd8032b7c523521b9
[linux-3.10.git] / arch / arm / mach-tegra / pm-t3.c
1 /*
2  * arch/arm/mach-tegra/pm-t3.c
3  *
4  * Tegra3 SOC-specific power and cluster management
5  *
6  * Copyright (c) 2009-2013, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/io.h>
22 #include <linux/smp.h>
23 #include <linux/interrupt.h>
24 #include <linux/clk.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/device.h>
28 #include <linux/module.h>
29 #include <linux/clockchips.h>
30 #include <linux/cpu_pm.h>
31 #include <linux/irqchip/arm-gic.h>
32 #include <linux/sched.h>
33 #include <linux/tegra-powergate.h>
34 #include <linux/tegra-soc.h>
35
36 #include <mach/irqs.h>
37 #include <mach/io_dpd.h>
38 #include <mach/edp.h>
39
40 #include <asm/smp_plat.h>
41 #include <asm/cputype.h>
42
43 #include "clock.h"
44 #include "cpuidle.h"
45 #include "iomap.h"
46 #include "pm.h"
47 #include "sleep.h"
48 #include "tegra3_emc.h"
49 #include "dvfs.h"
50 #include "tegra11_soctherm.h"
51
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/nvpower.h>
54
55 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
56 #define CAR_CCLK_BURST_POLICY \
57         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)
58
59 #define CAR_SUPER_CCLK_DIVIDER \
60         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)
61
62 #define CAR_CCLKG_BURST_POLICY \
63         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)
64
65 #define CAR_SUPER_CCLKG_DIVIDER \
66         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)
67
68 #define CAR_CCLKLP_BURST_POLICY \
69         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
70 #define PLLX_DIV2_BYPASS_LP     (1<<16)
71
72 #define CAR_SUPER_CCLKLP_DIVIDER \
73         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)
74
75 #define CAR_BOND_OUT_V \
76         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
77 #define CAR_BOND_OUT_V_CPU_G    (1<<0)
78 #define CAR_BOND_OUT_V_CPU_LP   (1<<1)
79
80 #define CAR_CLK_ENB_V_SET \
81         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
82 #define CAR_CLK_ENB_V_CPU_G     (1<<0)
83 #define CAR_CLK_ENB_V_CPU_LP    (1<<1)
84
85 #define CAR_RST_CPUG_CMPLX_SET \
86         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)
87
88 #define CAR_RST_CPUG_CMPLX_CLR \
89         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)
90
91 #define CAR_RST_CPULP_CMPLX_SET \
92         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)
93
94 #define CAR_RST_CPULP_CMPLX_CLR \
95         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)
96
97 #define CAR_CLK_CPUG_CMPLX_SET \
98         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)
99
100 #define CAR_CLK_CPUG_CMPLX_CLR \
101         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)
102
103 #define CAR_CLK_CPULP_CMPLX_SET \
104         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)
105
106 #define CAR_CLK_CPULP_CMPLX_CLR \
107         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)
108
109 #define CPU_CLOCK(cpu)  (0x1<<(8+cpu))
110 #define CPU_RESET(cpu)  (0x1111ul<<(cpu))
111
112 #define PLLX_FO_G (1<<28)
113 #define PLLX_FO_LP (1<<29)
114
115 #define CLK_RST_CONTROLLER_PLLX_MISC_0 \
116         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0xE4)
117
/*
 * Set up CAR (Clock and Reset controller) state for a switch to the
 * cluster selected by @flags (TEGRA_POWER_CLUSTER_G or _LP).
 *
 * Returns 0 on success, or -ENXIO if the target cluster is bonded out
 * (i.e. not present on this chip).
 */
static int cluster_switch_prolog_clock(unsigned int flags)
{
	u32 reg;
	u32 CclkBurstPolicy;
	u32 SuperCclkDivier;

	/* Read the bond out register containing the G and LP CPUs. */
	reg = readl(CAR_BOND_OUT_V);

	/* Sync G-PLLX divider bypass with LP (no effect on G, just to prevent
	   LP settings overwrite by save/restore code) */
	CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
	CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
	writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);

	/* Switching to G? */
	if (flags & TEGRA_POWER_CLUSTER_G) {
		/* Do the G CPUs exist? (bond-out bit set = fused away) */
		if (reg & CAR_BOND_OUT_V_CPU_G)
			return -ENXIO;

		/* Keep G CPU clock policy set by upper layer, with the
		   exception of the transition via LP1 */
		if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
			/* In LP1 power mode come up on CLKM (oscillator) */
			CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
			CclkBurstPolicy &= ~0xF;
			SuperCclkDivier = 0;

			writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
			writel(SuperCclkDivier, CAR_SUPER_CCLKG_DIVIDER);
		}

#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
		/* Hold G CPUs 1-3 in reset after the switch */
		reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
		writel(reg, CAR_RST_CPUG_CMPLX_SET);

		/* Take G CPU 0 out of reset after the switch */
		reg = CPU_RESET(0);
		writel(reg, CAR_RST_CPUG_CMPLX_CLR);

		/* Disable the clocks on G CPUs 1-3 after the switch */
		reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
		writel(reg, CAR_CLK_CPUG_CMPLX_SET);

		/* Enable the clock on G CPU 0 after the switch */
		reg = CPU_CLOCK(0);
		writel(reg, CAR_CLK_CPUG_CMPLX_CLR);

		/* Enable the G CPU complex clock after the switch */
		reg = CAR_CLK_ENB_V_CPU_G;
		writel(reg, CAR_CLK_ENB_V_SET);
#endif
	}
	/* Switching to LP? */
	else if (flags & TEGRA_POWER_CLUSTER_LP) {
		/* Does the LP CPU exist? (bond-out bit set = fused away) */
		if (reg & CAR_BOND_OUT_V_CPU_LP)
			return -ENXIO;

		/* Keep LP CPU clock policy set by upper layer, with the
		   exception of the transition via LP1 */
		if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
			/* In LP1 power mode come up on CLKM (oscillator) */
			CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
			CclkBurstPolicy &= ~0xF;
			SuperCclkDivier = 0;

			writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
			writel(SuperCclkDivier, CAR_SUPER_CCLKLP_DIVIDER);
		}

#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
		/* Take the LP CPU out of reset after the switch */
		reg = CPU_RESET(0);
		writel(reg, CAR_RST_CPULP_CMPLX_CLR);

		/* Enable the clock on the LP CPU after the switch */
		reg = CPU_CLOCK(0);
		writel(reg, CAR_CLK_CPULP_CMPLX_CLR);

		/* Enable the LP CPU complex clock after the switch */
		reg = CAR_CLK_ENB_V_CPU_LP;
		writel(reg, CAR_CLK_ENB_V_SET);
#endif
	}

	return 0;
}
208
209 static inline void enable_pllx_cluster_port(void)
210 {
211         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
212         val &= (is_lp_cluster()?(~PLLX_FO_G):(~PLLX_FO_LP));
213         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
214 }
215
216 static inline void disable_pllx_cluster_port(void)
217 {
218         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
219         val |= (is_lp_cluster()?PLLX_FO_G:PLLX_FO_LP);
220         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
221 }
222
/*
 * Prepare the flow controller (and CAR, via the clock prolog) for a
 * cluster switch requested by @flags.  Must run on the CPU performing
 * the switch, since the per-CPU flow controller CSR is written.
 *
 * If @flags encodes no target cluster, or the target does not exist,
 * only the CSR switch/immediate-wake flags are cleared.
 */
void tegra_cluster_switch_prolog(unsigned int flags)
{
	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Read the flow controller CSR register and clear the CPU switch
	   and immediate flags. If an actual CPU switch is to be performed,
	   re-write the CSR register with the desired values. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);

	/* Program flow controller for immediate wake if requested */
	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		reg |= FLOW_CTRL_CSR_IMMEDIATE_WAKE;

	/* Do nothing if no switch actions requested */
	if (!target_cluster)
		goto done;

#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	/* Select which partitions (non-CPU / CPU rail) the flow controller
	   may gate during the switch, per the CLUSTER_PART flags. */
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
	if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
	    ((flags & TEGRA_POWER_CLUSTER_PART_NONCPU) == 0) &&
	    (current_cluster == TEGRA_POWER_CLUSTER_LP))
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
	else if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL) {
		if (tegra_crail_can_start_early()) {
			/* Rail is brought up early in s/w instead; gate
			   only the non-CPU partition and retune thermals. */
			reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
			tegra_soctherm_adjust_cpu_zone(false);
		} else {
			reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
		}
	}

	if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
#endif

	if ((current_cluster != target_cluster) ||
		(flags & TEGRA_POWER_CLUSTER_FORCE)) {
		if (current_cluster != target_cluster) {
			/* Set up the clocks for the target CPU. */
			if (cluster_switch_prolog_clock(flags)) {
				/* The target CPU does not exist */
				goto done;
			}

			/* Set up the flow controller to switch CPUs. */
			reg |= FLOW_CTRL_CSR_SWITCH_CLUSTER;

			/* Enable target port of PLL_X */
			enable_pllx_cluster_port();
		}
	}

done:
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));
}
288
289
static void cluster_switch_epilog_actlr(void)
{
	u32 actlr;

	/*
	 * This is only needed for Cortex-A9; for Cortex-A15, do nothing!
	 *
	 * TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads
	 * as zero, writes ignored). Hence, it is not preserved across G=>LP=>G
	 * switch by CPU save/restore code, but SMP bit is restored correctly.
	 * Synchronize these two bits here after LP=>G transition. Note that
	 * only CPU0 core is powered on before and after the switch. See also
	 * bug 807595.
	 */

	/* Primary part number 0xC0F == Cortex-A15: no fix-up required. */
	if (((read_cpuid_id() >> 4) & 0xFFF) == 0xC0F)
		return;

	/* Read ACTLR (cp15 c1/c0/1). */
	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	/* If the SMP bit (6) is set, make sure the FW bit (0) is too. */
	if (actlr & (0x1 << 6)) {
		actlr |= 0x1;
		__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
	}
}
314
static void cluster_switch_epilog_gic(void)
{
	unsigned int max_irq, i;
	void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);

	/* Reprogram the interrupt affinity because on the LP CPU,
	   the interrupt distributor affinity registers are stubbed out
	   by ARM (reads as zero, writes ignored). So when the LP CPU
	   context save code runs, the affinity registers will read
	   as all zero. This causes all interrupts to be effectively
	   disabled when back on the G CPU because they aren't routable
	   to any CPU. See bug 667720 for details. */

	/* GIC_DIST_CTR[4:0] encodes (number of interrupt lines / 32) - 1 */
	max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/* SGIs/PPIs (IRQs 0-31) are banked per CPU; start at IRQ 32.
	   Each 32-bit target register covers 4 IRQs (one byte each). */
	for (i = 32; i < max_irq; i += 4) {
		/* Default: route all four IRQs to CPU0 */
		u32 val = 0x01010101;
#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
		unsigned int irq;
		for (irq = i; irq < (i + 4); irq++) {
			struct cpumask mask;
			struct irq_desc *desc = irq_to_desc(irq);

			if (desc && desc->affinity_hint) {
				/* Honor driver affinity hints that intersect
				   the IRQ's current affinity mask. */
				if (cpumask_and(&mask, desc->affinity_hint,
						desc->irq_data.affinity))
					val |= (*cpumask_bits(&mask) & 0xff) <<
						((irq & 3) * 8);
			}
		}
#endif
		/* Byte offset of the register for IRQ group i is just i
		   (i * 4 / 4 == i; GIC_DIST_TARGET is byte-per-IRQ). */
		writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
	}
}
350
/*
 * Clean up after a cluster switch: clear the switch/wake flags in the
 * current CPU's flow controller CSR, perform LP=>G fix-ups (ACTLR and
 * GIC affinity), update rail/thermal state where supported, and force
 * off the now-unused PLL_X port.
 */
void tegra_cluster_switch_epilog(unsigned int flags)
{
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Make sure the switch and immediate flags are cleared in
	   the flow controller to prevent undesirable side-effects
	   for future users of the flow controller. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
#endif
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));

	/* Perform post-switch LP=>G clean-up */
	if (!is_lp_cluster()) {
		cluster_switch_epilog_actlr();
		cluster_switch_epilog_gic();
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
		/* Rail was started early in s/w: restore thermal zone.
		   NOTE: the "} else if" below sits inside this #if block;
		   with the config off, only the !is_lp_cluster() branch
		   of this if remains. */
		if (tegra_crail_can_start_early())
			tegra_soctherm_adjust_cpu_zone(true);
	} else  if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
		    tegra_crail_can_start_early()) {
		/* Now on LP: the CPU rail can be gated in s/w. */
		tegra_powergate_partition(TEGRA_POWERGATE_CRAIL);
#endif
	}

	/* Disable unused port of PLL_X */
	disable_pllx_cluster_port();

	#if DEBUG_CLUSTER_SWITCH
	{
		/* FIXME: clock functions below are taking mutex */
		struct clk *c = tegra_get_clock_by_name(
			is_lp_cluster() ? "cpu_lp" : "cpu_g");
		DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
			is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
	}
	#endif
}
395
/*
 * Bring the CPU rail (CRAIL) up early in software, before the actual
 * cluster switch, so that rail ramp-up time is excluded from the h/w
 * switch latency.  Returns 0 if the rail is (or comes) up, -ETIMEDOUT
 * if the rail or RAM repair fails to complete in time.
 */
static int tegra_crail_startup_early(void)
{
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	u32 reg;
	int us = tegra_cpu_power_good_time();

	if (tegra_powergate_is_powered(TEGRA_POWERGATE_CRAIL))
		return 0;

	/*
	 * Toggle CRAIL, insert s/w  power good delay (load h/w power good
	 * timer with very small settings so it expires for sure within power
	 * gate toggle timeout).
	 */
	tegra_limit_cpu_power_timers(1, 1);
	tegra_unpowergate_partition(TEGRA_POWERGATE_CRAIL);
	if (timekeeping_suspended)
		udelay(us);			/* suspend exit */
	else
		usleep_range(us, us + 10);	/* regular scheduling */

	if (!tegra_powergate_is_powered(TEGRA_POWERGATE_CRAIL)) {
		WARN(1, "Failed to turn CRAIL ON in %d us\n", us);
		return -ETIMEDOUT;
	}

	/* If needed trigger RAM repair request in s/w (auto-clear in h/w) */
	#define RAM_REPAIR_TIMEOUT 500

	reg = readl(FLOW_CTRL_RAM_REPAIR) | FLOW_CTRL_RAM_REPAIR_REQ;
	if (!(reg & FLOW_CTRL_RAM_REPAIR_BYPASS_EN)) {
		int ram_repair_time = RAM_REPAIR_TIMEOUT;
		flowctrl_writel(reg, FLOW_CTRL_RAM_REPAIR);
		/* Busy-wait for h/w to auto-clear the request bit */
		while (readl(FLOW_CTRL_RAM_REPAIR) & FLOW_CTRL_RAM_REPAIR_REQ) {
			udelay(1);
			if (!(ram_repair_time--)) {
				WARN(1, "Failed to repair RAM in %d us\n",
				     RAM_REPAIR_TIMEOUT);
				return -ETIMEDOUT;
			}
		}
	}
#endif
	return 0;
}
441
/*
 * Execute a cluster switch, either through LP1 (SDRAM self-refresh)
 * or through a power-down of the last running CPU.
 *
 * @us:    wake timer in microseconds (0 = none); forced to 0 when an
 *         immediate wake is requested
 * @flags: TEGRA_POWER_CLUSTER_* target and modifier flags
 *
 * Must be called with exactly one CPU online.  Returns 0 on success;
 * -EINVAL for a malformed target, -EBUSY with multiple CPUs online,
 * -EEXIST if already on the target (and not forced), -EPERM if the G
 * cluster is absent.
 */
int tegra_cluster_control(unsigned int us, unsigned int flags)
{
	/* Timestamp of the last G->LP transition; used to enforce the
	   minimum rail-off time before powering the rail back up. */
	static ktime_t last_g2lp;

	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	unsigned long irq_flags;

	/* Exactly one target cluster must be specified */
	if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
		return -EINVAL;

	if (num_online_cpus() > 1)
		return -EBUSY;

	if ((current_cluster == target_cluster)
	&& !(flags & TEGRA_POWER_CLUSTER_FORCE))
		return -EEXIST;

	if (target_cluster == TEGRA_POWER_CLUSTER_G)
		if (!is_g_cluster_present())
			return -EPERM;

	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		us = 0;

	DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
		(flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
		is_lp_cluster() ? "LP" : "G",
		(target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
		(flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
		(flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
		us));

	if ((current_cluster == TEGRA_POWER_CLUSTER_LP) &&
	    (target_cluster == TEGRA_POWER_CLUSTER_G)) {
		if (!timekeeping_suspended) {
			/* Ensure the rail has been off at least the required
			   power-off time before powering it back on */
			ktime_t now = ktime_get();
			s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
			s64 t_off = tegra_cpu_power_off_time();
			if (t_off > t)
				udelay((unsigned int)(t_off - t));
		}

		/* Start CPU rail transition up early - before disabling irq */
		if (tegra_crail_can_start_early()) {
			int ret = tegra_crail_startup_early();
			if (ret)
				return ret;
		}
	}

	local_irq_save(irq_flags);
	if (is_idle_task(current))
		trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_START);
	else
		trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_START);
	tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);

#ifdef CONFIG_TEGRA_VIRTUAL_CPUID
	/* Record the physical CPU id for the LP cluster to come up on */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		if (target_cluster == TEGRA_POWER_CLUSTER_LP) {
			u32 cpu;

			cpu = cpu_logical_map(smp_processor_id());
			writel(cpu, FLOW_CTRL_MPID);
		}
	}
#endif

	if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
		/* Switch via LP1 suspend (SDRAM in self-refresh) */
		if (us)
			tegra_pd_set_trigger(us);

		tegra_cluster_switch_prolog(flags);
		tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
		tegra_cluster_switch_epilog(flags);

		if (us)
			tegra_pd_set_trigger(0);
	} else {
		/* Switch via power-down of the last CPU */
		int cpu;

		cpu = cpu_logical_map(smp_processor_id());

		tegra_set_cpu_in_pd(cpu);
		cpu_pm_enter();
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					   &cpu);
		tegra_idle_power_down_last(0, flags);
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
					   &cpu);
		cpu_pm_exit();
		tegra_clear_cpu_in_pd(cpu);
	}

	/* Keep DVFS rail bookkeeping in sync with the transition */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		ktime_t now = ktime_get();
		if (target_cluster == TEGRA_POWER_CLUSTER_G) {
			tegra_dvfs_rail_on(tegra_cpu_rail, now);
		} else {
			last_g2lp = now;
			tegra_dvfs_rail_off(tegra_cpu_rail, now);
		}
	}
	tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_end);
	if (is_idle_task(current))
		trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_DONE);
	else
		trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_DONE);
	local_irq_restore(irq_flags);

	DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));

	return 0;
}
561
562 int tegra_switch_to_lp_cluster()
563 {
564         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
565         struct clk *cpu_lp_clk = tegra_get_clock_by_name("cpu_lp");
566         int rate = clk_get_rate(cpu_clk);
567         int e;
568
569         if (is_lp_cluster())
570                 return 0;
571
572         /* Change the Clock Rate to desired LP CPU's clock rate */
573
574         if (rate > cpu_lp_clk->max_rate) {
575                 e = clk_set_rate(cpu_clk, cpu_lp_clk->max_rate);
576                 if (e) {
577                         pr_err("cluster_swtich: Failed to set clock %d", e);
578                         return e;
579                 }
580         }
581
582         e = clk_set_parent(cpu_clk, cpu_lp_clk);
583         if (e) {
584                 pr_err("cluster switching request failed (%d)\n", e);
585                 return e;
586         }
587         return e;
588 }
589
590 int tegra_switch_to_g_cluster()
591 {
592         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
593         struct clk *cpu_g_clk = tegra_get_clock_by_name("cpu_g");
594         int e;
595
596         if (!is_lp_cluster())
597                 return 0;
598
599         e = clk_set_parent(cpu_clk, cpu_g_clk);
600         if (e) {
601                 pr_err("cluster switching request failed (%d)\n", e);
602                 return e;
603         }
604
605         /* Switch back to G Cluster Cpu Max Clock rate */
606
607         e = clk_set_rate(cpu_clk, cpu_g_clk->max_rate);
608         if (e) {
609                 pr_err("cluster_swtich: Failed to increase the clock %d\n", e);
610                 return e;
611         }
612         return e;
613 }
614
/*
 * Switch clusters through the clock framework by reparenting @cpu_clk
 * to @new_cluster_clk, keeping core EDP limits in sync around the
 * switch.  Returns 0 on success, or a negative error from the EDP
 * update or clk_set_parent().
 */
int tegra_cluster_switch(struct clk *cpu_clk, struct clk *new_cluster_clk)
{
	int ret;
	/* Cluster in effect after the reparent: unchanged if the new
	   parent equals the current one, flipped otherwise. */
	bool is_target_lp = is_lp_cluster() ^
		(clk_get_parent(cpu_clk) != new_cluster_clk);

	/* Update core edp limits before switch to LP cluster; abort on error */
	if (is_target_lp) {
		ret = tegra_core_edp_cpu_state_update(is_target_lp);
		if (ret)
			return ret;
	}

	ret = clk_set_parent(cpu_clk, new_cluster_clk);
	if (ret)
		return ret;

	/* Update core edp limits after switch to G cluster; ignore error */
	if (!is_target_lp)
		tegra_core_edp_cpu_state_update(is_target_lp);

	return 0;
}
638 #endif
639
640 #ifdef CONFIG_PM_SLEEP
641
/* LP0 entry hook for the memory controller (intentionally empty). */
void tegra_lp0_suspend_mc(void)
{
	/* Since memory frequency after LP0 is restored to boot rate
	   mc timing is saved during init, not on entry to LP0. Keep
	   this hook just in case, anyway */
}
648
/* LP0 exit hook: restore memory controller timings saved at init. */
void tegra_lp0_resume_mc(void)
{
	tegra_mc_timing_restore();
}
653
/*
 * Handle cluster state around LP0 (deep sleep).  LP0 is entered from
 * the LP cluster: on @enter == true, switch G->LP if needed; on exit,
 * switch back to G if that is where LP0 was entered from.
 */
void tegra_lp0_cpu_mode(bool enter)
{
	static struct clk *cclk_lp;
	/* Remembers across the enter/exit pair whether we came from G */
	static bool entered_on_g = false;
	unsigned int flags;

	if (!cclk_lp)
		cclk_lp = tegra_get_clock_by_name("cclk_lp");

	if (enter)
		entered_on_g = !is_lp_cluster();

	if (entered_on_g) {
		/* Keep the LP cluster clock enabled while on LP */
		if (enter)
			tegra_clk_prepare_enable(cclk_lp);

		flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
		flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
		flags |= TEGRA_POWER_CLUSTER_PART_DEFAULT;
#endif
		if (!tegra_cluster_control(0, flags)) {
			if (!enter)
				tegra_clk_disable_unprepare(cclk_lp);
			pr_info("Tegra: switched to %s cluster\n",
				enter ? "LP" : "G");
		}
	}
}
683
684 #define IO_DPD_INFO(_name, _index, _bit) \
685         { \
686                 .name = _name, \
687                 .io_dpd_reg_index = _index, \
688                 .io_dpd_bit = _bit, \
689         }
690
691 /* PMC IO DPD register offsets */
692 #define APBDEV_PMC_IO_DPD_REQ_0         0x1b8
693 #define APBDEV_PMC_IO_DPD_STATUS_0      0x1bc
694 #define APBDEV_PMC_SEL_DPD_TIM_0        0x1c8
695 #define APBDEV_DPD_ENABLE_LSB           30
696 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
697 #define APBDEV_DPD2_ENABLE_LSB          5
698 #else
699 #define APBDEV_DPD2_ENABLE_LSB          30
700 #endif
701 #define PMC_DPD_SAMPLE                  0x20
702
/* Table of IO pads that support deep power down.  Empty here; entries
   (name/reg-index/bit, via IO_DPD_INFO) are platform-specific. */
static struct tegra_io_dpd tegra_list_io_dpd[] = {
};
705 #endif
706
707 /* we want to cleanup bootloader io dpd setting in kernel */
708 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
709
710 #if defined CONFIG_PM_SLEEP
/*
 * Look up the IO DPD handle for @dev by device name.  Returns a
 * pointer into tegra_list_io_dpd[] on a match, or NULL when DPD is
 * not supported for the device (or CONFIG_TEGRA_IO_DPD is off).
 *
 * NOTE(review): strncmp() bounded by strlen(name) is a prefix match -
 * a table entry whose name merely starts with the device name would
 * also match.  Harmless while the table is empty; confirm intent
 * before populating it.
 */
struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
#ifdef CONFIG_TEGRA_IO_DPD
	int i;
	const char *name = dev ? dev_name(dev) : NULL;
	if (name) {
		for (i = 0; i < ARRAY_SIZE(tegra_list_io_dpd); i++) {
			if (!(strncmp(tegra_list_io_dpd[i].name, name,
				strlen(name)))) {
				return &tegra_list_io_dpd[i];
			}
		}
	}
	dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
		((name) ? name : "NULL"));
#endif
	return NULL;
}
729
730 static DEFINE_SPINLOCK(tegra_io_dpd_lock);
731
/*
 * Put the IO pad described by @hnd into deep power down: arm a PMC
 * DPD sample, issue the DPD ON request, and verify the status bit.
 * Logs an error on failure (silicon only).  NULL @hnd is a no-op.
 */
void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
{
	unsigned int enable_mask;
	unsigned int dpd_status;
	unsigned int dpd_enable_lsb;

	if (!hnd)
		return;

	spin_lock(&tegra_io_dpd_lock);
	/* Request-code LSB differs between the DPD and DPD2 registers */
	dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
						APBDEV_DPD_ENABLE_LSB;
	/* Arm pad-state sampling with a short sample timer */
	writel(0x1, pmc + PMC_DPD_SAMPLE);
	writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
	/* Request code 2 (DPD ON) plus this pad's bit */
	enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
	writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
					hnd->io_dpd_reg_index * 8));
	udelay(1);	/* let the request take effect before checking */
	dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
					hnd->io_dpd_reg_index * 8));
	if (!(dpd_status & (1 << hnd->io_dpd_bit))) {
		if (!tegra_platform_is_fpga()) {
			pr_info("Error: dpd%d enable failed, status=%#x\n",
			(hnd->io_dpd_reg_index + 1), dpd_status);
		}
	}
	/* Sample register must be reset before next sample operation */
	writel(0x0, pmc + PMC_DPD_SAMPLE);
	spin_unlock(&tegra_io_dpd_lock);
	return;
}
763
/*
 * Take the IO pad described by @hnd out of deep power down: issue the
 * DPD OFF request and verify the status bit cleared.  Logs an error
 * on failure (silicon only).  NULL @hnd is a no-op.
 */
void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
{
	unsigned int enable_mask;
	unsigned int dpd_status;
	unsigned int dpd_enable_lsb;

	if (!hnd)
		return;

	spin_lock(&tegra_io_dpd_lock);
	/* Request-code LSB differs between the DPD and DPD2 registers */
	dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
						APBDEV_DPD_ENABLE_LSB;
	/* Request code 1 (DPD OFF) plus this pad's bit */
	enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
	writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
					hnd->io_dpd_reg_index * 8));
	dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
					hnd->io_dpd_reg_index * 8));
	if (dpd_status & (1 << hnd->io_dpd_bit)) {
		if (!tegra_platform_is_fpga()) {
			pr_info("Error: dpd%d disable failed, status=%#x\n",
			(hnd->io_dpd_reg_index + 1), dpd_status);
		}
	}
	spin_unlock(&tegra_io_dpd_lock);
	return;
}
790
/* Delayed-work callback: take the owning pad out of DPD once its
   disable delay expires, then clear the pending-delay flag. */
static void tegra_io_dpd_delayed_disable(struct work_struct *work)
{
	struct tegra_io_dpd *hnd = container_of(
		to_delayed_work(work), struct tegra_io_dpd, delay_dpd);
	tegra_io_dpd_disable(hnd);
	hnd->need_delay_dpd = 0;
}
798
799 int tegra_io_dpd_init(void)
800 {
801         int i;
802         for (i = 0;
803                 i < (sizeof(tegra_list_io_dpd) / sizeof(struct tegra_io_dpd));
804                 i++) {
805                         INIT_DELAYED_WORK(&(tegra_list_io_dpd[i].delay_dpd),
806                                 tegra_io_dpd_delayed_disable);
807                         mutex_init(&(tegra_list_io_dpd[i].delay_lock));
808                         tegra_list_io_dpd[i].need_delay_dpd = 0;
809         }
810         return 0;
811 }
812
813 #else
814
/* !CONFIG_PM_SLEEP stub: IO DPD support compiled out; report success. */
int tegra_io_dpd_init(void)
{
	return 0;
}
819
/* !CONFIG_PM_SLEEP stub: no-op. */
void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
{
}
823
/* !CONFIG_PM_SLEEP stub: no-op. */
void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
{
}
827
/* !CONFIG_PM_SLEEP stub: no IO DPD handles available. */
struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
	return NULL;
}
832
833 #endif
834
835 EXPORT_SYMBOL(tegra_io_dpd_get);
836 EXPORT_SYMBOL(tegra_io_dpd_enable);
837 EXPORT_SYMBOL(tegra_io_dpd_disable);
838 EXPORT_SYMBOL(tegra_io_dpd_init);
839
/* Describes one PMC IO DPD request register. */
struct io_dpd_reg_info {
	u32 req_reg_off;	/* PMC-relative offset of the request register */
	u8 dpd_code_lsb;	/* LSB of the DPD request-code field */
};
844
/* Tegra3 IO DPD request registers (DPD_REQ at 0x1b8, DPD2_REQ at 0x1c0). */
static struct io_dpd_reg_info t3_io_dpd_req_regs[] = {
	{0x1b8, 30},
	{0x1c0, 5},
};
849
850 /* io dpd off request code */
851 #define IO_DPD_CODE_OFF         1
852
853 /* cleans io dpd settings from bootloader during kernel init */
854 void tegra_bl_io_dpd_cleanup()
855 {
856         int i;
857         unsigned int dpd_mask;
858         unsigned int dpd_status;
859
860         pr_info("Clear bootloader IO dpd settings\n");
861         /* clear all dpd requests from bootloader */
862         for (i = 0; i < ARRAY_SIZE(t3_io_dpd_req_regs); i++) {
863                 dpd_mask = ((1 << t3_io_dpd_req_regs[i].dpd_code_lsb) - 1);
864                 dpd_mask |= (IO_DPD_CODE_OFF <<
865                         t3_io_dpd_req_regs[i].dpd_code_lsb);
866                 writel(dpd_mask, pmc + t3_io_dpd_req_regs[i].req_reg_off);
867                 /* dpd status register is next to req reg in tegra3 */
868                 dpd_status = readl(pmc +
869                         (t3_io_dpd_req_regs[i].req_reg_off + 4));
870         }
871         return;
872 }
873 EXPORT_SYMBOL(tegra_bl_io_dpd_cleanup);
874