video: tegra: host: Fix NULL instead of integer
[linux-3.10.git] / arch / arm / mach-tegra / pm-t3.c
1 /*
2  * arch/arm/mach-tegra/pm-t3.c
3  *
4  * Tegra3 SOC-specific power and cluster management
5  *
6  * Copyright (c) 2009-2014, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/io.h>
22 #include <linux/smp.h>
23 #include <linux/interrupt.h>
24 #include <linux/clk.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/device.h>
28 #include <linux/module.h>
29 #include <linux/clockchips.h>
30 #include <linux/cpu_pm.h>
31 #include <linux/irqchip/arm-gic.h>
32 #include <linux/sched.h>
33 #include <linux/tegra-powergate.h>
34 #include <linux/tegra-soc.h>
35 #include <linux/tegra-cpuidle.h>
36 #include <linux/tegra-pm.h>
37 #include <linux/tegra_soctherm.h>
38
39 #include <mach/irqs.h>
40 #include <mach/io_dpd.h>
41 #include <mach/edp.h>
42
43 #include <asm/smp_plat.h>
44 #include <asm/cputype.h>
45
46 #include <linux/platform/tegra/clock.h>
47 #include "iomap.h"
48 #include "pm.h"
49 #include "sleep.h"
50 #include "tegra3_emc.h"
51 #include <linux/platform/tegra/dvfs.h>
52
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/nvpower.h>
55
56 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
57 #define CAR_CCLK_BURST_POLICY \
58         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)
59
60 #define CAR_SUPER_CCLK_DIVIDER \
61         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)
62
63 #define CAR_CCLKG_BURST_POLICY \
64         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)
65
66 #define CAR_SUPER_CCLKG_DIVIDER \
67         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)
68
69 #define CAR_CCLKLP_BURST_POLICY \
70         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
71 #define PLLX_DIV2_BYPASS_LP     (1<<16)
72
73 #define CAR_SUPER_CCLKLP_DIVIDER \
74         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)
75
76 #define CAR_BOND_OUT_V \
77         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
78 #define CAR_BOND_OUT_V_CPU_G    (1<<0)
79 #define CAR_BOND_OUT_V_CPU_LP   (1<<1)
80
81 #define CAR_CLK_ENB_V_SET \
82         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
83 #define CAR_CLK_ENB_V_CPU_G     (1<<0)
84 #define CAR_CLK_ENB_V_CPU_LP    (1<<1)
85
86 #define CAR_RST_CPUG_CMPLX_SET \
87         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)
88
89 #define CAR_RST_CPUG_CMPLX_CLR \
90         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)
91
92 #define CAR_RST_CPULP_CMPLX_SET \
93         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)
94
95 #define CAR_RST_CPULP_CMPLX_CLR \
96         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)
97
98 #define CAR_CLK_CPUG_CMPLX_SET \
99         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)
100
101 #define CAR_CLK_CPUG_CMPLX_CLR \
102         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)
103
104 #define CAR_CLK_CPULP_CMPLX_SET \
105         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)
106
107 #define CAR_CLK_CPULP_CMPLX_CLR \
108         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)
109
110 #define CPU_CLOCK(cpu)  (0x1<<(8+cpu))
111 #define CPU_RESET(cpu)  (0x1111ul<<(cpu))
112
113 #define PLLX_FO_G (1<<28)
114 #define PLLX_FO_LP (1<<29)
115
116 #define CLK_RST_CONTROLLER_PLLX_MISC_0 \
117         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0xE4)
118
119 static struct clk *cclk_lp;
120
/*
 * cluster_switch_prolog_clock - set up CPU complex clocks before a switch.
 * @flags: TEGRA_POWER_CLUSTER_* switch request flags.
 *
 * Verifies via the CAR bond-out register that the target cluster's CPUs
 * actually exist, optionally re-parents the target cluster's super clock
 * to CLKM (oscillator) for a transition through LP1, and (Tegra3 only)
 * pre-configures reset/clock state of the target cluster's cores so that
 * only CPU0 comes up after the switch.
 *
 * Returns 0 on success, -ENXIO if the target cluster is bonded out.
 */
static int cluster_switch_prolog_clock(unsigned int flags)
{
	u32 reg;
	u32 CclkBurstPolicy;
	u32 SuperCclkDivier;	/* sic: original (misspelled) identifier kept */

	/* Read the bond out register containing the G and LP CPUs. */
	reg = readl(CAR_BOND_OUT_V);

	/* Sync G-PLLX divider bypass with LP (no effect on G, just to prevent
	   LP settings overwrite by save/restore code) */
	CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
	CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
	writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);

	/* Switching to G? */
	if (flags & TEGRA_POWER_CLUSTER_G) {
		/* Do the G CPUs exist? (bond-out bit set => not present) */
		if (reg & CAR_BOND_OUT_V_CPU_G)
			return -ENXIO;

		/* Keep G CPU clock policy set by upper layer, with the
		   exception of the transition via LP1 */
		if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
			/* In LP1 power mode come up on CLKM (oscillator) */
			CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
			CclkBurstPolicy &= ~0xF;
			SuperCclkDivier = 0;

			writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
			writel(SuperCclkDivier, CAR_SUPER_CCLKG_DIVIDER);
		}

#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
		/* Hold G CPUs 1-3 in reset after the switch */
		reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
		writel(reg, CAR_RST_CPUG_CMPLX_SET);

		/* Take G CPU 0 out of reset after the switch */
		reg = CPU_RESET(0);
		writel(reg, CAR_RST_CPUG_CMPLX_CLR);

		/* Disable the clocks on G CPUs 1-3 after the switch */
		reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
		writel(reg, CAR_CLK_CPUG_CMPLX_SET);

		/* Enable the clock on G CPU 0 after the switch */
		reg = CPU_CLOCK(0);
		writel(reg, CAR_CLK_CPUG_CMPLX_CLR);

		/* Enable the G CPU complex clock after the switch */
		reg = CAR_CLK_ENB_V_CPU_G;
		writel(reg, CAR_CLK_ENB_V_SET);
#endif
	}
	/* Switching to LP? */
	else if (flags & TEGRA_POWER_CLUSTER_LP) {
		/* Does the LP CPU exist? (bond-out bit set => not present) */
		if (reg & CAR_BOND_OUT_V_CPU_LP)
			return -ENXIO;

		/* Keep LP CPU clock policy set by upper layer, with the
		   exception of the transition via LP1 */
		if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
			/* In LP1 power mode come up on CLKM (oscillator) */
			CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
			CclkBurstPolicy &= ~0xF;
			SuperCclkDivier = 0;

			writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
			writel(SuperCclkDivier, CAR_SUPER_CCLKLP_DIVIDER);
		}

#if defined(CONFIG_ARCH_TEGRA_3x_SOC)
		/* Take the LP CPU out of reset after the switch */
		reg = CPU_RESET(0);
		writel(reg, CAR_RST_CPULP_CMPLX_CLR);

		/* Enable the clock on the LP CPU after the switch */
		reg = CPU_CLOCK(0);
		writel(reg, CAR_CLK_CPULP_CMPLX_CLR);

		/* Enable the LP CPU complex clock after the switch */
		reg = CAR_CLK_ENB_V_CPU_LP;
		writel(reg, CAR_CLK_ENB_V_SET);
#endif
	}

	return 0;
}
211
212 static inline void enable_pllx_cluster_port(void)
213 {
214         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
215         val &= (is_lp_cluster()?(~PLLX_FO_G):(~PLLX_FO_LP));
216         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
217 }
218
219 static inline void disable_pllx_cluster_port(void)
220 {
221         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
222         val |= (is_lp_cluster()?PLLX_FO_G:PLLX_FO_LP);
223         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
224 }
225
/*
 * tegra_cluster_switch_prolog - program the flow controller for a switch.
 * @flags: TEGRA_POWER_CLUSTER_* request flags.
 *
 * Clears the switch/immediate-wake bits in the current CPU's flow
 * controller CSR, then, if a switch is actually requested and the target
 * cluster exists, re-arms the CSR for the cluster switch and enables the
 * target port of PLL_X.  On SoCs with symmetric CPU power gating it also
 * selects which partition (CRAIL or non-CPU) is gated during the switch.
 */
void tegra_cluster_switch_prolog(unsigned int flags)
{
	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Read the flow controller CSR register and clear the CPU switch
	   and immediate flags. If an actual CPU switch is to be performed,
	   re-write the CSR register with the desired values. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);

	/* Program flow controller for immediate wake if requested */
	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		reg |= FLOW_CTRL_CSR_IMMEDIATE_WAKE;

	/* Do nothing if no switch actions requested */
	if (!target_cluster)
		goto done;

#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;

	/* Gating the whole CPU rail (CRAIL) and gating only the non-CPU
	   partition (CxNC) are mutually exclusive requests */
	if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
	    (flags & TEGRA_POWER_CLUSTER_PART_NONCPU))
		WARN(1, "CRAIL & CxNC flags must not be set together\n");

	if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
	    (current_cluster == TEGRA_POWER_CLUSTER_LP))
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
	else if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL) {
		if (tegra_crail_can_start_early()) {
			/* Rail will be brought up early in s/w: gate only the
			   non-CPU partition and relax the CPU thermal zone */
			reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
			tegra_soctherm_adjust_cpu_zone(false);
		} else {
			reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
		}
	} else if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
#endif

	if ((current_cluster != target_cluster) ||
		(flags & TEGRA_POWER_CLUSTER_FORCE)) {
		if (current_cluster != target_cluster) {
			/* Set up the clocks for the target CPU. */
			if (cluster_switch_prolog_clock(flags)) {
				/* The target CPU does not exist */
				goto done;
			}

			/* Set up the flow controller to switch CPUs. */
			reg |= FLOW_CTRL_CSR_SWITCH_CLUSTER;

			/* Enable target port of PLL_X */
			enable_pllx_cluster_port();
		}
	}

done:
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));
}
293
294
295 static void cluster_switch_epilog_actlr(void)
296 {
297         u32 actlr;
298
299         /*
300          * This is only needed for Cortex-A9, for Cortex-A15, do nothing!
301          *
302          * TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads
303          * as zero, writes ignored). Hence, it is not preserved across G=>LP=>G
304          * switch by CPU save/restore code, but SMP bit is restored correctly.
305          * Synchronize these two bits here after LP=>G transition. Note that
306          * only CPU0 core is powered on before and after the switch. See also
307          * bug 807595.
308         */
309         if (((read_cpuid_id() >> 4) & 0xFFF) == 0xC0F)
310                 return;
311
312 #ifdef CONFIG_ARM64
313         __asm__("mrs %0, actlr_el1\n" : "=r" (actlr));
314 #else
315         __asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
316 #endif
317
318         if (actlr & (0x1 << 6)) {
319                 actlr |= 0x1;
320 #ifdef CONFIG_ARM64
321                 __asm__("msr actlr_el1, %0\n" : "=r" (actlr));
322 #else
323                 __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
324 #endif
325         }
326 }
327
328 static void cluster_switch_epilog_gic(void)
329 {
330         unsigned int max_irq, i;
331         void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);
332
333         /* Reprogram the interrupt affinity because the on the LP CPU,
334            the interrupt distributor affinity regsiters are stubbed out
335            by ARM (reads as zero, writes ignored). So when the LP CPU
336            context save code runs, the affinity registers will read
337            as all zero. This causes all interrupts to be effectively
338            disabled when back on the G CPU because they aren't routable
339            to any CPU. See bug 667720 for details. */
340
341         max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
342         max_irq = (max_irq + 1) * 32;
343
344         for (i = 32; i < max_irq; i += 4) {
345                 u32 val = 0x01010101;
346 #ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
347                 unsigned int irq;
348                 for (irq = i; irq < (i + 4); irq++) {
349                         struct cpumask mask;
350                         struct irq_desc *desc = irq_to_desc(irq);
351
352                         if (desc && desc->affinity_hint) {
353                                 if (cpumask_and(&mask, desc->affinity_hint,
354                                                 desc->irq_data.affinity))
355                                         val |= (*cpumask_bits(&mask) & 0xff) <<
356                                                 ((irq & 3) * 8);
357                         }
358                 }
359 #endif
360                 writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
361         }
362 }
363
/*
 * tegra_cluster_switch_epilog - post-switch flow controller cleanup.
 * @flags: TEGRA_POWER_CLUSTER_* flags the switch was requested with.
 *
 * Clears the switch/immediate-wake (and ext-partition) bits from the
 * current CPU's flow controller CSR, performs LP=>G cleanup (ACTLR and
 * GIC affinity restore), and force-disables the now-unused PLL_X port.
 */
void tegra_cluster_switch_epilog(unsigned int flags)
{
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Make sure the switch and immediate flags are cleared in
	   the flow controller to prevent undesirable side-effects
	   for future users of the flow controller. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
#endif
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));

	/* Perform post-switch LP=>G clean-up.
	   NOTE: the else-if branch exists only when
	   CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE is enabled; with it
	   disabled the #if/#endif pair reduces this to a plain if. */
	if (!is_lp_cluster()) {
		cluster_switch_epilog_actlr();
		cluster_switch_epilog_gic();
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
		if (tegra_crail_can_start_early())
			tegra_soctherm_adjust_cpu_zone(true);
	} else  if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
		    tegra_crail_can_start_early()) {
		/* Now on LP with early-start CRAIL: gate the CPU rail */
		tegra_powergate_partition(TEGRA_POWERGATE_CRAIL);
#endif
	}

	/* Disable unused port of PLL_X */
	disable_pllx_cluster_port();

	#if DEBUG_CLUSTER_SWITCH
	{
		/* FIXME: clock functions below are taking mutex */
		struct clk *c = tegra_get_clock_by_name(
			is_lp_cluster() ? "cpu_lp" : "cpu_g");
		DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
			is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
	}
	#endif
}
408
/*
 * tegra_crail_startup_early - power the CPU rail (CRAIL) up early,
 * before the LP=>G switch, with a software power-good delay and, if
 * RAM-repair bypass is not enabled, a software RAM repair request.
 *
 * Returns 0 on success (also if CRAIL is already powered or the SoC has
 * no symmetric CPU power gating), -ETIMEDOUT if the rail fails to come
 * up or RAM repair does not complete in time.
 */
static int tegra_crail_startup_early(void)
{
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	u32 reg;
	int us = tegra_cpu_power_good_time();

	if (tegra_powergate_is_powered(TEGRA_POWERGATE_CRAIL))
		return 0;

	/*
	 * Toggle CRAIL, insert s/w power good delay (load h/w power good
	 * timer with very small settings so it expires for sure within power
	 * gate toggle timeout).
	 */
	tegra_limit_cpu_power_timers(1, 1);
	tegra_unpowergate_partition(TEGRA_POWERGATE_CRAIL);
	/* timekeeping is suspended during suspend exit: cannot sleep then */
	if (timekeeping_suspended)
		udelay(us);			/* suspend exit */
	else
		usleep_range(us, us + 10);	/* regular scheduling */

	if (!tegra_powergate_is_powered(TEGRA_POWERGATE_CRAIL)) {
		WARN(1, "Failed to turn CRAIL ON in %d us\n", us);
		return -ETIMEDOUT;
	}

	/* If needed trigger RAM repair request in s/w (auto-clear in h/w) */
	#define RAM_REPAIR_TIMEOUT 500

	reg = readl(FLOW_CTRL_RAM_REPAIR) | FLOW_CTRL_RAM_REPAIR_REQ;
	if (!(reg & FLOW_CTRL_RAM_REPAIR_BYPASS_EN)) {
		int ram_repair_time = RAM_REPAIR_TIMEOUT;
		flowctrl_writel(reg, FLOW_CTRL_RAM_REPAIR);
		/* poll until h/w auto-clears the request bit */
		while (readl(FLOW_CTRL_RAM_REPAIR) & FLOW_CTRL_RAM_REPAIR_REQ) {
			udelay(1);
			if (!(ram_repair_time--)) {
				WARN(1, "Failed to repair RAM in %d us\n",
				     RAM_REPAIR_TIMEOUT);
				return -ETIMEDOUT;
			}
		}
	}
#endif
	return 0;
}
454
/*
 * tegra_cluster_control - execute a G<->LP cluster switch.
 * @us:    wake timer interval in microseconds (0 / immediate = no timer).
 * @flags: TEGRA_POWER_CLUSTER_* and TEGRA_POWER_SDRAM_SELFREFRESH flags.
 *
 * Must be called with exactly one CPU online.  The switch happens inside
 * a power-down state: either through LP1 (SDRAM self refresh) or through
 * a regular cluster power down, with irqs disabled around the sequence.
 *
 * Returns 0 on success; -EINVAL for a malformed target, -EBUSY if more
 * than one CPU is online, -EEXIST if already on the target cluster and
 * no force flag, -EPERM if the G cluster is not present, or an error
 * from the early CRAIL power-up.
 */
int tegra_cluster_control(unsigned int us, unsigned int flags)
{
	static ktime_t last_g2lp;	/* time of the last G=>LP transition */

	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	unsigned long irq_flags;

	/* both-clusters mask or no cluster at all is invalid */
	if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
		return -EINVAL;

	if (num_online_cpus() > 1)
		return -EBUSY;

	if ((current_cluster == target_cluster)
	&& !(flags & TEGRA_POWER_CLUSTER_FORCE))
		return -EEXIST;

	if (target_cluster == TEGRA_POWER_CLUSTER_G)
		if (!is_g_cluster_present())
			return -EPERM;

	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		us = 0;

	DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
		(flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
		is_lp_cluster() ? "LP" : "G",
		(target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
		(flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
		(flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
		us));

	if ((current_cluster == TEGRA_POWER_CLUSTER_LP) &&
	    (target_cluster == TEGRA_POWER_CLUSTER_G)) {
		/* Enforce the minimum rail-off time since the last G=>LP
		   transition before powering the CPU rail back on */
		if (!timekeeping_suspended) {
			ktime_t now = ktime_get();
			s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
			s64 t_off = tegra_cpu_power_off_time();
			if (t_off > t)
				udelay((unsigned int)(t_off - t));
		}

		/* Start CPU rail transition up early - before disabling irq */
		if (tegra_crail_can_start_early()) {
			int ret = tegra_crail_startup_early();
			if (ret)
				return ret;
		}
	}

	local_irq_save(irq_flags);
	/* _rcuidle trace variant is required from the idle task context */
	if (is_idle_task(current))
		trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_START,
					    current_cluster,
					    target_cluster);
	else
		trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_START,
				    current_cluster,
				    target_cluster);
	tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);

#ifdef CONFIG_TEGRA_VIRTUAL_CPUID
	/* Tell the flow controller which logical CPU enters LP */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		if (target_cluster == TEGRA_POWER_CLUSTER_LP) {
			u32 cpu;

			cpu = cpu_logical_map(smp_processor_id());
			writel(cpu, FLOW_CTRL_MPID);
		}
	}
#endif

	if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
		/* Switch through LP1 (SDRAM in self refresh) */
		if (us)
			tegra_pd_set_trigger(us);

		tegra_cluster_switch_prolog(flags);
		tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
		tegra_cluster_switch_epilog(flags);

		if (us)
			tegra_pd_set_trigger(0);
	} else {
		/* Switch through a regular cluster power down */
		int cpu;

		cpu = cpu_logical_map(smp_processor_id());

		tegra_set_cpu_in_pd(cpu);
		cpu_pm_enter();
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					   &cpu);
		tegra_idle_power_down_last(0, flags);
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
					   &cpu);
		cpu_pm_exit();
		tegra_clear_cpu_in_pd(cpu);
	}

	/* Track rail state for DVFS and remember when G went down */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		ktime_t now = ktime_get();
		if (target_cluster == TEGRA_POWER_CLUSTER_G) {
			tegra_dvfs_rail_on(tegra_cpu_rail, now);
		} else {
			last_g2lp = now;
			tegra_dvfs_rail_off(tegra_cpu_rail, now);
		}
	}
	tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_end);
	if (is_idle_task(current))
		trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_DONE,
					    current_cluster,
					    target_cluster);
	else
		trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_DONE,
				    current_cluster,
				    target_cluster);
	local_irq_restore(irq_flags);

	DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));

	return 0;
}
582
583 int tegra_switch_to_lp_cluster()
584 {
585         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
586         struct clk *cpu_lp_clk = tegra_get_clock_by_name("cpu_lp");
587         int rate = clk_get_rate(cpu_clk);
588         int e;
589
590         if (is_lp_cluster())
591                 return 0;
592
593         /* Change the Clock Rate to desired LP CPU's clock rate */
594
595         if (rate > cpu_lp_clk->max_rate) {
596                 e = clk_set_rate(cpu_clk, cpu_lp_clk->max_rate);
597                 if (e) {
598                         pr_err("cluster_swtich: Failed to set clock %d", e);
599                         return e;
600                 }
601         }
602
603         e = clk_set_parent(cpu_clk, cpu_lp_clk);
604         if (e) {
605                 pr_err("cluster switching request failed (%d)\n", e);
606                 return e;
607         }
608         return e;
609 }
610
611 int tegra_switch_to_g_cluster()
612 {
613         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
614         struct clk *cpu_g_clk = tegra_get_clock_by_name("cpu_g");
615         int e;
616
617         if (!is_lp_cluster())
618                 return 0;
619
620         e = clk_set_parent(cpu_clk, cpu_g_clk);
621         if (e) {
622                 pr_err("cluster switching request failed (%d)\n", e);
623                 return e;
624         }
625
626         /* Switch back to G Cluster Cpu Max Clock rate */
627
628         e = clk_set_rate(cpu_clk, cpu_g_clk->max_rate);
629         if (e) {
630                 pr_err("cluster_swtich: Failed to increase the clock %d\n", e);
631                 return e;
632         }
633         return e;
634 }
635
/*
 * tegra_cluster_switch - switch clusters by re-parenting the cpu clock.
 * @cpu_clk:         the virtual "cpu" clock.
 * @new_cluster_clk: target cluster clock.
 *
 * Core EDP limits are tightened before entering LP and relaxed only
 * after reaching G, so the stricter constraint is in force during the
 * actual switch.
 *
 * Returns 0 on success or a negative error code.
 */
int tegra_cluster_switch(struct clk *cpu_clk, struct clk *new_cluster_clk)
{
	int ret;
	/* Target is LP iff: already on LP and no re-parent is needed, or
	   on G and a re-parent is needed (XOR of the two conditions). */
	bool is_target_lp = is_lp_cluster() ^
		(clk_get_parent(cpu_clk) != new_cluster_clk);

	/* Update core edp limits before switch to LP cluster; abort on error */
	if (is_target_lp) {
		ret = tegra_core_edp_cpu_state_update(is_target_lp);
		if (ret)
			return ret;
	}

	ret = clk_set_parent(cpu_clk, new_cluster_clk);
	if (ret)
		return ret;

	/* Update core edp limits after switch to G cluster; ignore error */
	if (!is_target_lp)
		tegra_core_edp_cpu_state_update(is_target_lp);

	return 0;
}
659 #endif
660
661 #ifdef CONFIG_PM_SLEEP
662
/*
 * MC hook on LP0 entry.  Intentionally a no-op: memory frequency after
 * LP0 is restored to the boot rate, so MC timing is saved once during
 * init rather than on every LP0 entry.  Kept just in case, anyway.
 */
void tegra_lp0_suspend_mc(void)
{
}
669
/* MC hook on LP0 exit: restore saved memory controller timing. */
void tegra_lp0_resume_mc(void)
{
	tegra_mc_timing_restore();
}
674
675 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
676 static int __init get_clock_cclk_lp(void)
677 {
678         if (!cclk_lp)
679                 cclk_lp = tegra_get_clock_by_name("cclk_lp");
680         return 0;
681 }
682 subsys_initcall(get_clock_cclk_lp);
683 #endif
684
/*
 * tegra_lp0_cpu_mode - move between G and LP clusters around LP0.
 * @enter: true when entering LP0, false when exiting.
 *
 * If LP0 is entered while running on the G cluster, switch to the LP
 * cluster immediately before suspend and back to G after resume.  The
 * decision is latched in entered_on_g across the enter/exit pair.
 */
void tegra_lp0_cpu_mode(bool enter)
{
	static bool entered_on_g = false;	/* latched across enter/exit */
	unsigned int flags;

	if (enter)
		entered_on_g = !is_lp_cluster();

	if (entered_on_g) {
#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
		/* keep cclk_lp alive while we are parked on LP */
		if (enter)
			tegra_clk_prepare_enable(cclk_lp);
#endif

		flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
		flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
		flags |= TEGRA_POWER_CLUSTER_PART_DEFAULT;
#endif
#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
		if (!tegra_cluster_control(0, flags)) {
			if (!enter)
				tegra_clk_disable_unprepare(cclk_lp);
			pr_info("Tegra: switched to %s cluster %s LP0\n",
				enter ? "LP" : "G",
				enter ? "before entering" : "after exiting");
		}
#endif
	}
}
715
716 #define IO_DPD_INFO(_name, _index, _bit) \
717         { \
718                 .name = _name, \
719                 .io_dpd_reg_index = _index, \
720                 .io_dpd_bit = _bit, \
721         }
722
723 /* PMC IO DPD register offsets */
724 #define APBDEV_PMC_IO_DPD_REQ_0         0x1b8
725 #define APBDEV_PMC_IO_DPD_STATUS_0      0x1bc
726 #define APBDEV_PMC_SEL_DPD_TIM_0        0x1c8
727 #define APBDEV_DPD_ENABLE_LSB           30
728 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
729 #define APBDEV_DPD2_ENABLE_LSB          5
730 #else
731 #define APBDEV_DPD2_ENABLE_LSB          30
732 #endif
733 #define PMC_DPD_SAMPLE                  0x20
734
/* Per-board IO DPD pad table; empty by default, entries are added with
   the IO_DPD_INFO() initializer above. */
static struct tegra_io_dpd tegra_list_io_dpd[] = {
};
737 #endif
738
739 /* we want to cleanup bootloader io dpd setting in kernel */
740 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
741
742 #if defined CONFIG_PM_SLEEP
/*
 * tegra_io_dpd_get - look up the IO DPD handle for a device.
 * @dev: device whose pads may support deep power down (may be NULL).
 *
 * Matches dev_name(dev) against the tegra_list_io_dpd table.
 * NOTE(review): strncmp bounded by strlen(name) makes this a prefix
 * match of the table entry against the device name - confirm intended.
 *
 * Returns the matching handle, or NULL when the device has no IO DPD
 * support (or CONFIG_TEGRA_IO_DPD is disabled).
 */
struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
#ifdef CONFIG_TEGRA_IO_DPD
	int i;
	const char *name = dev ? dev_name(dev) : NULL;
	if (name) {
		for (i = 0; i < ARRAY_SIZE(tegra_list_io_dpd); i++) {
			if (!(strncmp(tegra_list_io_dpd[i].name, name,
				strlen(name)))) {
				return &tegra_list_io_dpd[i];
			}
		}
	}
	dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
		((name) ? name : "NULL"));
#endif
	return NULL;
}
761
762 static DEFINE_SPINLOCK(tegra_io_dpd_lock);
763
764 void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
765 {
766         unsigned int enable_mask;
767         unsigned int dpd_status;
768         unsigned int dpd_enable_lsb;
769
770         if (!hnd)
771                 return;
772
773         spin_lock(&tegra_io_dpd_lock);
774         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
775                                                 APBDEV_DPD_ENABLE_LSB;
776         writel(0x1, pmc + PMC_DPD_SAMPLE);
777         writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
778         enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
779         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
780                                         hnd->io_dpd_reg_index * 8));
781         /* delay pclk * (reset APBDEV_PMC_SEL_DPD_TIM_0 value 127 + 5) */
782         udelay(7);
783         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
784                                         hnd->io_dpd_reg_index * 8));
785         if (!(dpd_status & (1 << hnd->io_dpd_bit))) {
786                 if (!tegra_platform_is_fpga()) {
787                         pr_info("Error: dpd%d enable failed, status=%#x\n",
788                         (hnd->io_dpd_reg_index + 1), dpd_status);
789                 }
790         }
791         /* Sample register must be reset before next sample operation */
792         writel(0x0, pmc + PMC_DPD_SAMPLE);
793         spin_unlock(&tegra_io_dpd_lock);
794         return;
795 }
796
797 void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
798 {
799         unsigned int enable_mask;
800         unsigned int dpd_status;
801         unsigned int dpd_enable_lsb;
802
803         if (!hnd)
804                 return;
805
806         spin_lock(&tegra_io_dpd_lock);
807         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
808                                                 APBDEV_DPD_ENABLE_LSB;
809         enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
810         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
811                                         hnd->io_dpd_reg_index * 8));
812         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
813                                         hnd->io_dpd_reg_index * 8));
814         if (dpd_status & (1 << hnd->io_dpd_bit)) {
815                 if (!tegra_platform_is_fpga()) {
816                         pr_info("Error: dpd%d disable failed, status=%#x\n",
817                         (hnd->io_dpd_reg_index + 1), dpd_status);
818                 }
819         }
820         spin_unlock(&tegra_io_dpd_lock);
821         return;
822 }
823
824 static void tegra_io_dpd_delayed_disable(struct work_struct *work)
825 {
826         struct tegra_io_dpd *hnd = container_of(
827                 to_delayed_work(work), struct tegra_io_dpd, delay_dpd);
828         tegra_io_dpd_disable(hnd);
829         hnd->need_delay_dpd = 0;
830 }
831
832 int tegra_io_dpd_init(void)
833 {
834         int i;
835         for (i = 0;
836                 i < (sizeof(tegra_list_io_dpd) / sizeof(struct tegra_io_dpd));
837                 i++) {
838                         INIT_DELAYED_WORK(&(tegra_list_io_dpd[i].delay_dpd),
839                                 tegra_io_dpd_delayed_disable);
840                         mutex_init(&(tegra_list_io_dpd[i].delay_lock));
841                         tegra_list_io_dpd[i].need_delay_dpd = 0;
842         }
843         return 0;
844 }
845
846 #else
847
/* CONFIG_PM_SLEEP disabled: IO DPD support compiles out to no-op stubs
   so callers need no conditional compilation of their own. */

int tegra_io_dpd_init(void)
{
	return 0;
}

void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
{
}

void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
{
}

/* No DPD support: every device lookup reports "not supported". */
struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
	return NULL;
}
865
866 #endif
867
868 EXPORT_SYMBOL(tegra_io_dpd_get);
869 EXPORT_SYMBOL(tegra_io_dpd_enable);
870 EXPORT_SYMBOL(tegra_io_dpd_disable);
871 EXPORT_SYMBOL(tegra_io_dpd_init);
872
/* Descriptor for one PMC IO DPD request register. */
struct io_dpd_reg_info {
	u32 req_reg_off;	/* offset of the DPD request register in PMC */
	u8 dpd_code_lsb;	/* LSB position of the 2-bit DPD request code */
};

/* Tegra3 PMC IO DPD request registers (DPD_REQ and DPD2_REQ) */
static struct io_dpd_reg_info t3_io_dpd_req_regs[] = {
	{0x1b8, 30},
	{0x1c0, 30},
};
882
883 /* io dpd off request code */
884 #define IO_DPD_CODE_OFF         1
885
886 /* cleans io dpd settings from bootloader during kernel init */
887 void tegra_bl_io_dpd_cleanup()
888 {
889         int i;
890         unsigned int dpd_mask;
891         unsigned int dpd_status;
892
893         pr_info("Clear bootloader IO dpd settings\n");
894         /* clear all dpd requests from bootloader */
895         for (i = 0; i < ARRAY_SIZE(t3_io_dpd_req_regs); i++) {
896                 dpd_mask = ((1 << t3_io_dpd_req_regs[i].dpd_code_lsb) - 1);
897                 dpd_mask |= (IO_DPD_CODE_OFF <<
898                         t3_io_dpd_req_regs[i].dpd_code_lsb);
899                 writel(dpd_mask, pmc + t3_io_dpd_req_regs[i].req_reg_off);
900                 /* dpd status register is next to req reg in tegra3 */
901                 dpd_status = readl(pmc +
902                         (t3_io_dpd_req_regs[i].req_reg_off + 4));
903         }
904         return;
905 }
906 EXPORT_SYMBOL(tegra_bl_io_dpd_cleanup);
907