9f82908f433b48deee58e25b390f0e860a955ada
[linux-3.10.git] / arch / arm / mach-tegra / pm.c
1 /*
2  * arch/arm/mach-tegra/pm.c
3  *
4  * CPU complex suspend & resume functions for Tegra SoCs
5  *
6  * Copyright (c) 2009-2012, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/ctype.h>
25 #include <linux/init.h>
26 #include <linux/io.h>
27 #include <linux/sched.h>
28 #include <linux/smp.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/clk.h>
32 #include <linux/err.h>
33 #include <linux/debugfs.h>
34 #include <linux/delay.h>
35 #include <linux/suspend.h>
36 #include <linux/slab.h>
37 #include <linux/serial_reg.h>
38 #include <linux/seq_file.h>
39 #include <linux/uaccess.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/cpu_pm.h>
42 #include <linux/clk/tegra.h>
43 #include <linux/export.h>
44 #include <linux/vmalloc.h>
45 #include <linux/memblock.h>
46 #include <linux/console.h>
47 #include <linux/tegra_audio.h>
48
49 #include <trace/events/power.h>
50
51 #include <asm/cacheflush.h>
52 #include <asm/idmap.h>
53 #include <asm/localtimer.h>
54 #include <asm/pgalloc.h>
55 #include <asm/pgtable.h>
56 #include <asm/tlbflush.h>
57 #include <asm/suspend.h>
58 #include <asm/smp_plat.h>
59
60 #include <mach/irqs.h>
61 #include <mach/powergate.h>
62 #include <mach/hardware.h>
63
64 #include "board.h"
65 #include "clock.h"
66 #include "common.h"
67 #include "cpuidle.h"
68 #include "fuse.h"
69 #include "gic.h"
70 #include "iomap.h"
71 #include "pm.h"
72 #include "pm-irq.h"
73 #include "reset.h"
74 #include "sleep.h"
75 #include "timer.h"
76 #include "dvfs.h"
77 #include "cpu-tegra.h"
78
/*
 * CPU-complex clock and timer state saved by suspend_cpu_complex() and
 * restored by restore_cpu_complex() across an LP2/suspend power transition.
 */
struct suspend_context {
        /*
         * The next 7 values are referenced by offset in __restart_plls
         * in headsmp-t2.S, and should not be moved
         */
        u32 pllx_misc;          /* saved CLK_RESET_PLLX_MISC */
        u32 pllx_base;          /* saved CLK_RESET_PLLX_BASE */
        u32 pllp_misc;          /* saved CLK_RESET_PLLP_MISC */
        u32 pllp_base;          /* saved CLK_RESET_PLLP_BASE */
        u32 pllp_outa;          /* saved CLK_RESET_PLLP_OUTA */
        u32 pllp_outb;          /* saved CLK_RESET_PLLP_OUTB */
        u32 pll_timeout;        /* consumed by __restart_plls; not written in this file — TODO confirm */

        u32 cpu_burst;          /* saved CLK_RESET_CCLK_BURST policy */
        u32 clk_csite_src;      /* saved CLK_RESET_SOURCE_CSITE (coresight clock source) */
        u32 cclk_divider;       /* saved CLK_RESET_CCLK_DIVIDER */

        u32 mc[3];              /* MC_SECURITY_{START,SIZE,CFG2} */
        u8 uart[5];             /* UART state — not referenced in this part of the file; verify against LP1 path */

        struct tegra_twd_context twd;           /* local (TWD) timer state */
#ifdef CONFIG_ARM_ARCH_TIMER
        struct arch_timer_context arch_timer;   /* ARM generic timer state */
#endif
};
104
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_TRUSTED_FOUNDATIONS
void *tegra_cpu_context;        /* non-cacheable page for CPU context */
#endif
phys_addr_t tegra_pgd_phys;     /* pgd used by hotplug & LP2 bootup */
static pgd_t *tegra_pgd;                /* identity-mapped table built by create_suspend_pgtable() */
static DEFINE_SPINLOCK(tegra_lp2_lock); /* protects tegra_in_lp2 and its IRAM shadow */
static cpumask_t tegra_in_lp2;          /* CPUs currently in the LP2 state */
static cpumask_t *iram_cpu_lp2_mask;    /* IRAM shadow of tegra_in_lp2 read by the reset handler */
static unsigned long *iram_cpu_lp1_mask;        /* IRAM LP1 mask; initialized elsewhere — TODO confirm */
static u8 *iram_save;                   /* buffer preserving IRAM contents across suspend */
static unsigned long iram_save_size;    /* size of the saved IRAM region */
static void __iomem *iram_code = IO_ADDRESS(TEGRA_IRAM_CODE_AREA);
static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
static int tegra_last_pclk;             /* cache key used by set_power_timers() */
#endif
122
123 struct suspend_context tegra_sctx;
124
125 #define TEGRA_POWER_PWRREQ_POLARITY     (1 << 8)   /* core power request polarity */
126 #define TEGRA_POWER_PWRREQ_OE           (1 << 9)   /* core power request enable */
127 #define TEGRA_POWER_SYSCLK_POLARITY     (1 << 10)  /* sys clk polarity */
128 #define TEGRA_POWER_SYSCLK_OE           (1 << 11)  /* system clock enable */
129 #define TEGRA_POWER_PWRGATE_DIS         (1 << 12)  /* power gate disabled */
130 #define TEGRA_POWER_EFFECT_LP0          (1 << 14)  /* enter LP0 when CPU pwr gated */
131 #define TEGRA_POWER_CPU_PWRREQ_POLARITY (1 << 15)  /* CPU power request polarity */
132 #define TEGRA_POWER_CPU_PWRREQ_OE       (1 << 16)  /* CPU power request enable */
133
134 #define PMC_CTRL                0x0
135 #define PMC_CTRL_LATCH_WAKEUPS  (1 << 5)
136 #define PMC_WAKE_MASK           0xc
137 #define PMC_WAKE_LEVEL          0x10
138 #define PMC_DPAD_ORIDE          0x1C
139 #define PMC_WAKE_DELAY          0xe0
140 #define PMC_DPD_SAMPLE          0x20
141 #define PMC_IO_DPD_REQ_0        0x1b8
142 #define PMC_IO_DPD2_REQ_0       0X1C0
143
144 #define PMC_WAKE_STATUS         0x14
145 #define PMC_SW_WAKE_STATUS      0x18
146 #define PMC_COREPWRGOOD_TIMER   0x3c
147 #define PMC_CPUPWRGOOD_TIMER    0xc8
148 #define PMC_CPUPWROFF_TIMER     0xcc
149 #define PMC_COREPWROFF_TIMER    PMC_WAKE_DELAY
150
151 #define PMC_PWRGATE_TOGGLE      0x30
152 #define PWRGATE_TOGGLE_START    (1 << 8)
153 #define UN_PWRGATE_CPU          \
154         (PWRGATE_TOGGLE_START | TEGRA_CPU_POWERGATE_ID(TEGRA_POWERGATE_CPU))
155
156 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
157 #define PMC_SCRATCH4_WAKE_CLUSTER_MASK  (1<<31)
158 #endif
159
160 #define CLK_RESET_CCLK_BURST    0x20
161 #define CLK_RESET_CCLK_DIVIDER  0x24
162 #define CLK_RESET_PLLC_BASE     0x80
163 #define CLK_RESET_PLLM_BASE     0x90
164 #define CLK_RESET_PLLX_BASE     0xe0
165 #define CLK_RESET_PLLX_MISC     0xe4
166 #define CLK_RESET_PLLP_BASE     0xa0
167 #define CLK_RESET_PLLP_OUTA     0xa4
168 #define CLK_RESET_PLLP_OUTB     0xa8
169 #define CLK_RESET_PLLP_MISC     0xac
170
171 #define CLK_RESET_SOURCE_CSITE  0x1d4
172
173 #define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
174 #define CLK_RESET_CCLK_RUN_POLICY_SHIFT    4
175 #define CLK_RESET_CCLK_IDLE_POLICY_SHIFT   0
176 #define CLK_RESET_CCLK_IDLE_POLICY         1
177 #define CLK_RESET_CCLK_RUN_POLICY          2
178 #define CLK_RESET_CCLK_BURST_POLICY_PLLM   3
179 #define CLK_RESET_CCLK_BURST_POLICY_PLLX   8
180
181 #define EMC_MRW_0               0x0e8
182 #define EMC_MRW_DEV_SELECTN     30
183 #define EMC_MRW_DEV_NONE        (3 << EMC_MRW_DEV_SELECTN)
184
185 #define MC_SECURITY_START       0x6c
186 #define MC_SECURITY_SIZE        0x70
187 #define MC_SECURITY_CFG2        0x7c
188
struct dvfs_rail *tegra_cpu_rail;       /* CPU voltage rail (paused/resumed around suspend) */
static struct dvfs_rail *tegra_core_rail;       /* core voltage rail */
static struct clk *tegra_pclk;          /* pclk used to derive PMC timer tick counts */
static const struct tegra_suspend_platform_data *pdata;        /* board suspend parameters */
static enum tegra_suspend_mode current_suspend_mode = TEGRA_SUSPEND_NONE;
194
#if defined(CONFIG_TEGRA_CLUSTER_CONTROL) && INSTRUMENT_CLUSTER_SWITCH
enum tegra_cluster_switch_time_id {
        tegra_cluster_switch_time_id_start = 0,
        tegra_cluster_switch_time_id_prolog,
        tegra_cluster_switch_time_id_switch,
        tegra_cluster_switch_time_id_epilog,
        tegra_cluster_switch_time_id_max
};

static unsigned long
                tegra_cluster_switch_times[tegra_cluster_switch_time_id_max];

/*
 * Record the free-running microsecond counter at phase @id of a cluster
 * switch, for the latency report printed at the end of tegra_idle_lp2_last().
 * No-op unless @flags requests a cluster switch.
 *
 * Note: the original macro's wmb() was indented as if guarded by the
 * "id < ...max" check but actually executed unconditionally; braces now
 * make the barrier follow the timestamp store it orders.
 */
#define tegra_cluster_switch_time(flags, id) \
        do { \
                barrier(); \
                if (flags & TEGRA_POWER_CLUSTER_MASK) { \
                        void __iomem *timer_us = \
                                                IO_ADDRESS(TEGRA_TMRUS_BASE); \
                        if (id < tegra_cluster_switch_time_id_max) { \
                                tegra_cluster_switch_times[id] = \
                                                        readl(timer_us); \
                                wmb(); \
                        } \
                } \
                barrier(); \
        } while (0)
#else
#define tegra_cluster_switch_time(flags, id) do {} while (0)
#endif
222
223 #ifdef CONFIG_PM_SLEEP
/* Lower-case mode names, indexed by enum tegra_suspend_mode (sysfs/debug) */
static const char *tegra_suspend_name[TEGRA_MAX_SUSPEND_MODE] = {
        [TEGRA_SUSPEND_NONE]    = "none",
        [TEGRA_SUSPEND_LP2]     = "lp2",
        [TEGRA_SUSPEND_LP1]     = "lp1",
        [TEGRA_SUSPEND_LP0]     = "lp0",
};
230
231 unsigned long tegra_cpu_power_good_time(void)
232 {
233         if (WARN_ON_ONCE(!pdata))
234                 return 5000;
235
236         return pdata->cpu_timer;
237 }
238
239 unsigned long tegra_cpu_power_off_time(void)
240 {
241         if (WARN_ON_ONCE(!pdata))
242                 return 5000;
243
244         return pdata->cpu_off_timer;
245 }
246
247 unsigned long tegra_cpu_lp2_min_residency(void)
248 {
249         if (WARN_ON_ONCE(!pdata))
250                 return 2000;
251
252         return pdata->cpu_lp2_min_residency;
253 }
254
/*
 * create_suspend_pgtable
 *
 * Creates a page table with identity mappings of physical memory and IRAM
 * for use when the MMU is off, in addition to all the regular kernel mappings.
 *
 * Returns 0 on success, -ENOMEM if the pgd cannot be allocated.
 * Side effect: publishes the physical pgd address in tegra_pgd_phys for
 * the hotplug/LP2 boot path.
 */
static __init int create_suspend_pgtable(void)
{
        tegra_pgd = pgd_alloc(&init_mm);
        if (!tegra_pgd)
                return -ENOMEM;

        /* Only identity-map size of lowmem (high_memory - PAGE_OFFSET) */
        identity_mapping_add(tegra_pgd, phys_to_virt(PHYS_OFFSET),
                high_memory, 0);
        identity_mapping_add(tegra_pgd, IO_IRAM_VIRT,
                IO_IRAM_VIRT + SECTION_SIZE, 0);

        /* inner/outer write-back/write-allocate, sharable */
        /* low bits 0x4A are TTBR attribute bits OR'd into the base address */
        tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK) | 0x4A;

        return 0;
}
278
/* ensures that sufficient time is passed for a register write to
 * serialize into the 32KHz domain */
static void pmc_32kwritel(u32 val, unsigned long offs)
{
        writel(val, pmc + offs);
        /* > 4 cycles of the 32 kHz clock (~122 us per 4 ticks) */
        udelay(130);
}
286
287 static void set_power_timers(unsigned long us_on, unsigned long us_off,
288                              long rate)
289 {
290         static unsigned long last_us_off = 0;
291         unsigned long long ticks;
292         unsigned long long pclk;
293
294         if (WARN_ON_ONCE(rate <= 0))
295                 pclk = 100000000;
296         else
297                 pclk = rate;
298
299         if ((rate != tegra_last_pclk) || (us_off != last_us_off)) {
300                 ticks = (us_on * pclk) + 999999ull;
301                 do_div(ticks, 1000000);
302                 writel((unsigned long)ticks, pmc + PMC_CPUPWRGOOD_TIMER);
303
304                 ticks = (us_off * pclk) + 999999ull;
305                 do_div(ticks, 1000000);
306                 writel((unsigned long)ticks, pmc + PMC_CPUPWROFF_TIMER);
307                 wmb();
308         }
309         tegra_last_pclk = pclk;
310         last_us_off = us_off;
311 }
312
/*
 * restore_cpu_complex
 *
 * restores cpu clock setting, clears flow controller
 *
 * Always called on CPU 0.
 */
static void restore_cpu_complex(u32 mode)
{
        int cpu = smp_processor_id();
        unsigned int reg, policy;

        BUG_ON(cpu != 0);

#ifdef CONFIG_SMP
        /* flow-controller CSR registers are indexed by physical CPU id */
        cpu = cpu_logical_map(cpu);
#endif
        /* Is CPU complex already running on PLLX? */
        reg = readl(clk_rst + CLK_RESET_CCLK_BURST);
        policy = (reg >> CLK_RESET_CCLK_BURST_POLICY_SHIFT) & 0xF;

        /* extract the active clock-source field for the current policy */
        if (policy == CLK_RESET_CCLK_IDLE_POLICY)
                reg = (reg >> CLK_RESET_CCLK_IDLE_POLICY_SHIFT) & 0xF;
        else if (policy == CLK_RESET_CCLK_RUN_POLICY)
                reg = (reg >> CLK_RESET_CCLK_RUN_POLICY_SHIFT) & 0xF;
        else
                BUG();

        if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
                /* restore PLLX settings if CPU is on different PLL */
                writel(tegra_sctx.pllx_misc, clk_rst + CLK_RESET_PLLX_MISC);
                writel(tegra_sctx.pllx_base, clk_rst + CLK_RESET_PLLX_BASE);

                /* wait for PLL stabilization if PLLX was enabled */
                if (tegra_sctx.pllx_base & (1<<30)) {
#if USE_PLL_LOCK_BITS
                        /* Enable lock detector */
                        reg = readl(clk_rst + CLK_RESET_PLLX_MISC);
                        reg |= 1<<18;
                        writel(reg, clk_rst + CLK_RESET_PLLX_MISC);
                        /* spin until the PLLX lock bit (bit 27) reads set */
                        while (!(readl(clk_rst + CLK_RESET_PLLX_BASE) &
                                 (1<<27)))
                                cpu_relax();

                        udelay(PLL_POST_LOCK_DELAY);
#else
                        /* no lock bit: fixed settle delay */
                        udelay(300);
#endif
                }
        }

        /* Restore original burst policy setting for calls resulting from CPU
           LP2 in idle or system suspend; keep cluster switch prolog setting
           intact. */
        if (!(mode & TEGRA_POWER_CLUSTER_MASK)) {
                writel(tegra_sctx.cclk_divider, clk_rst +
                       CLK_RESET_CCLK_DIVIDER);
                writel(tegra_sctx.cpu_burst, clk_rst +
                       CLK_RESET_CCLK_BURST);
        }

        /* restore the coresight clock source saved by suspend_cpu_complex() */
        writel(tegra_sctx.clk_csite_src, clk_rst + CLK_RESET_SOURCE_CSITE);

        /* Do not power-gate CPU 0 when flow controlled */
        reg = readl(FLOW_CTRL_CPU_CSR(cpu));
        reg &= ~FLOW_CTRL_CSR_WFE_BITMAP;       /* clear wfe bitmap */
        reg &= ~FLOW_CTRL_CSR_WFI_BITMAP;       /* clear wfi bitmap */
        reg &= ~FLOW_CTRL_CSR_ENABLE;           /* clear enable */
        reg |= FLOW_CTRL_CSR_INTR_FLAG;         /* clear intr */
        reg |= FLOW_CTRL_CSR_EVENT_FLAG;        /* clear event */
        flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));

        /* If an immediate cluster switch is being performed, restore the
           local timer registers. For calls resulting from CPU LP2 in
           idle or system suspend, the local timer was shut down and
           timekeeping switched over to the global system timer. In this
           case keep local timer disabled, and restore only periodic load. */
        if (!(mode & (TEGRA_POWER_CLUSTER_MASK |
                      TEGRA_POWER_CLUSTER_IMMEDIATE))) {
#ifdef CONFIG_ARM_ARCH_TIMER
                tegra_sctx.arch_timer.cntp_ctl = 0;
#endif
#ifdef CONFIG_HAVE_ARM_TWD
                tegra_sctx.twd.twd_ctrl = 0;
#endif
        }
#ifdef CONFIG_ARM_ARCH_TIMER
        arch_timer_resume(&tegra_sctx.arch_timer);
#endif
#ifdef CONFIG_HAVE_ARM_TWD
        tegra_twd_resume(&tegra_sctx.twd);
#endif
}
406
/*
 * suspend_cpu_complex
 *
 * saves pll state for use by restart_plls, prepares flow controller for
 * transition to suspend state
 *
 * Must always be called on cpu 0.
 */
static void suspend_cpu_complex(u32 mode)
{
        int cpu = smp_processor_id();
        unsigned int reg;
        int i;

        BUG_ON(cpu != 0);

#ifdef CONFIG_SMP
        /* flow-controller CSR registers are indexed by physical CPU id */
        cpu = cpu_logical_map(cpu);
#endif
        /* switch coresite to clk_m, save off original source */
        tegra_sctx.clk_csite_src = readl(clk_rst + CLK_RESET_SOURCE_CSITE);
        writel(3<<30, clk_rst + CLK_RESET_SOURCE_CSITE);

        /* snapshot CPU burst policy and PLLX/PLLP state into tegra_sctx */
        tegra_sctx.cpu_burst = readl(clk_rst + CLK_RESET_CCLK_BURST);
        tegra_sctx.pllx_base = readl(clk_rst + CLK_RESET_PLLX_BASE);
        tegra_sctx.pllx_misc = readl(clk_rst + CLK_RESET_PLLX_MISC);
        tegra_sctx.pllp_base = readl(clk_rst + CLK_RESET_PLLP_BASE);
        tegra_sctx.pllp_outa = readl(clk_rst + CLK_RESET_PLLP_OUTA);
        tegra_sctx.pllp_outb = readl(clk_rst + CLK_RESET_PLLP_OUTB);
        tegra_sctx.pllp_misc = readl(clk_rst + CLK_RESET_PLLP_MISC);
        tegra_sctx.cclk_divider = readl(clk_rst + CLK_RESET_CCLK_DIVIDER);

#ifdef CONFIG_HAVE_ARM_TWD
        tegra_twd_suspend(&tegra_sctx.twd);
#endif
#ifdef CONFIG_ARM_ARCH_TIMER
        arch_timer_suspend(&tegra_sctx.arch_timer);
#endif

        /* arm the flow controller to power-gate this CPU on wfe/wfi */
        reg = readl(FLOW_CTRL_CPU_CSR(cpu));
        reg &= ~FLOW_CTRL_CSR_WFE_BITMAP;       /* clear wfe bitmap */
        reg &= ~FLOW_CTRL_CSR_WFI_BITMAP;       /* clear wfi bitmap */
        reg |= FLOW_CTRL_CSR_INTR_FLAG;         /* clear intr flag */
        reg |= FLOW_CTRL_CSR_EVENT_FLAG;        /* clear event flag */
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
        reg |= FLOW_CTRL_CSR_WFE_CPU0 << cpu;   /* enable power gating on wfe */
#else
        reg |= FLOW_CTRL_CSR_WFI_CPU0 << cpu;   /* enable power gating on wfi */
#endif
        reg |= FLOW_CTRL_CSR_ENABLE;            /* enable power gating */
        flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));

        /* clear stale event/interrupt flags on all other CPUs */
        for (i = 0; i < num_possible_cpus(); i++) {
                if (i == cpu)
                        continue;
                reg = readl(FLOW_CTRL_CPU_CSR(i));
                reg |= FLOW_CTRL_CSR_EVENT_FLAG;
                reg |= FLOW_CTRL_CSR_INTR_FLAG;
                flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(i));
        }

        tegra_gic_cpu_disable(true);
}
470
/* Mark @cpu as having left LP2; it must currently be marked as in LP2. */
void tegra_clear_cpu_in_lp2(int cpu)
{
        spin_lock(&tegra_lp2_lock);
        BUG_ON(!cpumask_test_cpu(cpu, &tegra_in_lp2));
        cpumask_clear_cpu(cpu, &tegra_in_lp2);

        /* Update the IRAM copy used by the reset handler. The IRAM copy
           can't be used directly by cpumask_clear_cpu() because it uses
           LDREX/STREX which requires the addressed location to be inner
           cacheable and sharable which IRAM isn't. */
        writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
        dsb();

        spin_unlock(&tegra_lp2_lock);
}
486
/*
 * Mark @cpu as entering LP2.
 * Returns true when @cpu is CPU0 and every online CPU is now in LP2,
 * i.e. the caller is the last CPU down and must do the final suspend steps.
 */
bool tegra_set_cpu_in_lp2(int cpu)
{
        bool last_cpu = false;

        spin_lock(&tegra_lp2_lock);
        BUG_ON(cpumask_test_cpu(cpu, &tegra_in_lp2));
        cpumask_set_cpu(cpu, &tegra_in_lp2);

        /* Update the IRAM copy used by the reset handler. The IRAM copy
           can't be used directly by cpumask_set_cpu() because it uses
           LDREX/STREX which requires the addressed location to be inner
           cacheable and sharable which IRAM isn't. */
        writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
        dsb();

        if ((cpu == 0) && cpumask_equal(&tegra_in_lp2, cpu_online_mask))
                last_cpu = true;
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
        else if (cpu == 1)
                tegra2_cpu_set_resettable_soon();
#endif

        spin_unlock(&tegra_lp2_lock);
        return last_cpu;
}
512
513 bool tegra_is_cpu_in_lp2(int cpu)
514 {
515         bool in_lp2;
516
517         spin_lock(&tegra_lp2_lock);
518         in_lp2 = cpumask_test_cpu(cpu, &tegra_in_lp2);
519         spin_unlock(&tegra_lp2_lock);
520         return in_lp2;
521 }
522
/*
 * tegra_sleep_core - suspend the CPU via cpu_suspend() for LP0/LP1.
 * @mode: target suspend mode (selects the resume entry point under TF)
 * @v2p:  virtual-to-physical translation offset passed to cpu_suspend()
 */
static void tegra_sleep_core(enum tegra_suspend_mode mode,
                             unsigned long v2p)
{
#ifdef CONFIG_TRUSTED_FOUNDATIONS
        /* Tell the secure monitor where to resume; the literal arguments
           appear to be Trusted Foundations SMC call IDs — TODO confirm
           against the TF interface documentation. */
        if (mode == TEGRA_SUSPEND_LP0) {
                tegra_generic_smc_uncached(0xFFFFFFFC, 0xFFFFFFE3,
                                           virt_to_phys(tegra_resume));
        } else {
                tegra_generic_smc_uncached(0xFFFFFFFC, 0xFFFFFFE6,
                                           (TEGRA_RESET_HANDLER_BASE +
                                            tegra_cpu_reset_handler_offset));
        }
#endif
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
        cpu_suspend(v2p, tegra2_sleep_core_finish);
#else
        cpu_suspend(v2p, tegra3_sleep_core_finish);
#endif
}
542
/*
 * tegra_sleep_cpu - suspend just the CPU (LP2 path) via cpu_suspend().
 * @v2p: virtual-to-physical translation offset passed to cpu_suspend()
 */
static inline void tegra_sleep_cpu(unsigned long v2p)
{
#ifdef CONFIG_TRUSTED_FOUNDATIONS
        /* register the reset-handler resume address with the secure monitor;
           the literal arguments appear to be TF SMC call IDs — TODO confirm */
        tegra_generic_smc_uncached(0xFFFFFFFC, 0xFFFFFFE4,
                                   (TEGRA_RESET_HANDLER_BASE +
                                    tegra_cpu_reset_handler_offset));
#endif
        cpu_suspend(v2p, tegra_sleep_cpu_finish);
}
552
/*
 * tegra_idle_lp2_last - final suspend steps run by the last CPU entering LP2.
 * @sleep_time: optional wake-up trigger in microseconds (0 = none)
 * @flags:      TEGRA_POWER_CLUSTER_* flags; non-zero cluster bits request a
 *              cluster switch instead of a plain LP2
 *
 * Returns the time remaining on the LP2 wake timer.
 * Interrupts are disabled and all other CPUs are already in LP2 here.
 */
unsigned int tegra_idle_lp2_last(unsigned int sleep_time, unsigned int flags)
{
        u32 reg;
        unsigned int remain;
        pgd_t *pgd;

        /* Only the last cpu down does the final suspend steps */
        reg = readl(pmc + PMC_CTRL);
        reg |= TEGRA_POWER_CPU_PWRREQ_OE;
        if (pdata->combined_req)
                reg &= ~TEGRA_POWER_PWRREQ_OE;
        else
                reg |= TEGRA_POWER_PWRREQ_OE;

        reg &= ~TEGRA_POWER_EFFECT_LP0;
        writel(reg, pmc + PMC_CTRL);

        tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);

        /*
         * We can use clk_get_rate_all_locked() here, because all other cpus
         * are in LP2 state and irqs are disabled
         */
        if (flags & TEGRA_POWER_CLUSTER_MASK) {
                trace_cpu_cluster(POWER_CPU_CLUSTER_START);
                set_power_timers(pdata->cpu_timer, 0,
                        clk_get_rate_all_locked(tegra_pclk));
                if (flags & TEGRA_POWER_CLUSTER_G) {
                        /*
                         * To reduce the vdd_cpu up latency when LP->G
                         * transition. Before the transition, enable
                         * the vdd_cpu rail.
                         */
                        if (is_lp_cluster())
                                writel(UN_PWRGATE_CPU,
                                       pmc + PMC_PWRGATE_TOGGLE);
                }
                tegra_cluster_switch_prolog(flags);
        } else {
                set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
                        clk_get_rate_all_locked(tegra_pclk));
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
                /* select how much of the CPU complex to gate via CSR ext bits */
                reg = readl(FLOW_CTRL_CPU_CSR(0));
                reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
                if (is_lp_cluster()) {
                        /* for LP cluster, there is no option for rail gating */
                        if ((flags & TEGRA_POWER_CLUSTER_PART_MASK) ==
                                                TEGRA_POWER_CLUSTER_PART_MASK)
                                reg |= FLOW_CTRL_CSR_ENABLE_EXT_EMU;
                        else if (flags)
                                reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
                }
                else {
                        if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL)
                                reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
                        if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
                                reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
                }
                writel(reg, FLOW_CTRL_CPU_CSR(0));
#endif
        }

        if (sleep_time)
                tegra_lp2_set_trigger(sleep_time);

        cpu_cluster_pm_enter();
        suspend_cpu_complex(flags);
        tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_prolog);
        flush_cache_all();
        /*
         * No need to flush complete L2. Cleaning kernel and IO mappings
         * is enough for the LP code sequence that has L2 disabled but
         * MMU on.
         */
        pgd = cpu_get_pgd();
        outer_clean_range(__pa(pgd + USER_PTRS_PER_PGD),
                          __pa(pgd + PTRS_PER_PGD));
        outer_disable();

        /* execution stops here until wake-up/reset */
        tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);

        tegra_init_cache(false);
        tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_switch);
        restore_cpu_complex(flags);
        cpu_cluster_pm_exit();

        remain = tegra_lp2_timer_remain();
        if (sleep_time)
                tegra_lp2_set_trigger(0);

        if (flags & TEGRA_POWER_CLUSTER_MASK) {
                tegra_cluster_switch_epilog(flags);
                trace_cpu_cluster(POWER_CPU_CLUSTER_DONE);
        }
        tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_epilog);

#if INSTRUMENT_CLUSTER_SWITCH
        /* report per-phase cluster-switch latencies recorded above */
        if (flags & TEGRA_POWER_CLUSTER_MASK) {
                pr_err("%s: prolog %lu us, switch %lu us, epilog %lu us, total %lu us\n",
                        is_lp_cluster() ? "G=>LP" : "LP=>G",
                        tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog] -
                        tegra_cluster_switch_times[tegra_cluster_switch_time_id_start],
                        tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch] -
                        tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog],
                        tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
                        tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch],
                        tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
                        tegra_cluster_switch_times[tegra_cluster_switch_time_id_start]);
        }
#endif
        return remain;
}
665
/*
 * alloc_suspend_context
 *
 * Allocate a non-cacheable page to hold the CPU contexts (Trusted
 * Foundations only; a no-op returning 0 otherwise).
 *
 * Returns 0 on success, -ENOMEM on allocation/mapping failure.
 * On success tegra_cpu_context points at the bufferable mapping;
 * on failure it is left NULL and the page is released.
 *
 * Fixes vs. original: use #ifdef (file convention, and robust if the
 * config macro is defined empty), and never pass an IS_ERR() pointer to
 * __free_page()/vm_unmap_ram() on the failure path.
 */
static int alloc_suspend_context(void)
{
#ifdef CONFIG_TRUSTED_FOUNDATIONS
        /* normal-memory non-cacheable (bufferable), execute-never */
        pgprot_t prot = __pgprot_modify(pgprot_kernel, L_PTE_MT_MASK,
                L_PTE_MT_BUFFERABLE | L_PTE_XN);
        struct page *ctx_page;

        /* alloc_pages() returns NULL (never an ERR pointer) on failure */
        ctx_page = alloc_pages(GFP_KERNEL, 0);
        if (!ctx_page)
                return -ENOMEM;

        tegra_cpu_context = vm_map_ram(&ctx_page, 1, -1, prot);
        if (IS_ERR_OR_NULL(tegra_cpu_context)) {
                tegra_cpu_context = NULL;
                __free_page(ctx_page);
                return -ENOMEM;
        }

        return 0;
#else
        return 0;
#endif
}
700
701 static int tegra_common_suspend(void)
702 {
703         void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
704
705         tegra_sctx.mc[0] = readl(mc + MC_SECURITY_START);
706         tegra_sctx.mc[1] = readl(mc + MC_SECURITY_SIZE);
707         tegra_sctx.mc[2] = readl(mc + MC_SECURITY_CFG2);
708
709         /* copy the reset vector and SDRAM shutdown code into IRAM */
710         memcpy(iram_save, iram_code, iram_save_size);
711         memcpy(iram_code, tegra_iram_start(), iram_save_size);
712
713         return 0;
714 }
715
/* Undo tegra_common_suspend(): restore MC security registers and IRAM,
 * and clear the PMC state used during the suspend/resume handshake. */
static void tegra_common_resume(void)
{
        void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
        void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
#endif

        /* Clear DPD sample */
        writel(0x0, pmc + PMC_DPD_SAMPLE);

        writel(tegra_sctx.mc[0], mc + MC_SECURITY_START);
        writel(tegra_sctx.mc[1], mc + MC_SECURITY_SIZE);
        writel(tegra_sctx.mc[2], mc + MC_SECURITY_CFG2);
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
        /* trigger emc mode write */
        writel(EMC_MRW_DEV_NONE, emc + EMC_MRW_0);
#endif
        /* clear scratch registers shared by suspend and the reset pen */
        writel(0x0, pmc + PMC_SCRATCH39);
        writel(0x0, pmc + PMC_SCRATCH41);

        /* restore IRAM */
        memcpy(iram_code, iram_save, iram_save_size);
}
740
/* Late suspend hook: quiesce the Tegra2 stats-monitor IRQ before sleep. */
static int tegra_suspend_prepare_late(void)
{
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
        disable_irq(INT_SYS_STATS_MON);
#endif
        return 0;
}
748
/* Wake hook: re-enable the IRQ disabled in tegra_suspend_prepare_late(). */
static void tegra_suspend_wake(void)
{
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
        enable_irq(INT_SYS_STATS_MON);
#endif
}
755
/*
 * tegra_pm_set - program the PMC for the requested suspend mode.
 * @mode: LP0/LP1/LP2 target state (TEGRA_SUSPEND_NONE returns immediately)
 *
 * Sets the power-request enables, resume entry addresses (scratch
 * registers), warmboot flag (LP0), and the power-good/off timers.
 */
static void tegra_pm_set(enum tegra_suspend_mode mode)
{
        u32 reg, boot_flag;
        unsigned long rate = 32768;     /* default: 32 kHz for LP0/LP1 timers */

        reg = readl(pmc + PMC_CTRL);
        reg |= TEGRA_POWER_CPU_PWRREQ_OE;
        if (pdata->combined_req)
                reg &= ~TEGRA_POWER_PWRREQ_OE;
        else
                reg |= TEGRA_POWER_PWRREQ_OE;
        reg &= ~TEGRA_POWER_EFFECT_LP0;

        switch (mode) {
        case TEGRA_SUSPEND_LP0:
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
                rate = clk_get_rate_all_locked(tegra_pclk);
#endif
                if (pdata->combined_req) {
                        reg |= TEGRA_POWER_PWRREQ_OE;
                        reg &= ~TEGRA_POWER_CPU_PWRREQ_OE;
                }

                /*
                 * LP0 boots through the AVP, which then resumes the AVP to
                 * the address in scratch 39, and the cpu to the address in
                 * scratch 41 to tegra_resume
                 */
                writel(0x0, pmc + PMC_SCRATCH39);

                /* Enable DPD sample to trigger sampling pads data and direction
                 * in which pad will be driven during lp0 mode*/
                writel(0x1, pmc + PMC_DPD_SAMPLE);
#if !defined(CONFIG_ARCH_TEGRA_3x_SOC) && !defined(CONFIG_ARCH_TEGRA_2x_SOC)
                /* request deep power down on the IO pad groups (Tegra11+) */
                writel(0x800fffff, pmc + PMC_IO_DPD_REQ_0);
                writel(0x80001fff, pmc + PMC_IO_DPD2_REQ_0);
#endif
                /* Set warmboot flag */
                boot_flag = readl(pmc + PMC_SCRATCH0);
                pmc_32kwritel(boot_flag | 1, PMC_SCRATCH0);

                pmc_32kwritel(tegra_lp0_vec_start, PMC_SCRATCH1);

                reg |= TEGRA_POWER_EFFECT_LP0;
                /* No break here. LP0 code falls through to write SCRATCH41 */
                /* fall through */
        case TEGRA_SUSPEND_LP1:
                __raw_writel(virt_to_phys(tegra_resume), pmc + PMC_SCRATCH41);
                wmb();
                break;
        case TEGRA_SUSPEND_LP2:
                rate = clk_get_rate(tegra_pclk);
                break;
        case TEGRA_SUSPEND_NONE:
                return;
        default:
                BUG();
        }

        set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer, rate);

        pmc_32kwritel(reg, PMC_CTRL);
}
818
/* Upper-case mode names, indexed by enum tegra_suspend_mode (log output) */
static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
        [TEGRA_SUSPEND_NONE] = "none",
        [TEGRA_SUSPEND_LP2] = "LP2",
        [TEGRA_SUSPEND_LP1] = "LP1",
        [TEGRA_SUSPEND_LP0] = "LP0",
};
825
/*
 * suspend_ops .enter callback: run the board hooks around
 * tegra_suspend_dram() and account the time spent suspended to the
 * DVFS rail statistics.
 *
 * Returns 0 on success or the tegra_suspend_dram() error.
 */
static int tegra_suspend_enter(suspend_state_t state)
{
        int ret;
        ktime_t delta;
        struct timespec ts_entry, ts_exit;

        if (pdata && pdata->board_suspend)
                pdata->board_suspend(current_suspend_mode, TEGRA_SUSPEND_BEFORE_PERIPHERAL);

        /* persistent clock keeps running across suspend, unlike sched_clock */
        read_persistent_clock(&ts_entry);

        ret = tegra_suspend_dram(current_suspend_mode, 0);
        if (ret) {
                pr_info("Aborting suspend, tegra_suspend_dram error=%d\n", ret);
                goto abort_suspend;
        }

        read_persistent_clock(&ts_exit);

        if (timespec_compare(&ts_exit, &ts_entry) > 0) {
                delta = timespec_to_ktime(timespec_sub(ts_exit, ts_entry));

                /* core rail stays on except in LP0 (third arg: rail kept on) */
                tegra_dvfs_rail_pause(tegra_cpu_rail, delta, false);
                if (current_suspend_mode == TEGRA_SUSPEND_LP0)
                        tegra_dvfs_rail_pause(tegra_core_rail, delta, false);
                else
                        tegra_dvfs_rail_pause(tegra_core_rail, delta, true);
        }

abort_suspend:
        if (pdata && pdata->board_resume)
                pdata->board_resume(current_suspend_mode, TEGRA_RESUME_AFTER_PERIPHERAL);

        return ret;
}
861
862 static void tegra_suspend_check_pwr_stats(void)
863 {
864         /* cpus and l2 are powered off later */
865         unsigned long pwrgate_partid_mask =
866 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
867                 (1 << TEGRA_POWERGATE_HEG)      |
868                 (1 << TEGRA_POWERGATE_SATA)     |
869                 (1 << TEGRA_POWERGATE_3D1)      |
870 #endif
871                 (1 << TEGRA_POWERGATE_3D)       |
872                 (1 << TEGRA_POWERGATE_VENC)     |
873                 (1 << TEGRA_POWERGATE_PCIE)     |
874                 (1 << TEGRA_POWERGATE_VDEC)     |
875                 (1 << TEGRA_POWERGATE_MPE);
876
877         int partid;
878
879         for (partid = 0; partid < TEGRA_NUM_POWERGATE; partid++)
880                 if ((1 << partid) & pwrgate_partid_mask)
881                         if (tegra_powergate_is_powered(partid))
882                                 pr_warning("partition %s is left on before suspend\n",
883                                         tegra_powergate_get_name(partid));
884
885         return;
886 }
887
/*
 * Core suspend entry: take the system into LP0/LP1/LP2 and bring it back.
 * The statement order here mirrors hardware requirements (PMC scratch
 * programming, cache flush, CPU complex save/restore) and must not be
 * rearranged.  Returns 0 on success, -ENXIO for an invalid mode.
 */
int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags)
{
	int err = 0;
	/* 0xDEADBEEF is a sentinel meaning "PMC_SCRATCH37 was not saved" */
	u32 scratch37 = 0xDEADBEEF;

	/* Only LP0/LP1/LP2 are legal here; NONE or out-of-range is a bug */
	if (WARN_ON(mode <= TEGRA_SUSPEND_NONE ||
		mode >= TEGRA_MAX_SUSPEND_MODE)) {
		err = -ENXIO;
		goto fail;
	}

	if (tegra_is_voice_call_active()) {
		u32 reg;

		/* backup the current value of scratch37 */
		scratch37 = readl(pmc + PMC_SCRATCH37);

		/* If voice call is active, set a flag in PMC_SCRATCH37 */
		reg = TEGRA_POWER_LP1_AUDIO;
		pmc_32kwritel(reg, PMC_SCRATCH37);
	}

	/* LP0 requires all enabled wake events to be PMC wake sources */
	if ((mode == TEGRA_SUSPEND_LP0) && !tegra_pm_irq_lp0_allowed()) {
		pr_info("LP0 not used due to unsupported wakeup events\n");
		mode = TEGRA_SUSPEND_LP1;
	}

	if ((mode == TEGRA_SUSPEND_LP0) || (mode == TEGRA_SUSPEND_LP1))
		tegra_suspend_check_pwr_stats();

	tegra_common_suspend();

	/* program the PMC for the (possibly downgraded) target mode */
	tegra_pm_set(mode);

	if (pdata && pdata->board_suspend)
		pdata->board_suspend(mode, TEGRA_SUSPEND_BEFORE_CPU);

	local_fiq_disable();

	trace_cpu_suspend(CPU_SUSPEND_START);

	if (mode == TEGRA_SUSPEND_LP0) {
#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
		/* record which CPU cluster to wake up on after LP0 */
		u32 reg = readl(pmc + PMC_SCRATCH4);
		if (is_lp_cluster())
			reg |= PMC_SCRATCH4_WAKE_CLUSTER_MASK;
		else
			reg &= (~PMC_SCRATCH4_WAKE_CLUSTER_MASK);
		pmc_32kwritel(reg, PMC_SCRATCH4);
#endif
		tegra_tsc_suspend();
		tegra_lp0_suspend_mc();
		tegra_cpu_reset_handler_save();
		tegra_tsc_wait_for_suspend();
	}
	else if (mode == TEGRA_SUSPEND_LP1)
		/* flag for the IRAM LP1 code that this CPU entered LP1 */
		*iram_cpu_lp1_mask = 1;

	suspend_cpu_complex(flags);

	/* caches must be clean before the CPU complex loses power */
	flush_cache_all();
	outer_flush_all();
	outer_disable();

	if (mode == TEGRA_SUSPEND_LP2)
		tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
	else
		tegra_sleep_core(mode, PHYS_OFFSET - PAGE_OFFSET);

	/* execution continues here once the sleep call returns on resume */
	tegra_init_cache(true);

	if (mode == TEGRA_SUSPEND_LP0) {
		tegra_tsc_resume();
		tegra_cpu_reset_handler_restore();
		tegra_lp0_resume_mc();
		tegra_tsc_wait_for_resume();
	} else if (mode == TEGRA_SUSPEND_LP1)
		*iram_cpu_lp1_mask = 0;

	/* if scratch37 was clobbered during LP1, restore it */
	if (scratch37 != 0xDEADBEEF)
		pmc_32kwritel(scratch37, PMC_SCRATCH37);

	restore_cpu_complex(flags);

	/* for platforms where the core & CPU power requests are
	 * combined as a single request to the PMU, transition out
	 * of LP0 state by temporarily enabling both requests
	 */
	if (mode == TEGRA_SUSPEND_LP0 && pdata->combined_req) {
		u32 reg;
		reg = readl(pmc + PMC_CTRL);
		reg |= TEGRA_POWER_CPU_PWRREQ_OE;
		pmc_32kwritel(reg, PMC_CTRL);
		reg &= ~TEGRA_POWER_PWRREQ_OE;
		pmc_32kwritel(reg, PMC_CTRL);
	}

	if (pdata && pdata->board_resume)
		pdata->board_resume(mode, TEGRA_RESUME_AFTER_CPU);

	trace_cpu_suspend(CPU_SUSPEND_DONE);

	local_fiq_enable();

	tegra_common_resume();

fail:
	return err;
}
998
999 /*
1000  * Function pointers to optional board specific function
1001  */
1002 void (*tegra_deep_sleep)(int);
1003 EXPORT_SYMBOL(tegra_deep_sleep);
1004
1005 static int tegra_suspend_prepare(void)
1006 {
1007         if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
1008                 tegra_deep_sleep(1);
1009         return 0;
1010 }
1011
1012 static void tegra_suspend_finish(void)
1013 {
1014         if (pdata && pdata->cpu_resume_boost) {
1015                 int ret = tegra_suspended_target(pdata->cpu_resume_boost);
1016                 pr_info("Tegra: resume CPU boost to %u KHz: %s (%d)\n",
1017                         pdata->cpu_resume_boost, ret ? "Failed" : "OK", ret);
1018         }
1019
1020         if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
1021                 tegra_deep_sleep(0);
1022 }
1023
/* Hooks into the generic suspend framework; registered via
 * suspend_set_ops() in tegra_init_suspend().  Only the "mem" sleep
 * state is accepted (suspend_valid_only_mem). */
static const struct platform_suspend_ops tegra_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.prepare	= tegra_suspend_prepare,
	.finish		= tegra_suspend_finish,
	.prepare_late	= tegra_suspend_prepare_late,
	.wake		= tegra_suspend_wake,
	.enter		= tegra_suspend_enter,
};
1032
1033 static ssize_t suspend_mode_show(struct kobject *kobj,
1034                                         struct kobj_attribute *attr, char *buf)
1035 {
1036         char *start = buf;
1037         char *end = buf + PAGE_SIZE;
1038
1039         start += scnprintf(start, end - start, "%s ", \
1040                                 tegra_suspend_name[current_suspend_mode]);
1041         start += scnprintf(start, end - start, "\n");
1042
1043         return start - buf;
1044 }
1045
1046 static ssize_t suspend_mode_store(struct kobject *kobj,
1047                                         struct kobj_attribute *attr,
1048                                         const char *buf, size_t n)
1049 {
1050         int len;
1051         const char *name_ptr;
1052         enum tegra_suspend_mode new_mode;
1053
1054         name_ptr = buf;
1055         while (*name_ptr && !isspace(*name_ptr))
1056                 name_ptr++;
1057         len = name_ptr - buf;
1058         if (!len)
1059                 goto bad_name;
1060         /* TEGRA_SUSPEND_NONE not allowed as suspend state */
1061         if (!(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_NONE], len))
1062                 || !(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_LP2], len))) {
1063                 pr_info("Illegal tegra suspend state: %s\n", buf);
1064                 goto bad_name;
1065         }
1066
1067         for (new_mode = TEGRA_SUSPEND_NONE; \
1068                         new_mode < TEGRA_MAX_SUSPEND_MODE; ++new_mode) {
1069                 if (!strncmp(buf, tegra_suspend_name[new_mode], len)) {
1070                         current_suspend_mode = new_mode;
1071                         break;
1072                 }
1073         }
1074
1075 bad_name:
1076         return n;
1077 }
1078
/* /sys/power/suspend/mode -- world-readable, root-writable (0644) */
static struct kobj_attribute suspend_mode_attribute =
	__ATTR(mode, 0644, suspend_mode_show, suspend_mode_store);

/* parent "suspend" kobject under /sys/power, created in tegra_init_suspend() */
static struct kobject *suspend_kobj;
1083
1084 static int tegra_pm_enter_suspend(void)
1085 {
1086         pr_info("Entering suspend state %s\n", lp_state[current_suspend_mode]);
1087         if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1088                 tegra_lp0_cpu_mode(true);
1089         return 0;
1090 }
1091
1092 static void tegra_pm_enter_resume(void)
1093 {
1094         if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1095                 tegra_lp0_cpu_mode(false);
1096         pr_info("Exited suspend state %s\n", lp_state[current_suspend_mode]);
1097 }
1098
/* Syscore hooks wrapping entry/exit of the LP0 CPU mode; registered at
 * subsys_initcall time below. */
static struct syscore_ops tegra_pm_enter_syscore_ops = {
	.suspend = tegra_pm_enter_suspend,
	.resume = tegra_pm_enter_resume,
};

static __init int tegra_pm_enter_syscore_init(void)
{
	register_syscore_ops(&tegra_pm_enter_syscore_ops);
	return 0;
}
subsys_initcall(tegra_pm_enter_syscore_init);
1110 #endif
1111
1112 void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat)
1113 {
1114         u32 reg;
1115         u32 mode;
1116
1117         tegra_cpu_rail = tegra_dvfs_get_rail_by_name("vdd_cpu");
1118         tegra_core_rail = tegra_dvfs_get_rail_by_name("vdd_core");
1119         tegra_pclk = clk_get_sys(NULL, "pclk");
1120         BUG_ON(IS_ERR(tegra_pclk));
1121         pdata = plat;
1122         (void)reg;
1123         (void)mode;
1124
1125         if (plat->suspend_mode == TEGRA_SUSPEND_LP2)
1126                 plat->suspend_mode = TEGRA_SUSPEND_LP0;
1127
1128 #ifndef CONFIG_PM_SLEEP
1129         if (plat->suspend_mode != TEGRA_SUSPEND_NONE) {
1130                 pr_warning("%s: Suspend requires CONFIG_PM_SLEEP -- "
1131                            "disabling suspend\n", __func__);
1132                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1133         }
1134 #else
1135         if (create_suspend_pgtable() < 0) {
1136                 pr_err("%s: PGD memory alloc failed -- LP0/LP1/LP2 unavailable\n",
1137                                 __func__);
1138                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1139                 goto fail;
1140         }
1141
1142         if (alloc_suspend_context() < 0) {
1143                 pr_err("%s: CPU context alloc failed -- LP0/LP1/LP2 unavailable\n",
1144                         __func__);
1145                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1146                 goto fail;
1147         }
1148
1149         if ((tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) &&
1150             (tegra_revision == TEGRA_REVISION_A01) &&
1151             (plat->suspend_mode == TEGRA_SUSPEND_LP0)) {
1152                 /* Tegra 3 A01 supports only LP1 */
1153                 pr_warning("%s: Suspend mode LP0 is not supported on A01 "
1154                            "-- disabling LP0\n", __func__);
1155                 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1156         }
1157         if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && tegra_lp0_vec_size &&
1158                 tegra_lp0_vec_relocate) {
1159                 unsigned char *reloc_lp0;
1160                 unsigned long tmp;
1161                 void __iomem *orig;
1162                 reloc_lp0 = kmalloc(tegra_lp0_vec_size + L1_CACHE_BYTES - 1,
1163                                         GFP_KERNEL);
1164                 WARN_ON(!reloc_lp0);
1165                 if (!reloc_lp0) {
1166                         pr_err("%s: Failed to allocate reloc_lp0\n",
1167                                 __func__);
1168                         goto out;
1169                 }
1170
1171                 orig = ioremap(tegra_lp0_vec_start, tegra_lp0_vec_size);
1172                 WARN_ON(!orig);
1173                 if (!orig) {
1174                         pr_err("%s: Failed to map tegra_lp0_vec_start %08lx\n",
1175                                 __func__, tegra_lp0_vec_start);
1176                         kfree(reloc_lp0);
1177                         goto out;
1178                 }
1179
1180                 tmp = (unsigned long) reloc_lp0;
1181                 tmp = (tmp + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
1182                 reloc_lp0 = (unsigned char *)tmp;
1183                 memcpy(reloc_lp0, orig, tegra_lp0_vec_size);
1184                 iounmap(orig);
1185                 tegra_lp0_vec_start = virt_to_phys(reloc_lp0);
1186         }
1187
1188 out:
1189         if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && !tegra_lp0_vec_size) {
1190                 pr_warning("%s: Suspend mode LP0 requested, no lp0_vec "
1191                            "provided by bootlader -- disabling LP0\n",
1192                            __func__);
1193                 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1194         }
1195
1196         iram_save_size = tegra_iram_end() - tegra_iram_start();
1197
1198         iram_save = kmalloc(iram_save_size, GFP_KERNEL);
1199         if (!iram_save && (plat->suspend_mode >= TEGRA_SUSPEND_LP1)) {
1200                 pr_err("%s: unable to allocate memory for SDRAM self-refresh "
1201                        "-- LP0/LP1 unavailable\n", __func__);
1202                 plat->suspend_mode = TEGRA_SUSPEND_LP2;
1203         }
1204
1205         /* !!!FIXME!!! THIS IS TEGRA2 ONLY */
1206         /* Initialize scratch registers used for CPU LP2 synchronization */
1207         writel(0, pmc + PMC_SCRATCH37);
1208         writel(0, pmc + PMC_SCRATCH38);
1209         writel(0, pmc + PMC_SCRATCH39);
1210         writel(0, pmc + PMC_SCRATCH41);
1211
1212         /* Always enable CPU power request; just normal polarity is supported */
1213         reg = readl(pmc + PMC_CTRL);
1214         BUG_ON(reg & TEGRA_POWER_CPU_PWRREQ_POLARITY);
1215         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
1216         pmc_32kwritel(reg, PMC_CTRL);
1217
1218         /* Configure core power request and system clock control if LP0
1219            is supported */
1220         __raw_writel(pdata->core_timer, pmc + PMC_COREPWRGOOD_TIMER);
1221         __raw_writel(pdata->core_off_timer, pmc + PMC_COREPWROFF_TIMER);
1222
1223         reg = readl(pmc + PMC_CTRL);
1224
1225         if (!pdata->sysclkreq_high)
1226                 reg |= TEGRA_POWER_SYSCLK_POLARITY;
1227         else
1228                 reg &= ~TEGRA_POWER_SYSCLK_POLARITY;
1229
1230         if (!pdata->corereq_high)
1231                 reg |= TEGRA_POWER_PWRREQ_POLARITY;
1232         else
1233                 reg &= ~TEGRA_POWER_PWRREQ_POLARITY;
1234
1235         /* configure output inverters while the request is tristated */
1236         pmc_32kwritel(reg, PMC_CTRL);
1237
1238         /* now enable requests */
1239         reg |= TEGRA_POWER_SYSCLK_OE;
1240         if (!pdata->combined_req)
1241                 reg |= TEGRA_POWER_PWRREQ_OE;
1242         pmc_32kwritel(reg, PMC_CTRL);
1243
1244         if (pdata->suspend_mode == TEGRA_SUSPEND_LP0)
1245                 tegra_lp0_suspend_init();
1246
1247         suspend_set_ops(&tegra_suspend_ops);
1248
1249         /* Create /sys/power/suspend/type */
1250         suspend_kobj = kobject_create_and_add("suspend", power_kobj);
1251         if (suspend_kobj) {
1252                 if (sysfs_create_file(suspend_kobj, \
1253                                                 &suspend_mode_attribute.attr))
1254                         pr_err("%s: sysfs_create_file suspend type failed!\n",
1255                                                                 __func__);
1256         }
1257
1258         iram_cpu_lp2_mask = tegra_cpu_lp2_mask;
1259         iram_cpu_lp1_mask = tegra_cpu_lp1_mask;
1260
1261         /* clear io dpd settings before kernel */
1262         tegra_bl_io_dpd_cleanup();
1263
1264 fail:
1265 #endif
1266         if (plat->suspend_mode == TEGRA_SUSPEND_NONE)
1267                 tegra_lp2_in_idle(false);
1268
1269         current_suspend_mode = plat->suspend_mode;
1270 }
1271
/* Physical base of the debug UART whose state is saved/restored across
 * suspend; 0 (the default) disables the save/restore hooks below. */
unsigned long debug_uart_port_base = 0;
EXPORT_SYMBOL(debug_uart_port_base);
1274
/*
 * Save the debug UART's LCR, MCR, IER and baud divisor (DLL/DLM) into
 * tegra_sctx.uart[] so the port can be reprogrammed on resume.  The LCR
 * DLAB bit is toggled to bank-switch between IER and the divisor latch;
 * registers are word-spaced, hence the "* 4" offsets.
 */
static int tegra_debug_uart_suspend(void)
{
	void __iomem *uart;
	u32 lcr;

	if (!debug_uart_port_base)
		return 0;	/* no debug UART configured */

	uart = IO_ADDRESS(debug_uart_port_base);

	lcr = readb(uart + UART_LCR * 4);

	tegra_sctx.uart[0] = lcr;
	tegra_sctx.uart[1] = readb(uart + UART_MCR * 4);

	/* DLAB = 0 */
	writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);

	tegra_sctx.uart[2] = readb(uart + UART_IER * 4);

	/* DLAB = 1 */
	writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);

	tegra_sctx.uart[3] = readb(uart + UART_DLL * 4);
	tegra_sctx.uart[4] = readb(uart + UART_DLM * 4);

	/* put the original LCR (and DLAB state) back */
	writeb(lcr, uart + UART_LCR * 4);

	return 0;
}
1305
/*
 * Restore the debug UART state captured by tegra_debug_uart_suspend():
 * MCR, IER, baud divisor and finally the original LCR.  FIFOs are
 * re-enabled explicitly since FCR is write-only and cannot be saved.
 */
static void tegra_debug_uart_resume(void)
{
	void __iomem *uart;
	u32 lcr;

	if (!debug_uart_port_base)
		return;

	uart = IO_ADDRESS(debug_uart_port_base);

	lcr = tegra_sctx.uart[0];

	writeb(tegra_sctx.uart[1], uart + UART_MCR * 4);

	/* DLAB = 0 */
	writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);

	/* re-enable FIFOs with the trigger levels used for the console */
	writeb(UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_01 | UART_FCR_R_TRIG_01,
			uart + UART_FCR * 4);

	writeb(tegra_sctx.uart[2], uart + UART_IER * 4);

	/* DLAB = 1 */
	writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);

	writeb(tegra_sctx.uart[3], uart + UART_DLL * 4);
	writeb(tegra_sctx.uart[4], uart + UART_DLM * 4);

	writeb(lcr, uart + UART_LCR * 4);
}
1336
/* Syscore hooks for the debug UART register save/restore above */
static struct syscore_ops tegra_debug_uart_syscore_ops = {
	.suspend = tegra_debug_uart_suspend,
	.resume = tegra_debug_uart_resume,
};

/* Debug UART clock, gated across suspend when console suspend is enabled;
 * NULL (the default) disables the console clock handling below. */
struct clk *debug_uart_clk = NULL;
EXPORT_SYMBOL(debug_uart_clk);
1344
1345 void tegra_console_uart_suspend(void)
1346 {
1347         if (console_suspend_enabled && debug_uart_clk)
1348                 clk_disable(debug_uart_clk);
1349 }
1350
1351 void tegra_console_uart_resume(void)
1352 {
1353         if (console_suspend_enabled && debug_uart_clk)
1354                 clk_enable(debug_uart_clk);
1355 }
1356
/* Register the debug-UART save/restore hooks at arch_initcall time. */
static int tegra_debug_uart_syscore_init(void)
{
	/* NOTE(review): could carry __init like tegra_pm_enter_syscore_init
	 * above -- confirm there are no post-init callers before changing */
	register_syscore_ops(&tegra_debug_uart_syscore_ops);
	return 0;
}
arch_initcall(tegra_debug_uart_syscore_init);