1 /*
2  * arch/arm/mach-tegra/pm.c
3  *
4  * CPU complex suspend & resume functions for Tegra SoCs
5  *
6  * Copyright (c) 2009-2012, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/ctype.h>
25 #include <linux/init.h>
26 #include <linux/io.h>
27 #include <linux/sched.h>
28 #include <linux/smp.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/clk.h>
32 #include <linux/err.h>
33 #include <linux/debugfs.h>
34 #include <linux/delay.h>
35 #include <linux/suspend.h>
36 #include <linux/slab.h>
37 #include <linux/serial_reg.h>
38 #include <linux/seq_file.h>
39 #include <linux/uaccess.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/cpu_pm.h>
42 #include <linux/clk/tegra.h>
43 #include <linux/export.h>
44 #include <linux/vmalloc.h>
45 #include <linux/memblock.h>
46 #include <linux/console.h>
47 #include <linux/tegra_audio.h>
48
49 #include <trace/events/power.h>
50
51 #include <asm/cacheflush.h>
52 #include <asm/idmap.h>
53 #include <asm/localtimer.h>
54 #include <asm/pgalloc.h>
55 #include <asm/pgtable.h>
56 #include <asm/tlbflush.h>
57 #include <asm/suspend.h>
58 #include <asm/smp_plat.h>
59
60 #include <mach/irqs.h>
61 #include <mach/powergate.h>
62 #include <mach/hardware.h>
63
64 #include "board.h"
65 #include "clock.h"
66 #include "common.h"
67 #include "cpuidle.h"
68 #include "fuse.h"
69 #include "gic.h"
70 #include "iomap.h"
71 #include "pm.h"
72 #include "pm-irq.h"
73 #include "reset.h"
74 #include "sleep.h"
75 #include "timer.h"
76 #include "dvfs.h"
77 #include "cpu-tegra.h"
78
79 struct suspend_context {
80         /*
81          * The next 7 values are referenced by offset in __restart_plls
82          * in headsmp-t2.S, and should not be moved
83          */
84         u32 pllx_misc;
85         u32 pllx_base;
86         u32 pllp_misc;
87         u32 pllp_base;
88         u32 pllp_outa;
89         u32 pllp_outb;
90         u32 pll_timeout;
91
92         u32 cpu_burst;
93         u32 clk_csite_src;
94         u32 cclk_divider;
95
96         u32 mc[3];
97         u8 uart[5];
98
99         struct tegra_twd_context twd;
100 #ifdef CONFIG_ARM_ARCH_TIMER
101         struct arch_timer_context arch_timer;
102 #endif
103 };
104
105 #ifdef CONFIG_PM_SLEEP
106 void *tegra_cpu_context;        /* non-cacheable page for CPU context */
107 phys_addr_t tegra_pgd_phys;     /* pgd used by hotplug & LP2 bootup */
108 static pgd_t *tegra_pgd;
109 static DEFINE_SPINLOCK(tegra_lp2_lock);
110 static cpumask_t tegra_in_lp2;
111 static cpumask_t *iram_cpu_lp2_mask;
112 static unsigned long *iram_cpu_lp1_mask;
113 static u8 *iram_save;
114 static unsigned long iram_save_size;
115 static void __iomem *iram_code = IO_ADDRESS(TEGRA_IRAM_CODE_AREA);
116 static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
117 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
118 static int tegra_last_pclk;
119 #endif
120
121 struct suspend_context tegra_sctx;
122
123 #define TEGRA_POWER_PWRREQ_POLARITY     (1 << 8)   /* core power request polarity */
124 #define TEGRA_POWER_PWRREQ_OE           (1 << 9)   /* core power request enable */
125 #define TEGRA_POWER_SYSCLK_POLARITY     (1 << 10)  /* sys clk polarity */
126 #define TEGRA_POWER_SYSCLK_OE           (1 << 11)  /* system clock enable */
127 #define TEGRA_POWER_PWRGATE_DIS         (1 << 12)  /* power gate disabled */
128 #define TEGRA_POWER_EFFECT_LP0          (1 << 14)  /* enter LP0 when CPU pwr gated */
129 #define TEGRA_POWER_CPU_PWRREQ_POLARITY (1 << 15)  /* CPU power request polarity */
130 #define TEGRA_POWER_CPU_PWRREQ_OE       (1 << 16)  /* CPU power request enable */
131 #define TEGRA_POWER_CPUPWRGOOD_EN       (1 << 19)  /* CPU power good enable */
132
133 #define PMC_CTRL                0x0
134 #define PMC_CTRL_LATCH_WAKEUPS  (1 << 5)
135 #define PMC_WAKE_MASK           0xc
136 #define PMC_WAKE_LEVEL          0x10
137 #define PMC_DPAD_ORIDE          0x1C
138 #define PMC_WAKE_DELAY          0xe0
139 #define PMC_DPD_SAMPLE          0x20
140 #define PMC_IO_DPD_REQ_0        0x1b8
141 #define PMC_IO_DPD2_REQ_0       0x1c0
142
143 #define PMC_WAKE_STATUS         0x14
144 #define PMC_SW_WAKE_STATUS      0x18
145 #define PMC_COREPWRGOOD_TIMER   0x3c
146 #define PMC_CPUPWRGOOD_TIMER    0xc8
147 #define PMC_CPUPWROFF_TIMER     0xcc
148 #define PMC_COREPWROFF_TIMER    PMC_WAKE_DELAY
149
150 #define PMC_PWRGATE_TOGGLE      0x30
151 #define PWRGATE_TOGGLE_START    (1 << 8)
152 #define UN_PWRGATE_CPU          \
153         (PWRGATE_TOGGLE_START | TEGRA_CPU_POWERGATE_ID(TEGRA_POWERGATE_CPU))
154
155 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
156 #define PMC_SCRATCH4_WAKE_CLUSTER_MASK  (1<<31)
157 #endif
158
159 #define CLK_RESET_CCLK_BURST    0x20
160 #define CLK_RESET_CCLK_DIVIDER  0x24
161 #define CLK_RESET_PLLC_BASE     0x80
162 #define CLK_RESET_PLLM_BASE     0x90
163 #define CLK_RESET_PLLX_BASE     0xe0
164 #define CLK_RESET_PLLX_MISC     0xe4
165 #define CLK_RESET_PLLP_BASE     0xa0
166 #define CLK_RESET_PLLP_OUTA     0xa4
167 #define CLK_RESET_PLLP_OUTB     0xa8
168 #define CLK_RESET_PLLP_MISC     0xac
169
170 #define CLK_RESET_SOURCE_CSITE  0x1d4
171
172 #define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
173 #define CLK_RESET_CCLK_RUN_POLICY_SHIFT    4
174 #define CLK_RESET_CCLK_IDLE_POLICY_SHIFT   0
175 #define CLK_RESET_CCLK_IDLE_POLICY         1
176 #define CLK_RESET_CCLK_RUN_POLICY          2
177 #define CLK_RESET_CCLK_BURST_POLICY_PLLM   3
178 #define CLK_RESET_CCLK_BURST_POLICY_PLLX   8
179
180 #define EMC_MRW_0               0x0e8
181 #define EMC_MRW_DEV_SELECTN     30
182 #define EMC_MRW_DEV_NONE        (3 << EMC_MRW_DEV_SELECTN)
183
184 #define MC_SECURITY_START       0x6c
185 #define MC_SECURITY_SIZE        0x70
186 #define MC_SECURITY_CFG2        0x7c
187
188 static struct clk *tegra_pclk;
189 static const struct tegra_suspend_platform_data *pdata;
190 static enum tegra_suspend_mode current_suspend_mode = TEGRA_SUSPEND_NONE;
191
192 #if defined(CONFIG_TEGRA_CLUSTER_CONTROL) && INSTRUMENT_CLUSTER_SWITCH
193 enum tegra_cluster_switch_time_id {
194         tegra_cluster_switch_time_id_start = 0,
195         tegra_cluster_switch_time_id_prolog,
196         tegra_cluster_switch_time_id_switch,
197         tegra_cluster_switch_time_id_epilog,
198         tegra_cluster_switch_time_id_max
199 };
200
201 static unsigned long
202                 tegra_cluster_switch_times[tegra_cluster_switch_time_id_max];
203 #define tegra_cluster_switch_time(flags, id) \
204         do { \
205                 barrier(); \
206                 if (flags & TEGRA_POWER_CLUSTER_MASK) { \
207                         void __iomem *timer_us = \
208                                                 IO_ADDRESS(TEGRA_TMRUS_BASE); \
209                         if (id < tegra_cluster_switch_time_id_max) \
210                                 tegra_cluster_switch_times[id] = \
211                                                         readl(timer_us); \
212                         wmb(); \
213                 } \
214                 barrier(); \
215         } while (0)
216 #else
217 #define tegra_cluster_switch_time(flags, id) do {} while (0)
218 #endif
219
220 #ifdef CONFIG_PM_SLEEP
221 static const char *tegra_suspend_name[TEGRA_MAX_SUSPEND_MODE] = {
222         [TEGRA_SUSPEND_NONE]    = "none",
223         [TEGRA_SUSPEND_LP2]     = "lp2",
224         [TEGRA_SUSPEND_LP1]     = "lp1",
225         [TEGRA_SUSPEND_LP0]     = "lp0",
226 };
227
228 unsigned long tegra_cpu_power_good_time(void)
229 {
230         if (WARN_ON_ONCE(!pdata))
231                 return 5000;
232
233         return pdata->cpu_timer;
234 }
235
236 unsigned long tegra_cpu_power_off_time(void)
237 {
238         if (WARN_ON_ONCE(!pdata))
239                 return 5000;
240
241         return pdata->cpu_off_timer;
242 }
243
244 unsigned long tegra_cpu_lp2_min_residency(void)
245 {
246         if (WARN_ON_ONCE(!pdata))
247                 return 2000;
248
249         return pdata->cpu_lp2_min_residency;
250 }
251
252 /*
253  * create_suspend_pgtable
254  *
255  * Creates a page table with identity mappings of physical memory and IRAM
256  * for use when the MMU is off, in addition to all the regular kernel mappings.
257  */
258 static __init int create_suspend_pgtable(void)
259 {
260         tegra_pgd = pgd_alloc(&init_mm);
261         if (!tegra_pgd)
262                 return -ENOMEM;
263
264         /* Only identity-map size of lowmem (high_memory - PAGE_OFFSET) */
265         identity_mapping_add(tegra_pgd, phys_to_virt(PHYS_OFFSET),
266                 high_memory, 0);
267         identity_mapping_add(tegra_pgd, IO_IRAM_VIRT,
268                 IO_IRAM_VIRT + SECTION_SIZE, 0);
269
270         /* inner/outer write-back/write-allocate, sharable */
271         tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK) | 0x4A;
272
273         return 0;
274 }
275 /*
276  * alloc_suspend_context
277  *
278  * Allocate a non-cacheable page to hold the CPU contexts.
279  * The standard ARM CPU context save functions don't work if there's
280  * an external L2 cache controller (like a PL310) in system.
281  */
282 static __init int alloc_suspend_context(void)
283 {
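        /*
         * The page is remapped as normal non-cacheable (bufferable) and
         * non-executable memory, so the saved CPU context is not left
         * stranded in the L1/L2 caches while they are disabled around LP2.
         */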
284         pgprot_t prot = __pgprot_modify(pgprot_kernel, L_PTE_MT_MASK,
285                                         L_PTE_MT_BUFFERABLE | L_PTE_XN);
286         struct page *ctx_page;
287
288         ctx_page = alloc_pages(GFP_KERNEL, 0);
289         if (IS_ERR_OR_NULL(ctx_page))
290                 goto fail;
291
292         tegra_cpu_context = vm_map_ram(&ctx_page, 1, -1, prot);
293         if (IS_ERR_OR_NULL(tegra_cpu_context))
294                 goto fail;
295
296         return 0;
297
298 fail:
299         if (!IS_ERR(ctx_page) && ctx_page)
300                 __free_page(ctx_page);
301         if (!IS_ERR(tegra_cpu_context) && tegra_cpu_context)
302                 vm_unmap_ram((void *)tegra_cpu_context, 1);
303         tegra_cpu_context = NULL;
304         return -ENOMEM;
305 }
306
307 /* ensure that sufficient time has passed for a register write to
308  * serialize into the 32KHz domain (one 32KHz period is ~30.5us) */
309 static void pmc_32kwritel(u32 val, unsigned long offs)
310 {
311         writel(val, pmc + offs);
312         udelay(130);
313 }
314
315 static void set_power_timers(unsigned long us_on, unsigned long us_off,
316                              long rate)
317 {
318         static unsigned long last_us_off = 0;
319         unsigned long long ticks;
320         unsigned long long pclk;
321
322         if (WARN_ON_ONCE(rate <= 0))
323                 pclk = 100000000;
324         else
325                 pclk = rate;
326
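        /*
         * Convert microseconds to pclk ticks, rounding up:
         *   ticks = DIV_ROUND_UP(us * pclk, 1000000)
         * e.g. 100us at a 100MHz pclk programs 10000 ticks.
         */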
327         if ((rate != tegra_last_pclk) || (us_off != last_us_off)) {
328                 ticks = (us_on * pclk) + 999999ull;
329                 do_div(ticks, 1000000);
330                 writel((unsigned long)ticks, pmc + PMC_CPUPWRGOOD_TIMER);
331
332                 ticks = (us_off * pclk) + 999999ull;
333                 do_div(ticks, 1000000);
334                 writel((unsigned long)ticks, pmc + PMC_CPUPWROFF_TIMER);
335                 wmb();
336         }
337         tegra_last_pclk = pclk;
338         last_us_off = us_off;
339 }
340
341 /*
342  * restore_cpu_complex
343  *
344  * restores cpu clock setting, clears flow controller
345  *
346  * Always called on CPU 0.
347  */
348 static void restore_cpu_complex(u32 mode)
349 {
350         int cpu = smp_processor_id();
351         unsigned int reg;
352 #if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
353         unsigned int policy;
354 #endif
355
356         BUG_ON(cpu != 0);
357
358 #ifdef CONFIG_SMP
359         cpu = cpu_logical_map(cpu);
360 #endif
361
362 /*
363  * On Tegra11x, PLLX and the CPU burst policy are either preserved across
364  * LP2 or restored by the common clock suspend/resume procedures, so they
365  * do not need to be restored here.
366  */
367 #if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
368         /* Is CPU complex already running on PLLX? */
369         reg = readl(clk_rst + CLK_RESET_CCLK_BURST);
370         policy = (reg >> CLK_RESET_CCLK_BURST_POLICY_SHIFT) & 0xF;
371
372         if (policy == CLK_RESET_CCLK_IDLE_POLICY)
373                 reg = (reg >> CLK_RESET_CCLK_IDLE_POLICY_SHIFT) & 0xF;
374         else if (policy == CLK_RESET_CCLK_RUN_POLICY)
375                 reg = (reg >> CLK_RESET_CCLK_RUN_POLICY_SHIFT) & 0xF;
376         else
377                 BUG();
378
379         if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
380                 /* restore PLLX settings if CPU is on different PLL */
381                 writel(tegra_sctx.pllx_misc, clk_rst + CLK_RESET_PLLX_MISC);
382                 writel(tegra_sctx.pllx_base, clk_rst + CLK_RESET_PLLX_BASE);
383
384                 /* wait for PLL stabilization if PLLX was enabled */
385                 if (tegra_sctx.pllx_base & (1<<30)) {
386 #if USE_PLL_LOCK_BITS
387                         /* Enable lock detector */
388                         reg = readl(clk_rst + CLK_RESET_PLLX_MISC);
389                         reg |= 1<<18;
390                         writel(reg, clk_rst + CLK_RESET_PLLX_MISC);
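                        /* poll the PLLX lock status bit in PLLX_BASE */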
391                         while (!(readl(clk_rst + CLK_RESET_PLLX_BASE) &
392                                  (1<<27)))
393                                 cpu_relax();
394
395                         udelay(PLL_POST_LOCK_DELAY);
396 #else
397                         udelay(300);
398 #endif
399                 }
400         }
401
402         /* Restore original burst policy setting for calls resulting from CPU
403            LP2 in idle or system suspend; keep cluster switch prolog setting
404            intact. */
405         if (!(mode & TEGRA_POWER_CLUSTER_MASK)) {
406                 writel(tegra_sctx.cclk_divider, clk_rst +
407                        CLK_RESET_CCLK_DIVIDER);
408                 writel(tegra_sctx.cpu_burst, clk_rst +
409                        CLK_RESET_CCLK_BURST);
410         }
411 #endif
412         writel(tegra_sctx.clk_csite_src, clk_rst + CLK_RESET_SOURCE_CSITE);
413
414         /* Do not power-gate CPU 0 when flow controlled */
415         reg = readl(FLOW_CTRL_CPU_CSR(cpu));
416         reg &= ~FLOW_CTRL_CSR_WFE_BITMAP;       /* clear wfe bitmap */
417         reg &= ~FLOW_CTRL_CSR_WFI_BITMAP;       /* clear wfi bitmap */
418         reg &= ~FLOW_CTRL_CSR_ENABLE;           /* clear enable */
419         reg |= FLOW_CTRL_CSR_INTR_FLAG;         /* clear intr */
420         reg |= FLOW_CTRL_CSR_EVENT_FLAG;        /* clear event */
421         flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
422
423         /* If an immediate cluster switch is being performed, restore the
424            local timer registers. For calls resulting from CPU LP2 in
425            idle or system suspend, the local timer was shut down and
426            timekeeping switched over to the global system timer. In this
427            case keep the local timer disabled and restore only the periodic load. */
428         if (!(mode & (TEGRA_POWER_CLUSTER_MASK |
429                       TEGRA_POWER_CLUSTER_IMMEDIATE))) {
430 #ifdef CONFIG_ARM_ARCH_TIMER
431                 tegra_sctx.arch_timer.cntp_ctl = 0;
432 #endif
433 #ifdef CONFIG_HAVE_ARM_TWD
434                 tegra_sctx.twd.twd_ctrl = 0;
435 #endif
436         }
437 #ifdef CONFIG_ARM_ARCH_TIMER
438         arch_timer_resume(&tegra_sctx.arch_timer);
439 #endif
440 #ifdef CONFIG_HAVE_ARM_TWD
441         tegra_twd_resume(&tegra_sctx.twd);
442 #endif
443 }
444
445 /*
446  * suspend_cpu_complex
447  *
448  * saves pll state for use by restart_plls, prepares flow controller for
449  * transition to suspend state
450  *
451  * Must always be called on cpu 0.
452  */
453 static void suspend_cpu_complex(u32 mode)
454 {
455         int cpu = smp_processor_id();
456         unsigned int reg;
457         int i;
458
459         BUG_ON(cpu != 0);
460
461 #ifdef CONFIG_SMP
462         cpu = cpu_logical_map(cpu);
463 #endif
464         /* switch coresite to clk_m, save off original source */
465         tegra_sctx.clk_csite_src = readl(clk_rst + CLK_RESET_SOURCE_CSITE);
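        /* source select value 3 in the top bits of the CSITE register = clk_m */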
466         writel(3<<30, clk_rst + CLK_RESET_SOURCE_CSITE);
467
468         tegra_sctx.cpu_burst = readl(clk_rst + CLK_RESET_CCLK_BURST);
469         tegra_sctx.pllx_base = readl(clk_rst + CLK_RESET_PLLX_BASE);
470         tegra_sctx.pllx_misc = readl(clk_rst + CLK_RESET_PLLX_MISC);
471         tegra_sctx.pllp_base = readl(clk_rst + CLK_RESET_PLLP_BASE);
472         tegra_sctx.pllp_outa = readl(clk_rst + CLK_RESET_PLLP_OUTA);
473         tegra_sctx.pllp_outb = readl(clk_rst + CLK_RESET_PLLP_OUTB);
474         tegra_sctx.pllp_misc = readl(clk_rst + CLK_RESET_PLLP_MISC);
475         tegra_sctx.cclk_divider = readl(clk_rst + CLK_RESET_CCLK_DIVIDER);
476
477 #ifdef CONFIG_HAVE_ARM_TWD
478         tegra_twd_suspend(&tegra_sctx.twd);
479 #endif
480 #ifdef CONFIG_ARM_ARCH_TIMER
481         arch_timer_suspend(&tegra_sctx.arch_timer);
482 #endif
483
484         reg = readl(FLOW_CTRL_CPU_CSR(cpu));
485         reg &= ~FLOW_CTRL_CSR_WFE_BITMAP;       /* clear wfe bitmap */
486         reg &= ~FLOW_CTRL_CSR_WFI_BITMAP;       /* clear wfi bitmap */
487         reg |= FLOW_CTRL_CSR_INTR_FLAG;         /* clear intr flag */
488         reg |= FLOW_CTRL_CSR_EVENT_FLAG;        /* clear event flag */
489 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
490         reg |= FLOW_CTRL_CSR_WFE_CPU0 << cpu;   /* enable power gating on wfe */
491 #else
492         reg |= FLOW_CTRL_CSR_WFI_CPU0 << cpu;   /* enable power gating on wfi */
493 #endif
494         reg |= FLOW_CTRL_CSR_ENABLE;            /* enable power gating */
495         flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
496
497         for (i = 0; i < num_possible_cpus(); i++) {
498                 if (i == cpu)
499                         continue;
500                 reg = readl(FLOW_CTRL_CPU_CSR(i));
501                 reg |= FLOW_CTRL_CSR_EVENT_FLAG;
502                 reg |= FLOW_CTRL_CSR_INTR_FLAG;
503                 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(i));
504         }
505
506         tegra_gic_cpu_disable(true);
507 }
508
509 void tegra_clear_cpu_in_lp2(int cpu)
510 {
511         spin_lock(&tegra_lp2_lock);
512         BUG_ON(!cpumask_test_cpu(cpu, &tegra_in_lp2));
513         cpumask_clear_cpu(cpu, &tegra_in_lp2);
514
515         /* Update the IRAM copy used by the reset handler. The IRAM copy
516            can't be used directly by cpumask_clear_cpu() because it uses
517            LDREX/STREX which requires the addressed location to be inner
518            cacheable and sharable which IRAM isn't. */
519         writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
520         dsb();
521
522         spin_unlock(&tegra_lp2_lock);
523 }
524
525 bool tegra_set_cpu_in_lp2(int cpu)
526 {
527         bool last_cpu = false;
528
529         spin_lock(&tegra_lp2_lock);
530         BUG_ON(cpumask_test_cpu(cpu, &tegra_in_lp2));
531         cpumask_set_cpu(cpu, &tegra_in_lp2);
532
533         /* Update the IRAM copy used by the reset handler. The IRAM copy
534            can't be used directly by cpumask_set_cpu() because it uses
535            LDREX/STREX which requires the addressed location to be inner
536            cacheable and sharable which IRAM isn't. */
537         writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
538         dsb();
539
540         if ((cpu == 0) && cpumask_equal(&tegra_in_lp2, cpu_online_mask))
541                 last_cpu = true;
542 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
543         else if (cpu == 1)
544                 tegra2_cpu_set_resettable_soon();
545 #endif
546
547         spin_unlock(&tegra_lp2_lock);
548         return last_cpu;
549 }
550
551 bool tegra_is_cpu_in_lp2(int cpu)
552 {
553         bool in_lp2;
554
555         spin_lock(&tegra_lp2_lock);
556         in_lp2 = cpumask_test_cpu(cpu, &tegra_in_lp2);
557         spin_unlock(&tegra_lp2_lock);
558         return in_lp2;
559 }
560
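/*
 * v2p below is the kernel's virtual-to-physical translation offset; it is
 * handed to cpu_suspend() so the assembly sleep finishers can switch to
 * physical addressing before the MMU is turned off.
 */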
561 static void tegra_sleep_core(enum tegra_suspend_mode mode,
562                              unsigned long v2p)
563 {
564 #ifdef CONFIG_TRUSTED_FOUNDATIONS
565         if (mode == TEGRA_SUSPEND_LP0) {
566                 tegra_generic_smc_uncached(0xFFFFFFFC, 0xFFFFFFE3,
567                                   virt_to_phys(tegra_resume));
568         } else {
569                 tegra_generic_smc_uncached(0xFFFFFFFC, 0xFFFFFFE6,
570                                   (TEGRA_RESET_HANDLER_BASE +
571                                    tegra_cpu_reset_handler_offset));
572         }
573 #endif
574 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
575         cpu_suspend(v2p, tegra2_sleep_core_finish);
576 #else
577         cpu_suspend(v2p, tegra3_sleep_core_finish);
578 #endif
579 }
580
581 static inline void tegra_sleep_cpu(unsigned long v2p)
582 {
583 #ifdef CONFIG_TRUSTED_FOUNDATIONS
584         tegra_generic_smc_uncached(0xFFFFFFFC, 0xFFFFFFE4,
585                           (TEGRA_RESET_HANDLER_BASE +
586                            tegra_cpu_reset_handler_offset));
587 #endif
588         cpu_suspend(v2p, tegra_sleep_cpu_finish);
589 }
590
591 unsigned int tegra_idle_lp2_last(unsigned int sleep_time, unsigned int flags)
592 {
593         u32 reg;
594         unsigned int remain;
595         pgd_t *pgd;
596
597         /* Only the last cpu down does the final suspend steps */
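        /* assert the CPU power request and make sure the LP0 bit is cleared;
           with combined_req the core and CPU requests are combined into a
           single request to the PMU, so the core output enable is cleared */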
598         reg = readl(pmc + PMC_CTRL);
599         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
600         if (pdata->combined_req)
601                 reg &= ~TEGRA_POWER_PWRREQ_OE;
602         else
603                 reg |= TEGRA_POWER_PWRREQ_OE;
604
605         reg &= ~TEGRA_POWER_EFFECT_LP0;
606         writel(reg, pmc + PMC_CTRL);
607
608         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);
609
610         /*
611          * We can use clk_get_rate_all_locked() here, because all other cpus
612          * are in LP2 state and irqs are disabled
613          */
614         if (flags & TEGRA_POWER_CLUSTER_MASK) {
615                 if (is_idle_task(current))
616                         trace_cpu_cluster_rcuidle(POWER_CPU_CLUSTER_START);
617                 else
618                         trace_cpu_cluster(POWER_CPU_CLUSTER_START);
619                 set_power_timers(pdata->cpu_timer, 0,
620                         clk_get_rate_all_locked(tegra_pclk));
621                 if (flags & TEGRA_POWER_CLUSTER_G) {
622                         /*
623                          * To reduce the vdd_cpu ramp-up latency of an
624                          * LP->G transition, enable the vdd_cpu rail
625                          * before the transition.
626                          */
627                         if (is_lp_cluster())
628                                 writel(UN_PWRGATE_CPU,
629                                        pmc + PMC_PWRGATE_TOGGLE);
630                 }
631                 tegra_cluster_switch_prolog(flags);
632         } else {
633                 set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
634                         clk_get_rate_all_locked(tegra_pclk));
635 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
636                 reg = readl(FLOW_CTRL_CPU_CSR(0));
637                 reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
638                 if (is_lp_cluster()) {
639                         /* for LP cluster, there is no option for rail gating */
640                         if ((flags & TEGRA_POWER_CLUSTER_PART_MASK) ==
641                                                 TEGRA_POWER_CLUSTER_PART_MASK)
642                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_EMU;
643                         else if (flags)
644                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
645                 }
646                 else {
647                         if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL)
648                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
649                         if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
650                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
651                 }
652                 writel(reg, FLOW_CTRL_CPU_CSR(0));
653 #endif
654         }
655
656         if (sleep_time)
657                 tegra_lp2_set_trigger(sleep_time);
658
659         cpu_cluster_pm_enter();
660         suspend_cpu_complex(flags);
661         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_prolog);
662         flush_cache_all();
663         /*
664          * No need to flush complete L2. Cleaning kernel and IO mappings
665          * is enough for the LP code sequence that has L2 disabled but
666          * MMU on.
667          */
668         pgd = cpu_get_pgd();
669         outer_clean_range(__pa(pgd + USER_PTRS_PER_PGD),
670                           __pa(pgd + PTRS_PER_PGD));
671         outer_disable();
672
673         tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
674
675         tegra_init_cache(false);
676         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_switch);
677         restore_cpu_complex(flags);
678         cpu_cluster_pm_exit();
679
680         remain = tegra_lp2_timer_remain();
681         if (sleep_time)
682                 tegra_lp2_set_trigger(0);
683
684         if (flags & TEGRA_POWER_CLUSTER_MASK) {
685                 tegra_cluster_switch_epilog(flags);
686                 if (is_idle_task(current))
687                         trace_cpu_cluster_rcuidle(POWER_CPU_CLUSTER_DONE);
688                 else
689                         trace_cpu_cluster(POWER_CPU_CLUSTER_DONE);
690         }
691         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_epilog);
692
693 #if INSTRUMENT_CLUSTER_SWITCH
694         if (flags & TEGRA_POWER_CLUSTER_MASK) {
695                 pr_err("%s: prolog %lu us, switch %lu us, epilog %lu us, total %lu us\n",
696                         is_lp_cluster() ? "G=>LP" : "LP=>G",
697                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog] -
698                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_start],
699                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch] -
700                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog],
701                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
702                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch],
703                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
704                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_start]);
705         }
706 #endif
707         return remain;
708 }
709
710 static int tegra_common_suspend(void)
711 {
712         void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
713
714         tegra_sctx.mc[0] = readl(mc + MC_SECURITY_START);
715         tegra_sctx.mc[1] = readl(mc + MC_SECURITY_SIZE);
716         tegra_sctx.mc[2] = readl(mc + MC_SECURITY_CFG2);
717
718         /* copy the reset vector and SDRAM shutdown code into IRAM */
719         memcpy(iram_save, iram_code, iram_save_size);
720         memcpy(iram_code, tegra_iram_start(), iram_save_size);
721
722         return 0;
723 }
724
725 static void tegra_common_resume(void)
726 {
727         void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
728 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
729         void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
730 #endif
731
732         /* Clear DPD sample */
733         writel(0x0, pmc + PMC_DPD_SAMPLE);
734
735         writel(tegra_sctx.mc[0], mc + MC_SECURITY_START);
736         writel(tegra_sctx.mc[1], mc + MC_SECURITY_SIZE);
737         writel(tegra_sctx.mc[2], mc + MC_SECURITY_CFG2);
738 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
739         /* trigger emc mode write */
740         writel(EMC_MRW_DEV_NONE, emc + EMC_MRW_0);
741 #endif
742         /* clear the scratch registers shared by suspend and the reset handler */
743         writel(0x0, pmc + PMC_SCRATCH39);
744         writel(0x0, pmc + PMC_SCRATCH41);
745
746         /* restore IRAM */
747         memcpy(iram_code, iram_save, iram_save_size);
748 }
749
750 static int tegra_suspend_prepare_late(void)
751 {
752 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
753         disable_irq(INT_SYS_STATS_MON);
754 #endif
755         return 0;
756 }
757
758 static void tegra_suspend_wake(void)
759 {
760 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
761         enable_irq(INT_SYS_STATS_MON);
762 #endif
763 }
764
765 static void tegra_pm_set(enum tegra_suspend_mode mode)
766 {
767         u32 reg, boot_flag;
768         unsigned long rate = 32768;
769
770         reg = readl(pmc + PMC_CTRL);
771         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
772         if (pdata->combined_req)
773                 reg &= ~TEGRA_POWER_PWRREQ_OE;
774         else
775                 reg |= TEGRA_POWER_PWRREQ_OE;
776         reg &= ~TEGRA_POWER_EFFECT_LP0;
777
778         switch (mode) {
779         case TEGRA_SUSPEND_LP0:
780 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
781                 rate = clk_get_rate_all_locked(tegra_pclk);
782 #endif
783                 if (pdata->combined_req) {
784                         reg |= TEGRA_POWER_PWRREQ_OE;
785                         reg &= ~TEGRA_POWER_CPU_PWRREQ_OE;
786                 }
787
788                 /*
789                  * LP0 resume boots through the AVP, which then resumes the
790                  * AVP at the address in SCRATCH39 and the CPU at the
791                  * address in SCRATCH41 (tegra_resume).
792                  */
793                 writel(0x0, pmc + PMC_SCRATCH39);
794
795                 /* Enable DPD sample to trigger sampling of the pad data and
796                  * the direction in which each pad will be driven during LP0 mode */
797                 writel(0x1, pmc + PMC_DPD_SAMPLE);
798 #if !defined(CONFIG_ARCH_TEGRA_3x_SOC) && !defined(CONFIG_ARCH_TEGRA_2x_SOC)
799                 writel(0x800fffff, pmc + PMC_IO_DPD_REQ_0);
800                 writel(0x80001fff, pmc + PMC_IO_DPD2_REQ_0);
801 #endif
802 #ifdef CONFIG_ARCH_TEGRA_11x_SOC
803                 /* this is needed only for T11x, not for other chips */
804                 reg &= ~TEGRA_POWER_CPUPWRGOOD_EN;
805 #endif
806
807                 /* Set warmboot flag */
808                 boot_flag = readl(pmc + PMC_SCRATCH0);
809                 pmc_32kwritel(boot_flag | 1, PMC_SCRATCH0);
810
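                /* physical address of the LP0 warm boot code used on resume */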
811                 pmc_32kwritel(tegra_lp0_vec_start, PMC_SCRATCH1);
812
813                 reg |= TEGRA_POWER_EFFECT_LP0;
814                 /* No break here. LP0 code falls through to write SCRATCH41 */
815         case TEGRA_SUSPEND_LP1:
816                 __raw_writel(virt_to_phys(tegra_resume), pmc + PMC_SCRATCH41);
817                 wmb();
818                 break;
819         case TEGRA_SUSPEND_LP2:
820                 rate = clk_get_rate(tegra_pclk);
821                 break;
822         case TEGRA_SUSPEND_NONE:
823                 return;
824         default:
825                 BUG();
826         }
827
828         set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer, rate);
829
830         pmc_32kwritel(reg, PMC_CTRL);
831 }
832
833 static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
834         [TEGRA_SUSPEND_NONE] = "none",
835         [TEGRA_SUSPEND_LP2] = "LP2",
836         [TEGRA_SUSPEND_LP1] = "LP1",
837         [TEGRA_SUSPEND_LP0] = "LP0",
838 };
839
840 static int tegra_suspend_enter(suspend_state_t state)
841 {
842         int ret;
843         ktime_t delta;
844         struct timespec ts_entry, ts_exit;
845
846         if (pdata && pdata->board_suspend)
847                 pdata->board_suspend(current_suspend_mode, TEGRA_SUSPEND_BEFORE_PERIPHERAL);
848
849         read_persistent_clock(&ts_entry);
850
851         ret = tegra_suspend_dram(current_suspend_mode, 0);
852         if (ret) {
853                 pr_info("Aborting suspend, tegra_suspend_dram error=%d\n", ret);
854                 goto abort_suspend;
855         }
856
857         read_persistent_clock(&ts_exit);
858
859         if (timespec_compare(&ts_exit, &ts_entry) > 0) {
860                 delta = timespec_to_ktime(timespec_sub(ts_exit, ts_entry));
861
862                 tegra_dvfs_rail_pause(tegra_cpu_rail, delta, false);
863                 if (current_suspend_mode == TEGRA_SUSPEND_LP0)
864                         tegra_dvfs_rail_pause(tegra_core_rail, delta, false);
865                 else
866                         tegra_dvfs_rail_pause(tegra_core_rail, delta, true);
867         }
868
869 abort_suspend:
870         if (pdata && pdata->board_resume)
871                 pdata->board_resume(current_suspend_mode, TEGRA_RESUME_AFTER_PERIPHERAL);
872
873         return ret;
874 }
875
876 static void tegra_suspend_check_pwr_stats(void)
877 {
878         /* cpus and l2 are powered off later */
879         unsigned long pwrgate_partid_mask =
880 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
881                 (1 << TEGRA_POWERGATE_HEG)      |
882                 (1 << TEGRA_POWERGATE_SATA)     |
883                 (1 << TEGRA_POWERGATE_3D1)      |
884 #endif
885                 (1 << TEGRA_POWERGATE_3D)       |
886                 (1 << TEGRA_POWERGATE_VENC)     |
887                 (1 << TEGRA_POWERGATE_PCIE)     |
888                 (1 << TEGRA_POWERGATE_VDEC)     |
889                 (1 << TEGRA_POWERGATE_MPE);
890
891         int partid;
892
893         for (partid = 0; partid < TEGRA_NUM_POWERGATE; partid++)
894                 if ((1 << partid) & pwrgate_partid_mask)
895                         if (tegra_powergate_is_powered(partid))
896                                 pr_warning("partition %s is left on before suspend\n",
897                                         tegra_powergate_get_name(partid));
898
899         return;
900 }
901
902 int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags)
903 {
904         int err = 0;
905         u32 scratch37 = 0xDEADBEEF;
906         u32 reg;
907
908         if (WARN_ON(mode <= TEGRA_SUSPEND_NONE ||
909                 mode >= TEGRA_MAX_SUSPEND_MODE)) {
910                 err = -ENXIO;
911                 goto fail;
912         }
913
914         if (tegra_is_voice_call_active()) {
915                 /* backup the current value of scratch37 */
916                 scratch37 = readl(pmc + PMC_SCRATCH37);
917
918                 /* If voice call is active, set a flag in PMC_SCRATCH37 */
919                 reg = TEGRA_POWER_LP1_AUDIO;
920                 pmc_32kwritel(reg, PMC_SCRATCH37);
921         }
922
923         if ((mode == TEGRA_SUSPEND_LP0) && !tegra_pm_irq_lp0_allowed()) {
924                 pr_info("LP0 not used due to unsupported wakeup events\n");
925                 mode = TEGRA_SUSPEND_LP1;
926         }
927
928         if ((mode == TEGRA_SUSPEND_LP0) || (mode == TEGRA_SUSPEND_LP1))
929                 tegra_suspend_check_pwr_stats();
930
931         tegra_common_suspend();
932
933         tegra_pm_set(mode);
934
935         if (pdata && pdata->board_suspend)
936                 pdata->board_suspend(mode, TEGRA_SUSPEND_BEFORE_CPU);
937
938         local_fiq_disable();
939
940         trace_cpu_suspend(CPU_SUSPEND_START);
941
942         if (mode == TEGRA_SUSPEND_LP0) {
943 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
944                 reg = readl(pmc + PMC_SCRATCH4);
945                 if (is_lp_cluster())
946                         reg |= PMC_SCRATCH4_WAKE_CLUSTER_MASK;
947                 else
948                         reg &= (~PMC_SCRATCH4_WAKE_CLUSTER_MASK);
949                 pmc_32kwritel(reg, PMC_SCRATCH4);
950 #endif
951                 tegra_tsc_suspend();
952                 tegra_lp0_suspend_mc();
953                 tegra_cpu_reset_handler_save();
954                 tegra_tsc_wait_for_suspend();
955         }
956         else if (mode == TEGRA_SUSPEND_LP1)
957                 *iram_cpu_lp1_mask = 1;
958
959         suspend_cpu_complex(flags);
960
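        /* clean and invalidate all caches, then disable the outer cache so
           memory is consistent before the CPU complex powers down */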
961         flush_cache_all();
962         outer_flush_all();
963         outer_disable();
964
965         if (mode == TEGRA_SUSPEND_LP2)
966                 tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
967         else
968                 tegra_sleep_core(mode, PHYS_OFFSET - PAGE_OFFSET);
969
970         tegra_init_cache(true);
971
972         if (mode == TEGRA_SUSPEND_LP0) {
973 #ifdef CONFIG_ARCH_TEGRA_11x_SOC
974                 reg = readl(pmc+PMC_CTRL);
975                 reg |= TEGRA_POWER_CPUPWRGOOD_EN;
976                 pmc_32kwritel(reg, PMC_CTRL);
977 #endif
978                 tegra_tsc_resume();
979                 tegra_cpu_reset_handler_restore();
980                 tegra_lp0_resume_mc();
981                 tegra_tsc_wait_for_resume();
982         } else if (mode == TEGRA_SUSPEND_LP1)
983                 *iram_cpu_lp1_mask = 0;
984
985         /* if scratch37 was clobbered during LP1, restore it */
986         if (scratch37 != 0xDEADBEEF)
987                 pmc_32kwritel(scratch37, PMC_SCRATCH37);
988
989         restore_cpu_complex(flags);
990
991         /* for platforms where the core & CPU power requests are
992          * combined as a single request to the PMU, transition out
993          * of LP0 state by temporarily enabling both requests
994          */
995         if (mode == TEGRA_SUSPEND_LP0 && pdata->combined_req) {
996                 reg = readl(pmc + PMC_CTRL);
997                 reg |= TEGRA_POWER_CPU_PWRREQ_OE;
998                 pmc_32kwritel(reg, PMC_CTRL);
999                 reg &= ~TEGRA_POWER_PWRREQ_OE;
1000                 pmc_32kwritel(reg, PMC_CTRL);
1001         }
1002
1003         if (pdata && pdata->board_resume)
1004                 pdata->board_resume(mode, TEGRA_RESUME_AFTER_CPU);
1005
1006         trace_cpu_suspend(CPU_SUSPEND_DONE);
1007
1008         local_fiq_enable();
1009
1010         tegra_common_resume();
1011
1012 fail:
1013         return err;
1014 }
1015
1016 /*
1017  * Function pointer to an optional board-specific function
1018  */
1019 void (*tegra_deep_sleep)(int);
1020 EXPORT_SYMBOL(tegra_deep_sleep);
1021
1022 static int tegra_suspend_prepare(void)
1023 {
1024         if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
1025                 tegra_deep_sleep(1);
1026         return 0;
1027 }
1028
1029 static void tegra_suspend_finish(void)
1030 {
1031         if (pdata && pdata->cpu_resume_boost) {
1032                 int ret = tegra_suspended_target(pdata->cpu_resume_boost);
1033                 pr_info("Tegra: resume CPU boost to %u KHz: %s (%d)\n",
1034                         pdata->cpu_resume_boost, ret ? "Failed" : "OK", ret);
1035         }
1036
1037         if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
1038                 tegra_deep_sleep(0);
1039 }
1040
1041 static const struct platform_suspend_ops tegra_suspend_ops = {
1042         .valid          = suspend_valid_only_mem,
1043         .prepare        = tegra_suspend_prepare,
1044         .finish         = tegra_suspend_finish,
1045         .prepare_late   = tegra_suspend_prepare_late,
1046         .wake           = tegra_suspend_wake,
1047         .enter          = tegra_suspend_enter,
1048 };
1049
1050 static ssize_t suspend_mode_show(struct kobject *kobj,
1051                                         struct kobj_attribute *attr, char *buf)
1052 {
1053         char *start = buf;
1054         char *end = buf + PAGE_SIZE;
1055
1056         start += scnprintf(start, end - start, "%s ", \
1057                                 tegra_suspend_name[current_suspend_mode]);
1058         start += scnprintf(start, end - start, "\n");
1059
1060         return start - buf;
1061 }
1062
1063 static ssize_t suspend_mode_store(struct kobject *kobj,
1064                                         struct kobj_attribute *attr,
1065                                         const char *buf, size_t n)
1066 {
1067         int len;
1068         const char *name_ptr;
1069         enum tegra_suspend_mode new_mode;
1070
1071         name_ptr = buf;
1072         while (*name_ptr && !isspace(*name_ptr))
1073                 name_ptr++;
1074         len = name_ptr - buf;
1075         if (!len)
1076                 goto bad_name;
1077         /* TEGRA_SUSPEND_NONE and TEGRA_SUSPEND_LP2 are not allowed as suspend states */
1078         if (!(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_NONE], len))
1079                 || !(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_LP2], len))) {
1080                 pr_info("Illegal tegra suspend state: %s\n", buf);
1081                 goto bad_name;
1082         }
1083
1084         for (new_mode = TEGRA_SUSPEND_NONE; \
1085                         new_mode < TEGRA_MAX_SUSPEND_MODE; ++new_mode) {
1086                 if (!strncmp(buf, tegra_suspend_name[new_mode], len)) {
1087                         current_suspend_mode = new_mode;
1088                         break;
1089                 }
1090         }
1091
1092 bad_name:
1093         return n;
1094 }
1095
1096 static struct kobj_attribute suspend_mode_attribute =
1097         __ATTR(mode, 0644, suspend_mode_show, suspend_mode_store);
1098
1099 static struct kobject *suspend_kobj;
1100
1101 static int tegra_pm_enter_suspend(void)
1102 {
1103         pr_info("Entering suspend state %s\n", lp_state[current_suspend_mode]);
1104         if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1105                 tegra_lp0_cpu_mode(true);
1106         return 0;
1107 }
1108
1109 static void tegra_pm_enter_resume(void)
1110 {
1111         if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1112                 tegra_lp0_cpu_mode(false);
1113         pr_info("Exited suspend state %s\n", lp_state[current_suspend_mode]);
1114 }
1115
1116 static struct syscore_ops tegra_pm_enter_syscore_ops = {
1117         .suspend = tegra_pm_enter_suspend,
1118         .resume = tegra_pm_enter_resume,
1119 };
1120
1121 static __init int tegra_pm_enter_syscore_init(void)
1122 {
1123         register_syscore_ops(&tegra_pm_enter_syscore_ops);
1124         return 0;
1125 }
1126 subsys_initcall(tegra_pm_enter_syscore_init);
1127 #endif
1128
1129 void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat)
1130 {
1131         u32 reg;
1132         u32 mode;
1133
1134         tegra_pclk = clk_get_sys(NULL, "pclk");
1135         BUG_ON(IS_ERR(tegra_pclk));
1136         pdata = plat;
1137         (void)reg;
1138         (void)mode;
1139
1140         if (plat->suspend_mode == TEGRA_SUSPEND_LP2)
1141                 plat->suspend_mode = TEGRA_SUSPEND_LP0;
1142
1143 #ifndef CONFIG_PM_SLEEP
1144         if (plat->suspend_mode != TEGRA_SUSPEND_NONE) {
1145                 pr_warning("%s: Suspend requires CONFIG_PM_SLEEP -- "
1146                            "disabling suspend\n", __func__);
1147                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1148         }
1149 #else
1150         if (create_suspend_pgtable() < 0) {
1151                 pr_err("%s: PGD memory alloc failed -- LP0/LP1/LP2 unavailable\n",
1152                                 __func__);
1153                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1154                 goto fail;
1155         }
1156
1157         if (alloc_suspend_context() < 0) {
1158                 pr_err("%s: alloc_suspend_context failed -- LP0/LP1/LP2 unavailable\n",
1159                                 __func__);
1160                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1161                 goto fail;
1162         }
1163
1164         if ((tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) &&
1165             (tegra_revision == TEGRA_REVISION_A01) &&
1166             (plat->suspend_mode == TEGRA_SUSPEND_LP0)) {
1167                 /* Tegra 3 A01 supports only LP1 */
1168                 pr_warning("%s: Suspend mode LP0 is not supported on A01 "
1169                            "-- disabling LP0\n", __func__);
1170                 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1171         }
1172         if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && tegra_lp0_vec_size &&
1173                 tegra_lp0_vec_relocate) {
1174                 unsigned char *reloc_lp0;
1175                 unsigned long tmp;
1176                 void __iomem *orig;
1177                 reloc_lp0 = kmalloc(tegra_lp0_vec_size + L1_CACHE_BYTES - 1,
1178                                         GFP_KERNEL);
1179                 WARN_ON(!reloc_lp0);
1180                 if (!reloc_lp0) {
1181                         pr_err("%s: Failed to allocate reloc_lp0\n",
1182                                 __func__);
1183                         goto out;
1184                 }
1185
1186                 orig = ioremap(tegra_lp0_vec_start, tegra_lp0_vec_size);
1187                 WARN_ON(!orig);
1188                 if (!orig) {
1189                         pr_err("%s: Failed to map tegra_lp0_vec_start %08lx\n",
1190                                 __func__, tegra_lp0_vec_start);
1191                         kfree(reloc_lp0);
1192                         goto out;
1193                 }
1194
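                /* align the relocated copy to an L1 cache line; the extra
                   bytes were included in the allocation above */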
1195                 tmp = (unsigned long) reloc_lp0;
1196                 tmp = (tmp + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
1197                 reloc_lp0 = (unsigned char *)tmp;
1198                 memcpy(reloc_lp0, orig, tegra_lp0_vec_size);
1199                 iounmap(orig);
1200                 tegra_lp0_vec_start = virt_to_phys(reloc_lp0);
1201         }
1202
1203 out:
1204         if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && !tegra_lp0_vec_size) {
1205                 pr_warning("%s: Suspend mode LP0 requested, no lp0_vec "
1206                            "provided by bootloader -- disabling LP0\n",
1207                            __func__);
1208                 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1209         }
1210
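        /* reserve a buffer to preserve the IRAM contents overwritten by the
           LP0/LP1 entry code (see tegra_common_suspend()) */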
1211         iram_save_size = tegra_iram_end() - tegra_iram_start();
1212
1213         iram_save = kmalloc(iram_save_size, GFP_KERNEL);
1214         if (!iram_save && (plat->suspend_mode >= TEGRA_SUSPEND_LP1)) {
1215                 pr_err("%s: unable to allocate memory for SDRAM self-refresh "
1216                        "-- LP0/LP1 unavailable\n", __func__);
1217                 plat->suspend_mode = TEGRA_SUSPEND_LP2;
1218         }
1219
1220 #ifdef CONFIG_TEGRA_LP1_950
1221         if (pdata->lp1_lowvolt_support) {
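                /* patch the PMU I2C addresses and the packed
                   (voltage << 8) | register words into the LP1 low-voltage
                   assembly sequence */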
1222                 u32 lp1_core_lowvolt, lp1_core_highvolt;
1223                 memcpy(tegra_lp1_register_pmuslave_addr(), &pdata->pmuslave_addr, 4);
1224                 memcpy(tegra_lp1_register_i2c_base_addr(), &pdata->i2c_base_addr, 4);
1225
1226                 lp1_core_lowvolt = 0;
1227                 lp1_core_lowvolt = (pdata->lp1_core_volt_low << 8) | pdata->core_reg_addr;
1228                 memcpy(tegra_lp1_register_core_lowvolt(), &lp1_core_lowvolt, 4);
1229
1230                 lp1_core_highvolt = 0;
1231                 lp1_core_highvolt = (pdata->lp1_core_volt_high << 8) | pdata->core_reg_addr;
1232                 memcpy(tegra_lp1_register_core_highvolt(), &lp1_core_highvolt, 4);
1233         }
1234 #endif
1235         /* !!!FIXME!!! THIS IS TEGRA2 ONLY */
1236         /* Initialize scratch registers used for CPU LP2 synchronization */
1237         writel(0, pmc + PMC_SCRATCH37);
1238         writel(0, pmc + PMC_SCRATCH38);
1239         writel(0, pmc + PMC_SCRATCH39);
1240         writel(0, pmc + PMC_SCRATCH41);
1241
1242         /* Always enable the CPU power request; only normal polarity is supported */
1243         reg = readl(pmc + PMC_CTRL);
1244         BUG_ON(reg & TEGRA_POWER_CPU_PWRREQ_POLARITY);
1245         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
1246         pmc_32kwritel(reg, PMC_CTRL);
1247
1248         /* Configure core power request and system clock control if LP0
1249            is supported */
1250         __raw_writel(pdata->core_timer, pmc + PMC_COREPWRGOOD_TIMER);
1251         __raw_writel(pdata->core_off_timer, pmc + PMC_COREPWROFF_TIMER);
1252
1253         reg = readl(pmc + PMC_CTRL);
1254
1255         if (!pdata->sysclkreq_high)
1256                 reg |= TEGRA_POWER_SYSCLK_POLARITY;
1257         else
1258                 reg &= ~TEGRA_POWER_SYSCLK_POLARITY;
1259
1260         if (!pdata->corereq_high)
1261                 reg |= TEGRA_POWER_PWRREQ_POLARITY;
1262         else
1263                 reg &= ~TEGRA_POWER_PWRREQ_POLARITY;
1264
1265         /* configure output inverters while the request is tristated */
1266         pmc_32kwritel(reg, PMC_CTRL);
1267
1268         /* now enable requests */
1269         reg |= TEGRA_POWER_SYSCLK_OE;
1270         if (!pdata->combined_req)
1271                 reg |= TEGRA_POWER_PWRREQ_OE;
1272         pmc_32kwritel(reg, PMC_CTRL);
1273
1274         if (pdata->suspend_mode == TEGRA_SUSPEND_LP0)
1275                 tegra_lp0_suspend_init();
1276
1277         suspend_set_ops(&tegra_suspend_ops);
1278
1279         /* Create /sys/power/suspend/mode */
1280         suspend_kobj = kobject_create_and_add("suspend", power_kobj);
1281         if (suspend_kobj) {
1282                 if (sysfs_create_file(suspend_kobj, \
1283                                                 &suspend_mode_attribute.attr))
1284                         pr_err("%s: sysfs_create_file suspend type failed!\n",
1285                                                                 __func__);
1286         }
1287
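        /* pointers into the IRAM-resident reset handler data that mirror the
           kernel's LP1/LP2 CPU masks */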
1288         iram_cpu_lp2_mask = tegra_cpu_lp2_mask;
1289         iram_cpu_lp1_mask = tegra_cpu_lp1_mask;
1290
1291         /* clear IO DPD settings left over from the bootloader */
1292         tegra_bl_io_dpd_cleanup();
1293
1294 fail:
1295 #endif
1296         if (plat->suspend_mode == TEGRA_SUSPEND_NONE)
1297                 tegra_lp2_in_idle(false);
1298
1299         current_suspend_mode = plat->suspend_mode;
1300 }
1301
1302 unsigned long debug_uart_port_base = 0;
1303 EXPORT_SYMBOL(debug_uart_port_base);
1304
1305 static int tegra_debug_uart_suspend(void)
1306 {
1307         void __iomem *uart;
1308         u32 lcr;
1309
1310         if (!debug_uart_port_base)
1311                 return 0;
1312
1313         uart = IO_ADDRESS(debug_uart_port_base);
1314
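        /* Tegra UART registers are on a 32-bit (4-byte) stride, hence "* 4" */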
1315         lcr = readb(uart + UART_LCR * 4);
1316
1317         tegra_sctx.uart[0] = lcr;
1318         tegra_sctx.uart[1] = readb(uart + UART_MCR * 4);
1319
1320         /* DLAB = 0 */
1321         writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1322
1323         tegra_sctx.uart[2] = readb(uart + UART_IER * 4);
1324
1325         /* DLAB = 1 */
1326         writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1327
1328         tegra_sctx.uart[3] = readb(uart + UART_DLL * 4);
1329         tegra_sctx.uart[4] = readb(uart + UART_DLM * 4);
1330
1331         writeb(lcr, uart + UART_LCR * 4);
1332
1333         return 0;
1334 }
1335
1336 static void tegra_debug_uart_resume(void)
1337 {
1338         void __iomem *uart;
1339         u32 lcr;
1340
1341         if (!debug_uart_port_base)
1342                 return;
1343
1344         uart = IO_ADDRESS(debug_uart_port_base);
1345
1346         lcr = tegra_sctx.uart[0];
1347
1348         writeb(tegra_sctx.uart[1], uart + UART_MCR * 4);
1349
1350         /* DLAB = 0 */
1351         writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1352
1353         writeb(UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_01 | UART_FCR_R_TRIG_01,
1354                         uart + UART_FCR * 4);
1355
1356         writeb(tegra_sctx.uart[2], uart + UART_IER * 4);
1357
1358         /* DLAB = 1 */
1359         writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1360
1361         writeb(tegra_sctx.uart[3], uart + UART_DLL * 4);
1362         writeb(tegra_sctx.uart[4], uart + UART_DLM * 4);
1363
1364         writeb(lcr, uart + UART_LCR * 4);
1365 }
1366
1367 static struct syscore_ops tegra_debug_uart_syscore_ops = {
1368         .suspend = tegra_debug_uart_suspend,
1369         .resume = tegra_debug_uart_resume,
1370 };
1371
1372 struct clk *debug_uart_clk = NULL;
1373 EXPORT_SYMBOL(debug_uart_clk);
1374
1375 void tegra_console_uart_suspend(void)
1376 {
1377         if (console_suspend_enabled && debug_uart_clk)
1378                 clk_disable(debug_uart_clk);
1379 }
1380
1381 void tegra_console_uart_resume(void)
1382 {
1383         if (console_suspend_enabled && debug_uart_clk)
1384                 clk_enable(debug_uart_clk);
1385 }
1386
1387 static int tegra_debug_uart_syscore_init(void)
1388 {
1389         register_syscore_ops(&tegra_debug_uart_syscore_ops);
1390         return 0;
1391 }
1392 arch_initcall(tegra_debug_uart_syscore_init);