ARM: tegra11x: Support min residency per platform
1 /*
2  * arch/arm/mach-tegra/pm.c
3  *
4  * CPU complex suspend & resume functions for Tegra SoCs
5  *
6  * Copyright (c) 2009-2013, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/ctype.h>
25 #include <linux/init.h>
26 #include <linux/io.h>
27 #include <linux/sched.h>
28 #include <linux/smp.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/clk.h>
32 #include <linux/err.h>
33 #include <linux/debugfs.h>
34 #include <linux/delay.h>
35 #include <linux/suspend.h>
36 #include <linux/slab.h>
37 #include <linux/serial_reg.h>
38 #include <linux/seq_file.h>
39 #include <linux/uaccess.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/cpu_pm.h>
42 #include <linux/clk/tegra.h>
43 #include <linux/export.h>
44 #include <linux/vmalloc.h>
45 #include <linux/memblock.h>
46 #include <linux/console.h>
47 #include <linux/tegra_audio.h>
48
49 #include <trace/events/power.h>
50 #include <trace/events/nvsecurity.h>
51
52 #include <asm/cacheflush.h>
53 #include <asm/idmap.h>
54 #include <asm/localtimer.h>
55 #include <asm/pgalloc.h>
56 #include <asm/pgtable.h>
57 #include <asm/tlbflush.h>
58 #include <asm/suspend.h>
59 #include <asm/smp_plat.h>
60
61 #include <mach/irqs.h>
62 #include <mach/powergate.h>
63 #include <mach/hardware.h>
64
65 #include "board.h"
66 #include "clock.h"
67 #include "common.h"
68 #include "cpuidle.h"
69 #include "fuse.h"
70 #include "gic.h"
71 #include "iomap.h"
72 #include "pm.h"
73 #include "pm-irq.h"
74 #include "reset.h"
75 #include "sleep.h"
76 #include "timer.h"
77 #include "dvfs.h"
78 #include "cpu-tegra.h"
79
80 #define CREATE_TRACE_POINTS
81 #include <trace/events/nvpower.h>
82
83 struct suspend_context {
84         /*
85          * The next 7 values are referenced by offset in __restart_plls
86          * in headsmp-t2.S, and should not be moved
87          */
88         u32 pllx_misc;
89         u32 pllx_base;
90         u32 pllp_misc;
91         u32 pllp_base;
92         u32 pllp_outa;
93         u32 pllp_outb;
94         u32 pll_timeout;
95
96         u32 cpu_burst;
97         u32 clk_csite_src;
98         u32 cclk_divider;
99
100         u32 mc[3];
101         u8 uart[5];
102
103         struct tegra_twd_context twd;
104 };
105
106 #ifdef CONFIG_PM_SLEEP
107 phys_addr_t tegra_pgd_phys;     /* pgd used by hotplug & LP2 bootup */
108 static pgd_t *tegra_pgd;
109 static DEFINE_SPINLOCK(tegra_lp2_lock);
110 static cpumask_t tegra_in_lp2;
111 static cpumask_t *iram_cpu_lp2_mask;
112 static unsigned long *iram_cpu_lp1_mask;
113 static u8 *iram_save;
114 static unsigned long iram_save_size;
115 static void __iomem *iram_code = IO_ADDRESS(TEGRA_IRAM_CODE_AREA);
116 static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
117 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
118 static int tegra_last_pclk;
119 #endif
120
121 struct suspend_context tegra_sctx;
122
123 #define TEGRA_POWER_PWRREQ_POLARITY     (1 << 8)   /* core power request polarity */
124 #define TEGRA_POWER_PWRREQ_OE           (1 << 9)   /* core power request enable */
125 #define TEGRA_POWER_SYSCLK_POLARITY     (1 << 10)  /* sys clk polarity */
126 #define TEGRA_POWER_SYSCLK_OE           (1 << 11)  /* system clock enable */
127 #define TEGRA_POWER_PWRGATE_DIS         (1 << 12)  /* power gate disabled */
128 #define TEGRA_POWER_EFFECT_LP0          (1 << 14)  /* enter LP0 when CPU pwr gated */
129 #define TEGRA_POWER_CPU_PWRREQ_POLARITY (1 << 15)  /* CPU power request polarity */
130 #define TEGRA_POWER_CPU_PWRREQ_OE       (1 << 16)  /* CPU power request enable */
131 #define TEGRA_POWER_CPUPWRGOOD_EN       (1 << 19)  /* CPU power good enable */
132
133 #define PMC_CTRL                0x0
134 #define PMC_CTRL_LATCH_WAKEUPS  (1 << 5)
135 #define PMC_WAKE_MASK           0xc
136 #define PMC_WAKE_LEVEL          0x10
137 #define PMC_DPAD_ORIDE          0x1C
138 #define PMC_WAKE_DELAY          0xe0
139 #define PMC_DPD_SAMPLE          0x20
140 #ifdef CONFIG_ARCH_TEGRA_14x_SOC
141 #define PMC_DPD_ENABLE          0x24
142 #endif
143 #define PMC_IO_DPD_REQ          0x1B8
144 #define PMC_IO_DPD2_REQ         0x1C0
145
146
147 #define PMC_WAKE_STATUS         0x14
148 #define PMC_SW_WAKE_STATUS      0x18
149 #define PMC_COREPWRGOOD_TIMER   0x3c
150 #define PMC_CPUPWRGOOD_TIMER    0xc8
151 #define PMC_CPUPWROFF_TIMER     0xcc
152 #define PMC_COREPWROFF_TIMER    PMC_WAKE_DELAY
153
154 #define PMC_PWRGATE_TOGGLE      0x30
155 #define PWRGATE_TOGGLE_START    (1 << 8)
156 #define UN_PWRGATE_CPU          \
157         (PWRGATE_TOGGLE_START | TEGRA_CPU_POWERGATE_ID(TEGRA_POWERGATE_CPU))
158
159 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
160 #define PMC_SCRATCH4_WAKE_CLUSTER_MASK  (1<<31)
161 #endif
162
163 #define CLK_RESET_CCLK_BURST    0x20
164 #define CLK_RESET_CCLK_DIVIDER  0x24
165 #define CLK_RESET_PLLC_BASE     0x80
166 #define CLK_RESET_PLLM_BASE     0x90
167 #define CLK_RESET_PLLX_BASE     0xe0
168 #define CLK_RESET_PLLX_MISC     0xe4
169 #define CLK_RESET_PLLP_BASE     0xa0
170 #define CLK_RESET_PLLP_OUTA     0xa4
171 #define CLK_RESET_PLLP_OUTB     0xa8
172 #define CLK_RESET_PLLP_MISC     0xac
173
174 #define CLK_RESET_SOURCE_CSITE  0x1d4
175
176 #define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
177 #define CLK_RESET_CCLK_RUN_POLICY_SHIFT    4
178 #define CLK_RESET_CCLK_IDLE_POLICY_SHIFT   0
179 #define CLK_RESET_CCLK_IDLE_POLICY         1
180 #define CLK_RESET_CCLK_RUN_POLICY          2
181 #define CLK_RESET_CCLK_BURST_POLICY_PLLM   3
182 #define CLK_RESET_CCLK_BURST_POLICY_PLLX   8
183
184 #define EMC_MRW_0               0x0e8
185 #define EMC_MRW_DEV_SELECTN     30
186 #define EMC_MRW_DEV_NONE        (3 << EMC_MRW_DEV_SELECTN)
187
188 #define MC_SECURITY_START       0x6c
189 #define MC_SECURITY_SIZE        0x70
190 #define MC_SECURITY_CFG2        0x7c
191
192 #ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
193 static struct clk *tegra_dfll;
194 #endif
195 static struct clk *tegra_pclk;
196 static const struct tegra_suspend_platform_data *pdata;
197 static enum tegra_suspend_mode current_suspend_mode = TEGRA_SUSPEND_NONE;
198
199 #if defined(CONFIG_TEGRA_CLUSTER_CONTROL) && INSTRUMENT_CLUSTER_SWITCH
200 enum tegra_cluster_switch_time_id {
201         tegra_cluster_switch_time_id_start = 0,
202         tegra_cluster_switch_time_id_prolog,
203         tegra_cluster_switch_time_id_switch,
204         tegra_cluster_switch_time_id_epilog,
205         tegra_cluster_switch_time_id_max
206 };
207
208 static unsigned long
209                 tegra_cluster_switch_times[tegra_cluster_switch_time_id_max];
210 #define tegra_cluster_switch_time(flags, id) \
211         do { \
212                 barrier(); \
213                 if (flags & TEGRA_POWER_CLUSTER_MASK) { \
214                         void __iomem *timer_us = \
215                                                 IO_ADDRESS(TEGRA_TMRUS_BASE); \
216                         if (id < tegra_cluster_switch_time_id_max) \
217                                 tegra_cluster_switch_times[id] = \
218                                                         readl(timer_us); \
219                         wmb(); \
220                 } \
221                 barrier(); \
222         } while(0)
223 #else
224 #define tegra_cluster_switch_time(flags, id) do {} while(0)
225 #endif
226
227 #ifdef CONFIG_PM_SLEEP
228 static const char *tegra_suspend_name[TEGRA_MAX_SUSPEND_MODE] = {
229         [TEGRA_SUSPEND_NONE]    = "none",
230         [TEGRA_SUSPEND_LP2]     = "lp2",
231         [TEGRA_SUSPEND_LP1]     = "lp1",
232         [TEGRA_SUSPEND_LP0]     = "lp0",
233 };
234
235 unsigned long tegra_cpu_power_good_time(void)
236 {
237         if (WARN_ON_ONCE(!pdata))
238                 return 5000;
239
240         return pdata->cpu_timer;
241 }
242
243 unsigned long tegra_cpu_power_off_time(void)
244 {
245         if (WARN_ON_ONCE(!pdata))
246                 return 5000;
247
248         return pdata->cpu_off_timer;
249 }
250
251 unsigned long tegra_cpu_lp2_min_residency(void)
252 {
253         if (WARN_ON_ONCE(!pdata))
254                 return 2000;
255
256         return pdata->cpu_lp2_min_residency;
257 }
258
259 #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE
260 #define TEGRA_MIN_RESIDENCY_VMIN_FMIN   2000
261 #define TEGRA_MIN_RESIDENCY_NCPU_SLOW   2000
262 #define TEGRA_MIN_RESIDENCY_NCPU_FAST   13000
263 #define TEGRA_MIN_RESIDENCY_CRAIL       20000
264
265 unsigned long tegra_min_residency_vmin_fmin(void)
266 {
267         return pdata && pdata->min_residency_vmin_fmin
268                         ? pdata->min_residency_vmin_fmin
269                         : TEGRA_MIN_RESIDENCY_VMIN_FMIN;
270 }
271
272 unsigned long tegra_min_residency_ncpu(void)
273 {
274         if (is_lp_cluster()) {
275                 return pdata && pdata->min_residency_ncpu_slow
276                         ? pdata->min_residency_ncpu_slow
277                         : TEGRA_MIN_RESIDENCY_NCPU_SLOW;
278         } else
279                 return pdata && pdata->min_residency_ncpu_fast
280                         ? pdata->min_residency_ncpu_fast
281                         : TEGRA_MIN_RESIDENCY_NCPU_FAST;
282 }
283
284 unsigned long tegra_min_residency_crail(void)
285 {
286         return pdata && pdata->min_residency_crail
287                         ? pdata->min_residency_crail
288                         : TEGRA_MIN_RESIDENCY_CRAIL;
289 }
290 #endif
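
/*
 * Illustrative use (not part of this driver): a platform cpuidle backend can
 * gate its deeper power-down states on these thresholds, e.g.
 *
 *	if (predicted_idle_us >= tegra_min_residency_crail())
 *		pick a state that also rail-gates the CPU cluster;
 *	else if (predicted_idle_us >= tegra_min_residency_ncpu())
 *		fall back to gating only the non-CPU partition;
 *
 * Platforms may override the TEGRA_MIN_RESIDENCY_* defaults above through the
 * min_residency_* fields of struct tegra_suspend_platform_data.
 */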
291
292 static void suspend_cpu_dfll_mode(void)
293 {
294 #ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
295         /* If DFLL is used as CPU clock source go to open loop mode */
296         if (!is_lp_cluster() && tegra_dfll &&
297             tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
298                 tegra_clk_cfg_ex(tegra_dfll, TEGRA_CLK_DFLL_LOCK, 0);
299 #endif
300 }
301
302 static void resume_cpu_dfll_mode(void)
303 {
304 #ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
305         /* If DFLL is used as CPU clock source restore closed loop mode */
306         if (!is_lp_cluster() && tegra_dfll &&
307             tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
308                 tegra_clk_cfg_ex(tegra_dfll, TEGRA_CLK_DFLL_LOCK, 1);
309 #endif
310 }
311
312 /*
313  * create_suspend_pgtable
314  *
315  * Creates a page table with identity mappings of physical memory and IRAM
316  * for use when the MMU is off, in addition to all the regular kernel mappings.
317  */
318 static __init int create_suspend_pgtable(void)
319 {
320         tegra_pgd = pgd_alloc(&init_mm);
321         if (!tegra_pgd)
322                 return -ENOMEM;
323
324         /* Only identity-map size of lowmem (high_memory - PAGE_OFFSET) */
325         identity_mapping_add(tegra_pgd, phys_to_virt(PHYS_OFFSET),
326                 high_memory, 0);
327         identity_mapping_add(tegra_pgd, IO_IRAM_VIRT,
328                 IO_IRAM_VIRT + SECTION_SIZE, 0);
329
330         /* inner/outer write-back/write-allocate, sharable */
331         tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK) | 0x4A;
332
333         return 0;
334 }
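
/*
 * Note: tegra_pgd_phys above is kept in TTBR0 form (table base ORed with the
 * cacheability/shareability attribute bits), on the assumption that the
 * low-level LP2/hotplug boot code loads it into TTBR0 directly while the MMU
 * is still off.
 */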
335
336 /* ensure that sufficient time has passed for a register write to
337  * serialize into the 32 kHz domain (130 us is just over four 32.768 kHz periods) */
338 static void pmc_32kwritel(u32 val, unsigned long offs)
339 {
340         writel(val, pmc + offs);
341         udelay(130);
342 }
343
344 static void set_power_timers(unsigned long us_on, unsigned long us_off,
345                              long rate)
346 {
347         static unsigned long last_us_off = 0;
348         unsigned long long ticks;
349         unsigned long long pclk;
350
351         if (WARN_ON_ONCE(rate <= 0))
352                 pclk = 100000000;
353         else
354                 pclk = rate;
355
356         if ((rate != tegra_last_pclk) || (us_off != last_us_off)) {
357                 ticks = (us_on * pclk) + 999999ull;
358                 do_div(ticks, 1000000);
359                 writel((unsigned long)ticks, pmc + PMC_CPUPWRGOOD_TIMER);
360
361                 ticks = (us_off * pclk) + 999999ull;
362                 do_div(ticks, 1000000);
363                 writel((unsigned long)ticks, pmc + PMC_CPUPWROFF_TIMER);
364                 wmb();
365         }
366         tegra_last_pclk = pclk;
367         last_us_off = us_off;
368 }
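
/*
 * Worked example with illustrative numbers: at pclk = 204 MHz, us_on = 300
 * gives PMC_CPUPWRGOOD_TIMER = (300 * 204000000 + 999999) / 1000000 = 61200
 * pclk ticks; the +999999 rounds any fractional tick up rather than down.
 */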
369
370 /*
371  * restore_cpu_complex
372  *
373  * restores cpu clock setting, clears flow controller
374  *
375  * Always called on CPU 0.
376  */
377 static void restore_cpu_complex(u32 mode)
378 {
379         int cpu = cpu_logical_map(smp_processor_id());
380         unsigned int reg;
381 #if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
382         unsigned int policy;
383 #endif
384
385 /*
386  * On Tegra11x, PLLX and the CPU burst policy are either preserved across
387  * LP2 or restored by the common clock suspend/resume procedures, so there
388  * is no need to restore them here.
389  */
390 #if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
391         /* Is CPU complex already running on PLLX? */
392         reg = readl(clk_rst + CLK_RESET_CCLK_BURST);
393         policy = (reg >> CLK_RESET_CCLK_BURST_POLICY_SHIFT) & 0xF;
394
395         if (policy == CLK_RESET_CCLK_IDLE_POLICY)
396                 reg = (reg >> CLK_RESET_CCLK_IDLE_POLICY_SHIFT) & 0xF;
397         else if (policy == CLK_RESET_CCLK_RUN_POLICY)
398                 reg = (reg >> CLK_RESET_CCLK_RUN_POLICY_SHIFT) & 0xF;
399         else
400                 BUG();
401
402         if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
403                 /* restore PLLX settings if CPU is on different PLL */
404                 writel(tegra_sctx.pllx_misc, clk_rst + CLK_RESET_PLLX_MISC);
405                 writel(tegra_sctx.pllx_base, clk_rst + CLK_RESET_PLLX_BASE);
406
407                 /* wait for PLL stabilization if PLLX was enabled */
408                 if (tegra_sctx.pllx_base & (1<<30)) {
409 #if USE_PLL_LOCK_BITS
410                         /* Enable lock detector */
411                         reg = readl(clk_rst + CLK_RESET_PLLX_MISC);
412                         reg |= 1<<18;
413                         writel(reg, clk_rst + CLK_RESET_PLLX_MISC);
414                         while (!(readl(clk_rst + CLK_RESET_PLLX_BASE) &
415                                  (1<<27)))
416                                 cpu_relax();
417
418                         udelay(PLL_POST_LOCK_DELAY);
419 #else
420                         udelay(300);
421 #endif
422                 }
423         }
424
425         /* Restore original burst policy setting for calls resulting from CPU
426            LP2 in idle or system suspend; keep cluster switch prolog setting
427            intact. */
428         if (!(mode & TEGRA_POWER_CLUSTER_MASK)) {
429                 writel(tegra_sctx.cclk_divider, clk_rst +
430                        CLK_RESET_CCLK_DIVIDER);
431                 writel(tegra_sctx.cpu_burst, clk_rst +
432                        CLK_RESET_CCLK_BURST);
433         }
434 #endif
435         writel(tegra_sctx.clk_csite_src, clk_rst + CLK_RESET_SOURCE_CSITE);
436
437         /* Do not power-gate CPU 0 when flow controlled */
438         reg = readl(FLOW_CTRL_CPU_CSR(cpu));
439         reg &= ~FLOW_CTRL_CSR_WFE_BITMAP;       /* clear wfe bitmap */
440         reg &= ~FLOW_CTRL_CSR_WFI_BITMAP;       /* clear wfi bitmap */
441         reg &= ~FLOW_CTRL_CSR_ENABLE;           /* clear enable */
442         reg |= FLOW_CTRL_CSR_INTR_FLAG;         /* clear intr */
443         reg |= FLOW_CTRL_CSR_EVENT_FLAG;        /* clear event */
444         flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
445
446         /* If an immediate cluster switch is being performed, restore the
447            local timer registers. For calls resulting from CPU LP2 in
448            idle or system suspend, the local timer was shut down and
449            timekeeping switched over to the global system timer. In this
450            case keep local timer disabled, and restore only periodic load. */
451 #ifdef CONFIG_HAVE_ARM_TWD
452         if (!(mode & (TEGRA_POWER_CLUSTER_MASK |
453                       TEGRA_POWER_CLUSTER_IMMEDIATE))) {
454                 tegra_sctx.twd.twd_ctrl = 0;
455         }
456         tegra_twd_resume(&tegra_sctx.twd);
457 #endif
458 }
459
460 /*
461  * suspend_cpu_complex
462  *
463  * saves pll state for use by restart_plls, prepares flow controller for
464  * transition to suspend state
465  *
466  * Must always be called on cpu 0.
467  */
468 static void suspend_cpu_complex(u32 mode)
469 {
470         int cpu = cpu_logical_map(smp_processor_id());
471         unsigned int reg;
472         int i;
473
474         BUG_ON(cpu != 0);
475
476         /* switch coresite to clk_m, save off original source */
477         tegra_sctx.clk_csite_src = readl(clk_rst + CLK_RESET_SOURCE_CSITE);
478         writel(3<<30, clk_rst + CLK_RESET_SOURCE_CSITE);
479
480         tegra_sctx.cpu_burst = readl(clk_rst + CLK_RESET_CCLK_BURST);
481         tegra_sctx.pllx_base = readl(clk_rst + CLK_RESET_PLLX_BASE);
482         tegra_sctx.pllx_misc = readl(clk_rst + CLK_RESET_PLLX_MISC);
483         tegra_sctx.pllp_base = readl(clk_rst + CLK_RESET_PLLP_BASE);
484         tegra_sctx.pllp_outa = readl(clk_rst + CLK_RESET_PLLP_OUTA);
485         tegra_sctx.pllp_outb = readl(clk_rst + CLK_RESET_PLLP_OUTB);
486         tegra_sctx.pllp_misc = readl(clk_rst + CLK_RESET_PLLP_MISC);
487         tegra_sctx.cclk_divider = readl(clk_rst + CLK_RESET_CCLK_DIVIDER);
488
489 #ifdef CONFIG_HAVE_ARM_TWD
490         tegra_twd_suspend(&tegra_sctx.twd);
491 #endif
492
493         reg = readl(FLOW_CTRL_CPU_CSR(cpu));
494         reg &= ~FLOW_CTRL_CSR_WFE_BITMAP;       /* clear wfe bitmap */
495         reg &= ~FLOW_CTRL_CSR_WFI_BITMAP;       /* clear wfi bitmap */
496         reg |= FLOW_CTRL_CSR_INTR_FLAG;         /* clear intr flag */
497         reg |= FLOW_CTRL_CSR_EVENT_FLAG;        /* clear event flag */
498 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
499         reg |= FLOW_CTRL_CSR_WFE_CPU0 << cpu;   /* enable power gating on wfe */
500 #else
501         reg |= FLOW_CTRL_CSR_WFI_CPU0 << cpu;   /* enable power gating on wfi */
502 #endif
503         reg |= FLOW_CTRL_CSR_ENABLE;            /* enable power gating */
504         flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
505
506         for (i = 0; i < num_possible_cpus(); i++) {
507                 if (i == cpu)
508                         continue;
509                 reg = readl(FLOW_CTRL_CPU_CSR(i));
510                 reg |= FLOW_CTRL_CSR_EVENT_FLAG;
511                 reg |= FLOW_CTRL_CSR_INTR_FLAG;
512                 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(i));
513         }
514
515         tegra_gic_cpu_disable(true);
516 }
517
518 void tegra_clear_cpu_in_pd(int cpu)
519 {
520         spin_lock(&tegra_lp2_lock);
521         BUG_ON(!cpumask_test_cpu(cpu, &tegra_in_lp2));
522         cpumask_clear_cpu(cpu, &tegra_in_lp2);
523
524         /* Update the IRAM copy used by the reset handler. The IRAM copy
525            can't be updated directly with cpumask_clear_cpu() because that uses
526            LDREX/STREX, which require the addressed location to be inner
527            cacheable and shareable, which IRAM isn't. */
528         writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
529         dsb();
530
531         spin_unlock(&tegra_lp2_lock);
532 }
533
534 bool tegra_set_cpu_in_pd(int cpu)
535 {
536         bool last_cpu = false;
537
538         spin_lock(&tegra_lp2_lock);
539         BUG_ON(cpumask_test_cpu(cpu, &tegra_in_lp2));
540         cpumask_set_cpu(cpu, &tegra_in_lp2);
541
542         /* Update the IRAM copy used by the reset handler. The IRAM copy
543            can't be updated directly with cpumask_set_cpu() because that uses
544            LDREX/STREX, which require the addressed location to be inner
545            cacheable and shareable, which IRAM isn't. */
546         writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
547         dsb();
548
549         if ((cpu == 0) && cpumask_equal(&tegra_in_lp2, cpu_online_mask))
550                 last_cpu = true;
551 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
552         else if (cpu == 1)
553                 tegra2_cpu_set_resettable_soon();
554 #endif
555
556         spin_unlock(&tegra_lp2_lock);
557         return last_cpu;
558 }
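
/*
 * Coordination sketch: every CPU entering a powered-down idle state calls
 * tegra_set_cpu_in_pd(); only when CPU0 sees all online CPUs in tegra_in_lp2
 * (last_cpu == true) does the cpuidle driver, outside this file, take the
 * whole cluster down via tegra_idle_power_down_last(). Each CPU then calls
 * tegra_clear_cpu_in_pd() on the way back up.
 */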
559
560 static void tegra_sleep_core(enum tegra_suspend_mode mode,
561                              unsigned long v2p)
562 {
563 #ifdef CONFIG_TRUSTED_FOUNDATIONS
564         outer_flush_range(__pa(&tegra_resume_timestamps_start),
565                           __pa(&tegra_resume_timestamps_end));
566
567         if (mode == TEGRA_SUSPEND_LP0) {
568                 trace_smc_sleep_core(NVSEC_SMC_START);
569
570                 tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE3,
571                                   virt_to_phys(tegra_resume));
572         } else {
573                 trace_smc_sleep_core(NVSEC_SMC_START);
574
575                 tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE6,
576                                   (TEGRA_RESET_HANDLER_BASE +
577                                    tegra_cpu_reset_handler_offset));
578         }
579
580         trace_smc_sleep_core(NVSEC_SMC_DONE);
581 #endif
582 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
583         cpu_suspend(v2p, tegra2_sleep_core_finish);
584 #else
585         cpu_suspend(v2p, tegra3_sleep_core_finish);
586 #endif
587 }
588
589 static inline void tegra_sleep_cpu(unsigned long v2p)
590 {
591         cpu_suspend(v2p, tegra_sleep_cpu_finish);
592 }
593
594 unsigned int tegra_idle_power_down_last(unsigned int sleep_time,
595                                         unsigned int flags)
596 {
597         u32 reg;
598         unsigned int remain;
599 #if defined(CONFIG_CACHE_L2X0) && !defined(CONFIG_ARCH_TEGRA_14x_SOC)
600         pgd_t *pgd;
601 #endif
602
603         /* Only the last CPU to go down performs the final suspend steps */
604         reg = readl(pmc + PMC_CTRL);
605         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
606         if (pdata->combined_req)
607                 reg &= ~TEGRA_POWER_PWRREQ_OE;
608         else
609                 reg |= TEGRA_POWER_PWRREQ_OE;
610
611         reg &= ~TEGRA_POWER_EFFECT_LP0;
612         writel(reg, pmc + PMC_CTRL);
613
614         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);
615
616         /*
617          * We can use clk_get_rate_all_locked() here, because all other cpus
618          * are in LP2 state and irqs are disabled
619          */
620         if (flags & TEGRA_POWER_CLUSTER_MASK) {
621                 trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_START);
622                 set_power_timers(pdata->cpu_timer, 2,
623                         clk_get_rate_all_locked(tegra_pclk));
624                 if (flags & TEGRA_POWER_CLUSTER_G) {
625                         /*
626                          * To reduce the vdd_cpu ramp-up latency for an
627                          * LP->G transition, enable the vdd_cpu rail
628                          * before the transition.
629                          */
630                         if (is_lp_cluster()) {
631 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
632                                 reg = readl(FLOW_CTRL_CPU_PWR_CSR);
633                                 reg |= FLOW_CTRL_CPU_PWR_CSR_RAIL_ENABLE;
634                                 writel(reg, FLOW_CTRL_CPU_PWR_CSR);
635 #else
636                                 writel(UN_PWRGATE_CPU,
637                                        pmc + PMC_PWRGATE_TOGGLE);
638 #endif
639                         }
640                 }
641                 tegra_cluster_switch_prolog(flags);
642         } else {
643                 suspend_cpu_dfll_mode();
644                 set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
645                         clk_get_rate_all_locked(tegra_pclk));
646 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
647                 reg = readl(FLOW_CTRL_CPU_CSR(0));
648                 reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
649                 if (is_lp_cluster()) {
650                         /* for LP cluster, there is no option for rail gating */
651                         if ((flags & TEGRA_POWER_CLUSTER_PART_MASK) ==
652                                                 TEGRA_POWER_CLUSTER_PART_MASK)
653                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_EMU;
654                         else if (flags)
655                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
656                 }
657                 else {
658                         if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL)
659                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
660                         if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
661                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
662                 }
663                 writel(reg, FLOW_CTRL_CPU_CSR(0));
664 #endif
665         }
666
667         if (sleep_time)
668                 tegra_pd_set_trigger(sleep_time);
669
670         cpu_cluster_pm_enter();
671         suspend_cpu_complex(flags);
672         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_prolog);
673 #ifdef CONFIG_CACHE_L2X0
674         flush_cache_all();
675 #ifdef CONFIG_ARCH_TEGRA_14x_SOC
676         outer_flush_all();
677 #else
678         /*
679          * No need to flush complete L2. Cleaning kernel and IO mappings
680          * is enough for the LP code sequence that has L2 disabled but
681          * MMU on.
682          */
683         pgd = cpu_get_pgd();
684         outer_clean_range(__pa(pgd + USER_PTRS_PER_PGD),
685                           __pa(pgd + PTRS_PER_PGD));
686 #endif
687         outer_disable();
688 #endif
689         tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
690
691 #ifdef CONFIG_ARCH_TEGRA_14x_SOC
692         tegra_init_cache(true);
693 #else
694         tegra_init_cache(false);
695 #endif
696
697 #ifdef CONFIG_TRUSTED_FOUNDATIONS
698 #ifndef CONFIG_ARCH_TEGRA_11x_SOC
699         trace_smc_wake(tegra_resume_smc_entry_time, NVSEC_SMC_START);
700         trace_smc_wake(tegra_resume_smc_exit_time, NVSEC_SMC_DONE);
701 #endif
702 #endif
703
704         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_switch);
705         restore_cpu_complex(flags);
706         cpu_cluster_pm_exit();
707
708         remain = tegra_pd_timer_remain();
709         if (sleep_time)
710                 tegra_pd_set_trigger(0);
711
712         if (flags & TEGRA_POWER_CLUSTER_MASK) {
713                 tegra_cluster_switch_epilog(flags);
714                 if (is_idle_task(current))
715                         trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_DONE);
716                 else
717                         trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_DONE);
718         } else {
719                 resume_cpu_dfll_mode();
720         }
721         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_epilog);
722
723 #if INSTRUMENT_CLUSTER_SWITCH
724         if (flags & TEGRA_POWER_CLUSTER_MASK) {
725                 pr_err("%s: prolog %lu us, switch %lu us, epilog %lu us, total %lu us\n",
726                         is_lp_cluster() ? "G=>LP" : "LP=>G",
727                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog] -
728                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_start],
729                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch] -
730                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog],
731                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
732                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch],
733                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
734                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_start]);
735         }
736 #endif
737         return remain;
738 }
739
740 static int tegra_common_suspend(void)
741 {
742         void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
743
744         tegra_sctx.mc[0] = readl(mc + MC_SECURITY_START);
745         tegra_sctx.mc[1] = readl(mc + MC_SECURITY_SIZE);
746         tegra_sctx.mc[2] = readl(mc + MC_SECURITY_CFG2);
747
748         /* copy the reset vector and SDRAM shutdown code into IRAM */
749         memcpy(iram_save, iram_code, iram_save_size);
750         memcpy(iram_code, tegra_iram_start(), iram_save_size);
751
752         return 0;
753 }
754
755 static void tegra_common_resume(void)
756 {
757         void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
758 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
759         void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
760 #endif
761
762 #ifdef CONFIG_ARCH_TEGRA_14x_SOC
763         /* Clear DPD Enable */
764         writel(0x0, pmc + PMC_DPD_ENABLE);
765 #endif
766
767         writel(tegra_sctx.mc[0], mc + MC_SECURITY_START);
768         writel(tegra_sctx.mc[1], mc + MC_SECURITY_SIZE);
769         writel(tegra_sctx.mc[2], mc + MC_SECURITY_CFG2);
770 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
771         /* trigger emc mode write */
772         writel(EMC_MRW_DEV_NONE, emc + EMC_MRW_0);
773         /* clear scratch registers shared by suspend and the reset pen */
774         writel(0x0, pmc + PMC_SCRATCH39);
775 #endif
776         writel(0x0, pmc + PMC_SCRATCH41);
777
778         /* restore IRAM */
779         memcpy(iram_code, iram_save, iram_save_size);
780 }
781
782 static int tegra_suspend_prepare_late(void)
783 {
784 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
785         disable_irq(INT_SYS_STATS_MON);
786 #endif
787         return 0;
788 }
789
790 static void tegra_suspend_wake(void)
791 {
792 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
793         enable_irq(INT_SYS_STATS_MON);
794 #endif
795 }
796
797 static void tegra_pm_set(enum tegra_suspend_mode mode)
798 {
799         u32 reg, boot_flag;
800         unsigned long rate = 32768;
801
802         reg = readl(pmc + PMC_CTRL);
803         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
804         if (pdata->combined_req)
805                 reg &= ~TEGRA_POWER_PWRREQ_OE;
806         else
807                 reg |= TEGRA_POWER_PWRREQ_OE;
808         reg &= ~TEGRA_POWER_EFFECT_LP0;
809
810         switch (mode) {
811         case TEGRA_SUSPEND_LP0:
812 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
813                 rate = clk_get_rate_all_locked(tegra_pclk);
814 #endif
815                 if (pdata->combined_req) {
816                         reg |= TEGRA_POWER_PWRREQ_OE;
817                         reg &= ~TEGRA_POWER_CPU_PWRREQ_OE;
818                 }
819
820 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
821                 /*
822                  * LP0 boots through the AVP, which then resumes the AVP from
823                  * the address in SCRATCH39 and the CPU from the address in
824                  * SCRATCH41 (tegra_resume).
825                  */
826                 writel(0x0, pmc + PMC_SCRATCH39);
827 #endif
828
829                 /* Enable DPD sample to trigger sampling of pad data and the
830                  * direction in which pads will be driven during LP0 mode */
831                 writel(0x1, pmc + PMC_DPD_SAMPLE);
832 #if !defined(CONFIG_ARCH_TEGRA_3x_SOC) && !defined(CONFIG_ARCH_TEGRA_2x_SOC)
833                 writel(0x800fdfff, pmc + PMC_IO_DPD_REQ);
834                 writel(0x80001fff, pmc + PMC_IO_DPD2_REQ);
835 #endif
836
837 #ifdef CONFIG_ARCH_TEGRA_11x_SOC
838                 /* this is needed only for T11x, not for other chips */
839                 reg &= ~TEGRA_POWER_CPUPWRGOOD_EN;
840 #endif
841
842                 /* Set warmboot flag */
843                 boot_flag = readl(pmc + PMC_SCRATCH0);
844                 pmc_32kwritel(boot_flag | 1, PMC_SCRATCH0);
845
846                 pmc_32kwritel(tegra_lp0_vec_start, PMC_SCRATCH1);
847
848                 reg |= TEGRA_POWER_EFFECT_LP0;
849                 /* No break here. LP0 code falls through to write SCRATCH41 */
850         case TEGRA_SUSPEND_LP1:
851                 __raw_writel(virt_to_phys(tegra_resume), pmc + PMC_SCRATCH41);
852                 wmb();
853                 break;
854         case TEGRA_SUSPEND_LP2:
855                 rate = clk_get_rate(tegra_pclk);
856                 break;
857         case TEGRA_SUSPEND_NONE:
858                 return;
859         default:
860                 BUG();
861         }
862
863         set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer, rate);
864
865         pmc_32kwritel(reg, PMC_CTRL);
866 }
867
868 static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
869         [TEGRA_SUSPEND_NONE] = "none",
870         [TEGRA_SUSPEND_LP2] = "LP2",
871         [TEGRA_SUSPEND_LP1] = "LP1",
872         [TEGRA_SUSPEND_LP0] = "LP0",
873 };
874
875 static int tegra_suspend_enter(suspend_state_t state)
876 {
877         int ret;
878         ktime_t delta;
879         struct timespec ts_entry, ts_exit;
880
881         if (pdata && pdata->board_suspend)
882                 pdata->board_suspend(current_suspend_mode, TEGRA_SUSPEND_BEFORE_PERIPHERAL);
883
884         read_persistent_clock(&ts_entry);
885
886         ret = tegra_suspend_dram(current_suspend_mode, 0);
887         if (ret) {
888                 pr_info("Aborting suspend, tegra_suspend_dram error=%d\n", ret);
889                 goto abort_suspend;
890         }
891
892         read_persistent_clock(&ts_exit);
893
894         if (timespec_compare(&ts_exit, &ts_entry) > 0) {
895                 delta = timespec_to_ktime(timespec_sub(ts_exit, ts_entry));
896
897                 tegra_dvfs_rail_pause(tegra_cpu_rail, delta, false);
898                 if (current_suspend_mode == TEGRA_SUSPEND_LP0)
899                         tegra_dvfs_rail_pause(tegra_core_rail, delta, false);
900                 else
901                         tegra_dvfs_rail_pause(tegra_core_rail, delta, true);
902         }
903
904 abort_suspend:
905         if (pdata && pdata->board_resume)
906                 pdata->board_resume(current_suspend_mode, TEGRA_RESUME_AFTER_PERIPHERAL);
907
908         return ret;
909 }
910
911 static void tegra_suspend_check_pwr_stats(void)
912 {
913         /* cpus and l2 are powered off later */
914         unsigned long pwrgate_partid_mask =
915 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
916                 (1 << TEGRA_POWERGATE_HEG)      |
917                 (1 << TEGRA_POWERGATE_SATA)     |
918                 (1 << TEGRA_POWERGATE_3D1)      |
919 #endif
920                 (1 << TEGRA_POWERGATE_3D)       |
921                 (1 << TEGRA_POWERGATE_VENC)     |
922                 (1 << TEGRA_POWERGATE_PCIE)     |
923                 (1 << TEGRA_POWERGATE_VDEC)     |
924                 (1 << TEGRA_POWERGATE_MPE);
925
926         int partid;
927
928         for (partid = 0; partid < TEGRA_NUM_POWERGATE; partid++)
929                 if ((1 << partid) & pwrgate_partid_mask)
930                         if (tegra_powergate_is_powered(partid))
931                                 pr_warning("partition %s is left on before suspend\n",
932                                         tegra_powergate_get_name(partid));
933
934         return;
935 }
936
937 int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags)
938 {
939         int err = 0;
940         u32 scratch37 = 0xDEADBEEF;
941         u32 reg;
942
943         if (WARN_ON(mode <= TEGRA_SUSPEND_NONE ||
944                 mode >= TEGRA_MAX_SUSPEND_MODE)) {
945                 err = -ENXIO;
946                 goto fail;
947         }
948
949         if (tegra_is_voice_call_active()) {
950                 /* backup the current value of scratch37 */
951                 scratch37 = readl(pmc + PMC_SCRATCH37);
952
953                 /* If voice call is active, set a flag in PMC_SCRATCH37 */
954                 reg = TEGRA_POWER_LP1_AUDIO;
955                 pmc_32kwritel(reg, PMC_SCRATCH37);
956         }
957
958         if ((mode == TEGRA_SUSPEND_LP0) && !tegra_pm_irq_lp0_allowed()) {
959                 pr_info("LP0 not used due to unsupported wakeup events\n");
960                 mode = TEGRA_SUSPEND_LP1;
961         }
962
963         if ((mode == TEGRA_SUSPEND_LP0) || (mode == TEGRA_SUSPEND_LP1))
964                 tegra_suspend_check_pwr_stats();
965
966         tegra_common_suspend();
967
968         tegra_pm_set(mode);
969
970         if (pdata && pdata->board_suspend)
971                 pdata->board_suspend(mode, TEGRA_SUSPEND_BEFORE_CPU);
972
973         local_fiq_disable();
974
975         trace_cpu_suspend(CPU_SUSPEND_START, tegra_rtc_read_ms());
976
977         if (mode == TEGRA_SUSPEND_LP0) {
978 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
979                 reg = readl(pmc + PMC_SCRATCH4);
980                 if (is_lp_cluster())
981                         reg |= PMC_SCRATCH4_WAKE_CLUSTER_MASK;
982                 else
983                         reg &= (~PMC_SCRATCH4_WAKE_CLUSTER_MASK);
984                 pmc_32kwritel(reg, PMC_SCRATCH4);
985 #endif
986                 tegra_tsc_suspend();
987                 tegra_lp0_suspend_mc();
988                 tegra_cpu_reset_handler_save();
989                 tegra_tsc_wait_for_suspend();
990 #ifndef CONFIG_TEGRA_SIMULATION_PLATFORM
991                 tegra_smp_clear_power_mask();
992 #endif
993         }
994         else if (mode == TEGRA_SUSPEND_LP1)
995                 *iram_cpu_lp1_mask = 1;
996
997         suspend_cpu_complex(flags);
998
999 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
1000         /* In case of LP0/LP1, program external power gating accordingly */
1001         if (mode == TEGRA_SUSPEND_LP0 || mode == TEGRA_SUSPEND_LP1) {
1002                 reg = readl(FLOW_CTRL_CPU_CSR(0));
1003                 if (is_lp_cluster())
1004                         reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU; /* Non CPU */
1005                 else
1006                         reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;  /* CRAIL */
1007                 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(0));
1008         }
1009 #endif
1010
1011         flush_cache_all();
1012         outer_flush_all();
1013         outer_disable();
1014
1015         if (mode == TEGRA_SUSPEND_LP2)
1016                 tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
1017         else
1018                 tegra_sleep_core(mode, PHYS_OFFSET - PAGE_OFFSET);
1019
1020         tegra_init_cache(true);
1021
1022 #ifdef CONFIG_TRUSTED_FOUNDATIONS
1023 #ifndef CONFIG_ARCH_TEGRA_11x_SOC
1024         trace_smc_wake(tegra_resume_smc_entry_time, NVSEC_SMC_START);
1025         trace_smc_wake(tegra_resume_smc_exit_time, NVSEC_SMC_DONE);
1026 #endif
1027
1028         if (mode == TEGRA_SUSPEND_LP0) {
1029                 trace_secureos_init(tegra_resume_entry_time,
1030                         NVSEC_SUSPEND_EXIT_DONE);
1031         }
1032 #endif
1033
1034         if (mode == TEGRA_SUSPEND_LP0) {
1035
1036                 /* CPUPWRGOOD_EN is not enabled in HW, so keep this disabled; *
1037                 * otherwise it causes issues in cluster switches after LP0:   *
1038 #ifdef CONFIG_ARCH_TEGRA_11x_SOC
1039                 reg = readl(pmc+PMC_CTRL);
1040                 reg |= TEGRA_POWER_CPUPWRGOOD_EN;
1041                 pmc_32kwritel(reg, PMC_CTRL);
1042 #endif
1043                 */
1044
1045                 tegra_tsc_resume();
1046                 tegra_cpu_reset_handler_restore();
1047                 tegra_lp0_resume_mc();
1048                 tegra_tsc_wait_for_resume();
1049         } else if (mode == TEGRA_SUSPEND_LP1)
1050                 *iram_cpu_lp1_mask = 0;
1051
1052         /* if scratch37 was clobbered during LP1, restore it */
1053         if (scratch37 != 0xDEADBEEF)
1054                 pmc_32kwritel(scratch37, PMC_SCRATCH37);
1055
1056         restore_cpu_complex(flags);
1057
1058         /* for platforms where the core & CPU power requests are
1059          * combined as a single request to the PMU, transition out
1060          * of LP0 state by temporarily enabling both requests
1061          */
1062         if (mode == TEGRA_SUSPEND_LP0 && pdata->combined_req) {
1063                 reg = readl(pmc + PMC_CTRL);
1064                 reg |= TEGRA_POWER_CPU_PWRREQ_OE;
1065                 pmc_32kwritel(reg, PMC_CTRL);
1066                 reg &= ~TEGRA_POWER_PWRREQ_OE;
1067                 pmc_32kwritel(reg, PMC_CTRL);
1068         }
1069
1070         if (pdata && pdata->board_resume)
1071                 pdata->board_resume(mode, TEGRA_RESUME_AFTER_CPU);
1072
1073         trace_cpu_suspend(CPU_SUSPEND_DONE, tegra_rtc_read_ms());
1074
1075         local_fiq_enable();
1076
1077         tegra_common_resume();
1078
1079 fail:
1080         return err;
1081 }
1082
1083 /*
1084  * Function pointers to optional board specific function
1085  */
1086 void (*tegra_deep_sleep)(int);
1087 EXPORT_SYMBOL(tegra_deep_sleep);
1088
1089 static int tegra_suspend_prepare(void)
1090 {
1091         if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
1092                 tegra_deep_sleep(1);
1093         return 0;
1094 }
1095
1096 static void tegra_suspend_finish(void)
1097 {
1098         if (pdata && pdata->cpu_resume_boost) {
1099                 int ret = tegra_suspended_target(pdata->cpu_resume_boost);
1100                 pr_info("Tegra: resume CPU boost to %u KHz: %s (%d)\n",
1101                         pdata->cpu_resume_boost, ret ? "Failed" : "OK", ret);
1102         }
1103
1104         if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
1105                 tegra_deep_sleep(0);
1106 }
1107
1108 static const struct platform_suspend_ops tegra_suspend_ops = {
1109         .valid          = suspend_valid_only_mem,
1110         .prepare        = tegra_suspend_prepare,
1111         .finish         = tegra_suspend_finish,
1112         .prepare_late   = tegra_suspend_prepare_late,
1113         .wake           = tegra_suspend_wake,
1114         .enter          = tegra_suspend_enter,
1115 };
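
/*
 * For reference, the PM core invokes these callbacks roughly in the order
 * .valid -> .prepare -> .prepare_late -> .enter -> .wake -> .finish, so the
 * Tegra2 statistics-monitor IRQ is masked just before tegra_suspend_enter()
 * runs and unmasked again right after wakeup.
 */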
1116
1117 static ssize_t suspend_mode_show(struct kobject *kobj,
1118                                         struct kobj_attribute *attr, char *buf)
1119 {
1120         char *start = buf;
1121         char *end = buf + PAGE_SIZE;
1122
1123         start += scnprintf(start, end - start, "%s ", \
1124                                 tegra_suspend_name[current_suspend_mode]);
1125         start += scnprintf(start, end - start, "\n");
1126
1127         return start - buf;
1128 }
1129
1130 static ssize_t suspend_mode_store(struct kobject *kobj,
1131                                         struct kobj_attribute *attr,
1132                                         const char *buf, size_t n)
1133 {
1134         int len;
1135         const char *name_ptr;
1136         enum tegra_suspend_mode new_mode;
1137
1138         name_ptr = buf;
1139         while (*name_ptr && !isspace(*name_ptr))
1140                 name_ptr++;
1141         len = name_ptr - buf;
1142         if (!len)
1143                 goto bad_name;
1144         /* TEGRA_SUSPEND_NONE and TEGRA_SUSPEND_LP2 are not allowed as suspend states */
1145         if (!(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_NONE], len))
1146                 || !(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_LP2], len))) {
1147                 pr_info("Illegal tegra suspend state: %s\n", buf);
1148                 goto bad_name;
1149         }
1150
1151         for (new_mode = TEGRA_SUSPEND_NONE; \
1152                         new_mode < TEGRA_MAX_SUSPEND_MODE; ++new_mode) {
1153                 if (!strncmp(buf, tegra_suspend_name[new_mode], len)) {
1154                         current_suspend_mode = new_mode;
1155                         break;
1156                 }
1157         }
1158
1159 bad_name:
1160         return n;
1161 }
1162
1163 static struct kobj_attribute suspend_mode_attribute =
1164         __ATTR(mode, 0644, suspend_mode_show, suspend_mode_store);
1165
1166 static struct kobject *suspend_kobj;
1167
1168 static int tegra_pm_enter_suspend(void)
1169 {
1170         pr_info("Entering suspend state %s\n", lp_state[current_suspend_mode]);
1171         if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1172                 tegra_lp0_cpu_mode(true);
1173         return 0;
1174 }
1175
1176 static void tegra_pm_enter_resume(void)
1177 {
1178         if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1179                 tegra_lp0_cpu_mode(false);
1180         pr_info("Exited suspend state %s\n", lp_state[current_suspend_mode]);
1181 }
1182
1183 static struct syscore_ops tegra_pm_enter_syscore_ops = {
1184         .suspend = tegra_pm_enter_suspend,
1185         .resume = tegra_pm_enter_resume,
1186 };
1187
1188 static __init int tegra_pm_enter_syscore_init(void)
1189 {
1190         register_syscore_ops(&tegra_pm_enter_syscore_ops);
1191         return 0;
1192 }
1193 subsys_initcall(tegra_pm_enter_syscore_init);
1194 #endif
1195
1196 void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat)
1197 {
1198         u32 reg;
1199         u32 mode;
1200
1201 #ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
1202         tegra_dfll = clk_get_sys(NULL, "dfll_cpu");
1203         BUG_ON(IS_ERR(tegra_dfll));
1204 #endif
1205         tegra_pclk = clk_get_sys(NULL, "pclk");
1206         BUG_ON(IS_ERR(tegra_pclk));
1207         pdata = plat;
1208         (void)reg;
1209         (void)mode;
1210
1211         if (plat->suspend_mode == TEGRA_SUSPEND_LP2)
1212                 plat->suspend_mode = TEGRA_SUSPEND_LP0;
1213
1214 #ifndef CONFIG_PM_SLEEP
1215         if (plat->suspend_mode != TEGRA_SUSPEND_NONE) {
1216                 pr_warning("%s: Suspend requires CONFIG_PM_SLEEP -- "
1217                            "disabling suspend\n", __func__);
1218                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1219         }
1220 #else
1221         if (create_suspend_pgtable() < 0) {
1222                 pr_err("%s: PGD memory alloc failed -- LP0/LP1/LP2 unavailable\n",
1223                                 __func__);
1224                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1225                 goto fail;
1226         }
1227
1228         if ((tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) &&
1229             (tegra_revision == TEGRA_REVISION_A01) &&
1230             (plat->suspend_mode == TEGRA_SUSPEND_LP0)) {
1231                 /* Tegra 3 A01 supports only LP1 */
1232                 pr_warning("%s: Suspend mode LP0 is not supported on A01 "
1233                            "-- disabling LP0\n", __func__);
1234                 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1235         }
1236         if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && tegra_lp0_vec_size &&
1237                 tegra_lp0_vec_relocate) {
1238                 unsigned char *reloc_lp0;
1239                 unsigned long tmp;
1240                 void __iomem *orig;
1241                 reloc_lp0 = kmalloc(tegra_lp0_vec_size + L1_CACHE_BYTES - 1,
1242                                         GFP_KERNEL);
1243                 WARN_ON(!reloc_lp0);
1244                 if (!reloc_lp0) {
1245                         pr_err("%s: Failed to allocate reloc_lp0\n",
1246                                 __func__);
1247                         goto out;
1248                 }
1249
1250                 orig = ioremap(tegra_lp0_vec_start, tegra_lp0_vec_size);
1251                 WARN_ON(!orig);
1252                 if (!orig) {
1253                         pr_err("%s: Failed to map tegra_lp0_vec_start %08lx\n",
1254                                 __func__, tegra_lp0_vec_start);
1255                         kfree(reloc_lp0);
1256                         goto out;
1257                 }
1258
1259                 tmp = (unsigned long) reloc_lp0;
1260                 tmp = (tmp + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
1261                 reloc_lp0 = (unsigned char *)tmp;
1262                 memcpy(reloc_lp0, orig, tegra_lp0_vec_size);
1263                 iounmap(orig);
1264                 tegra_lp0_vec_start = virt_to_phys(reloc_lp0);
1265         }
1266
1267 out:
1268         if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && !tegra_lp0_vec_size) {
1269                 pr_warning("%s: Suspend mode LP0 requested, no lp0_vec "
1270                            "provided by bootloader -- disabling LP0\n",
1271                            __func__);
1272                 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1273         }
1274
1275         iram_save_size = tegra_iram_end() - tegra_iram_start();
1276
1277         iram_save = kmalloc(iram_save_size, GFP_KERNEL);
1278         if (!iram_save && (plat->suspend_mode >= TEGRA_SUSPEND_LP1)) {
1279                 pr_err("%s: unable to allocate memory for SDRAM self-refresh "
1280                        "-- LP0/LP1 unavailable\n", __func__);
1281                 plat->suspend_mode = TEGRA_SUSPEND_LP2;
1282         }
1283
1284 #ifdef CONFIG_TEGRA_LP1_950
1285         if (pdata->lp1_lowvolt_support) {
1286                 u32 lp1_core_lowvolt, lp1_core_highvolt;
1287                 memcpy(tegra_lp1_register_pmuslave_addr(), &pdata->pmuslave_addr, 4);
1288                 memcpy(tegra_lp1_register_i2c_base_addr(), &pdata->i2c_base_addr, 4);
1289
1290                 lp1_core_lowvolt = 0;
1291                 lp1_core_lowvolt = (pdata->lp1_core_volt_low << 8) | pdata->core_reg_addr;
1292                 memcpy(tegra_lp1_register_core_lowvolt(), &lp1_core_lowvolt, 4);
1293
1294                 lp1_core_highvolt = 0;
1295                 lp1_core_highvolt = (pdata->lp1_core_volt_high << 8) | pdata->core_reg_addr;
1296                 memcpy(tegra_lp1_register_core_highvolt(), &lp1_core_highvolt, 4);
1297         }
1298 #endif
1299         /* !!!FIXME!!! THIS IS TEGRA2 ONLY */
1300         /* Initialize scratch registers used for CPU LP2 synchronization */
1301         writel(0, pmc + PMC_SCRATCH37);
1302         writel(0, pmc + PMC_SCRATCH38);
1303 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
1304         writel(0, pmc + PMC_SCRATCH39);
1305 #endif
1306         writel(0, pmc + PMC_SCRATCH41);
1307
1308         /* Always enable CPU power request; only normal polarity is supported */
1309         reg = readl(pmc + PMC_CTRL);
1310         BUG_ON(reg & TEGRA_POWER_CPU_PWRREQ_POLARITY);
1311         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
1312         pmc_32kwritel(reg, PMC_CTRL);
1313
1314         /* Configure core power request and system clock control if LP0
1315            is supported */
1316         __raw_writel(pdata->core_timer, pmc + PMC_COREPWRGOOD_TIMER);
1317         __raw_writel(pdata->core_off_timer, pmc + PMC_COREPWROFF_TIMER);
1318
1319         reg = readl(pmc + PMC_CTRL);
1320
1321         if (!pdata->sysclkreq_high)
1322                 reg |= TEGRA_POWER_SYSCLK_POLARITY;
1323         else
1324                 reg &= ~TEGRA_POWER_SYSCLK_POLARITY;
1325
1326         if (!pdata->corereq_high)
1327                 reg |= TEGRA_POWER_PWRREQ_POLARITY;
1328         else
1329                 reg &= ~TEGRA_POWER_PWRREQ_POLARITY;
1330
1331         /* configure output inverters while the request is tristated */
1332         pmc_32kwritel(reg, PMC_CTRL);
1333
1334         /* now enable requests */
1335         reg |= TEGRA_POWER_SYSCLK_OE;
1336         if (!pdata->combined_req)
1337                 reg |= TEGRA_POWER_PWRREQ_OE;
1338         pmc_32kwritel(reg, PMC_CTRL);
1339
1340         if (pdata->suspend_mode == TEGRA_SUSPEND_LP0)
1341                 tegra_lp0_suspend_init();
1342
1343         suspend_set_ops(&tegra_suspend_ops);
1344
1345         /* Create /sys/power/suspend/mode */
1346         suspend_kobj = kobject_create_and_add("suspend", power_kobj);
1347         if (suspend_kobj) {
1348                 if (sysfs_create_file(suspend_kobj, \
1349                                                 &suspend_mode_attribute.attr))
1350                         pr_err("%s: sysfs_create_file suspend type failed!\n",
1351                                                                 __func__);
1352         }
1353
1354         iram_cpu_lp2_mask = tegra_cpu_lp2_mask;
1355         iram_cpu_lp1_mask = tegra_cpu_lp1_mask;
1356
1357         /* clear IO DPD settings left over from the bootloader */
1358         tegra_bl_io_dpd_cleanup();
1359
1360 fail:
1361 #endif
1362         if (plat->suspend_mode == TEGRA_SUSPEND_NONE)
1363                 tegra_pd_in_idle(false);
1364
1365         current_suspend_mode = plat->suspend_mode;
1366 }
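
/*
 * Board-side usage sketch (all field values below are illustrative
 * placeholders, not recommendations):
 *
 *	static struct tegra_suspend_platform_data board_suspend_data = {
 *		.cpu_timer		= 300,
 *		.cpu_off_timer		= 300,
 *		.suspend_mode		= TEGRA_SUSPEND_LP0,
 *		.cpu_lp2_min_residency	= 1000,
 *		.min_residency_crail	= 20000,
 *	};
 *
 *	tegra_init_suspend(&board_suspend_data);
 *
 * The min_residency_* overrides feed the tegra_min_residency_*() helpers near
 * the top of this file.
 */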
1367
1368 unsigned long debug_uart_port_base = 0;
1369 EXPORT_SYMBOL(debug_uart_port_base);
1370
1371 static int tegra_debug_uart_suspend(void)
1372 {
1373         void __iomem *uart;
1374         u32 lcr;
1375
1376         if (!debug_uart_port_base)
1377                 return 0;
1378
1379         uart = IO_ADDRESS(debug_uart_port_base);
1380
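	/*
	 * Tegra's 8250-compatible UART registers sit on 32-bit boundaries,
	 * hence the "* 4" byte offsets used below.
	 */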
1381         lcr = readb(uart + UART_LCR * 4);
1382
1383         tegra_sctx.uart[0] = lcr;
1384         tegra_sctx.uart[1] = readb(uart + UART_MCR * 4);
1385
1386         /* DLAB = 0 */
1387         writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1388
1389         tegra_sctx.uart[2] = readb(uart + UART_IER * 4);
1390
1391         /* DLAB = 1 */
1392         writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1393
1394         tegra_sctx.uart[3] = readb(uart + UART_DLL * 4);
1395         tegra_sctx.uart[4] = readb(uart + UART_DLM * 4);
1396
1397         writeb(lcr, uart + UART_LCR * 4);
1398
1399         return 0;
1400 }
1401
1402 static void tegra_debug_uart_resume(void)
1403 {
1404         void __iomem *uart;
1405         u32 lcr;
1406
1407         if (!debug_uart_port_base)
1408                 return;
1409
1410         uart = IO_ADDRESS(debug_uart_port_base);
1411
1412         lcr = tegra_sctx.uart[0];
1413
1414         writeb(tegra_sctx.uart[1], uart + UART_MCR * 4);
1415
1416         /* DLAB = 0 */
1417         writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1418
1419         writeb(UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_01 | UART_FCR_R_TRIG_01,
1420                         uart + UART_FCR * 4);
1421
1422         writeb(tegra_sctx.uart[2], uart + UART_IER * 4);
1423
1424         /* DLAB = 1 */
1425         writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1426
1427         writeb(tegra_sctx.uart[3], uart + UART_DLL * 4);
1428         writeb(tegra_sctx.uart[4], uart + UART_DLM * 4);
1429
1430         writeb(lcr, uart + UART_LCR * 4);
1431 }
1432
1433 static struct syscore_ops tegra_debug_uart_syscore_ops = {
1434         .suspend = tegra_debug_uart_suspend,
1435         .resume = tegra_debug_uart_resume,
1436 };
1437
1438 struct clk *debug_uart_clk = NULL;
1439 EXPORT_SYMBOL(debug_uart_clk);
1440
1441 void tegra_console_uart_suspend(void)
1442 {
1443         if (console_suspend_enabled && debug_uart_clk)
1444                 tegra_clk_disable_unprepare(debug_uart_clk);
1445 }
1446
1447 void tegra_console_uart_resume(void)
1448 {
1449         if (console_suspend_enabled && debug_uart_clk)
1450                 tegra_clk_prepare_enable(debug_uart_clk);
1451 }
1452
1453 static int tegra_debug_uart_syscore_init(void)
1454 {
1455         register_syscore_ops(&tegra_debug_uart_syscore_ops);
1456         return 0;
1457 }
1458 arch_initcall(tegra_debug_uart_syscore_init);