2 * arch/arm/mach-tegra/pm.c
4 * CPU complex suspend & resume functions for Tegra SoCs
6 * Copyright (c) 2009-2011, NVIDIA Corporation.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
23 #include <linux/kernel.h>
24 #include <linux/ctype.h>
25 #include <linux/init.h>
27 #include <linux/sched.h>
28 #include <linux/smp.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/clk.h>
32 #include <linux/err.h>
33 #include <linux/debugfs.h>
34 #include <linux/delay.h>
35 #include <linux/suspend.h>
36 #include <linux/earlysuspend.h>
37 #include <linux/slab.h>
38 #include <linux/serial_reg.h>
39 #include <linux/seq_file.h>
40 #include <linux/uaccess.h>
41 #include <linux/syscore_ops.h>
42 #include <linux/cpu_pm.h>
43 #include <linux/vmalloc.h>
44 #include <linux/memblock.h>
45 #include <linux/console.h>
46 #include <linux/pm_qos_params.h>
48 #include <asm/cacheflush.h>
49 #include <asm/hardware/cache-l2x0.h>
50 #include <asm/hardware/gic.h>
51 #include <asm/idmap.h>
52 #include <asm/localtimer.h>
53 #include <asm/pgalloc.h>
54 #include <asm/pgtable.h>
55 #include <asm/tlbflush.h>
58 #include <mach/iomap.h>
59 #include <mach/irqs.h>
60 #include <mach/powergate.h>
73 #include "cpu-tegra.h"
75 struct suspend_context {
77 * The next 7 values are referenced by offset in __restart_plls
78 * in headsmp-t2.S, and should not be moved
95 struct tegra_twd_context twd;
98 #ifdef CONFIG_PM_SLEEP
99 #if USE_TEGRA_CPU_SUSPEND
100 void *tegra_cpu_context; /* non-cacheable page for CPU context */
102 phys_addr_t tegra_pgd_phys; /* pgd used by hotplug & LP2 bootup */
103 static pgd_t *tegra_pgd;
104 static DEFINE_SPINLOCK(tegra_lp2_lock);
105 static cpumask_t tegra_in_lp2;
106 static cpumask_t *iram_cpu_lp2_mask;
107 static unsigned long *iram_cpu_lp1_mask;
108 static u8 *iram_save;
109 static unsigned long iram_save_size;
110 static void __iomem *iram_code = IO_ADDRESS(TEGRA_IRAM_CODE_AREA);
111 static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
112 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
113 static int tegra_last_pclk;
116 struct suspend_context tegra_sctx;
118 #define TEGRA_POWER_PWRREQ_POLARITY (1 << 8) /* core power request polarity */
119 #define TEGRA_POWER_PWRREQ_OE (1 << 9) /* core power request enable */
120 #define TEGRA_POWER_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */
121 #define TEGRA_POWER_SYSCLK_OE (1 << 11) /* system clock enable */
122 #define TEGRA_POWER_PWRGATE_DIS (1 << 12) /* power gate disabled */
123 #define TEGRA_POWER_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */
124 #define TEGRA_POWER_CPU_PWRREQ_POLARITY (1 << 15) /* CPU power request polarity */
125 #define TEGRA_POWER_CPU_PWRREQ_OE (1 << 16) /* CPU power request enable */
128 #define PMC_CTRL_LATCH_WAKEUPS (1 << 5)
129 #define PMC_WAKE_MASK 0xc
130 #define PMC_WAKE_LEVEL 0x10
131 #define PMC_DPAD_ORIDE 0x1C
132 #define PMC_WAKE_DELAY 0xe0
133 #define PMC_DPD_SAMPLE 0x20
135 #define PMC_WAKE_STATUS 0x14
136 #define PMC_SW_WAKE_STATUS 0x18
137 #define PMC_COREPWRGOOD_TIMER 0x3c
138 #define PMC_CPUPWRGOOD_TIMER 0xc8
139 #define PMC_CPUPWROFF_TIMER 0xcc
140 #define PMC_COREPWROFF_TIMER PMC_WAKE_DELAY
142 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
143 #define PMC_SCRATCH4_WAKE_CLUSTER_MASK (1<<31)
146 #define CLK_RESET_CCLK_BURST 0x20
147 #define CLK_RESET_CCLK_DIVIDER 0x24
148 #define CLK_RESET_PLLC_BASE 0x80
149 #define CLK_RESET_PLLM_BASE 0x90
150 #define CLK_RESET_PLLX_BASE 0xe0
151 #define CLK_RESET_PLLX_MISC 0xe4
152 #define CLK_RESET_PLLP_BASE 0xa0
153 #define CLK_RESET_PLLP_OUTA 0xa4
154 #define CLK_RESET_PLLP_OUTB 0xa8
155 #define CLK_RESET_PLLP_MISC 0xac
157 #define CLK_RESET_SOURCE_CSITE 0x1d4
159 #define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
160 #define CLK_RESET_CCLK_RUN_POLICY_SHIFT 4
161 #define CLK_RESET_CCLK_IDLE_POLICY_SHIFT 0
162 #define CLK_RESET_CCLK_IDLE_POLICY 1
163 #define CLK_RESET_CCLK_RUN_POLICY 2
164 #define CLK_RESET_CCLK_BURST_POLICY_PLLM 3
165 #define CLK_RESET_CCLK_BURST_POLICY_PLLX 8
167 #define EMC_MRW_0 0x0e8
168 #define EMC_MRW_DEV_SELECTN 30
169 #define EMC_MRW_DEV_NONE (3 << EMC_MRW_DEV_SELECTN)
171 #define MC_SECURITY_START 0x6c
172 #define MC_SECURITY_SIZE 0x70
173 #define MC_SECURITY_CFG2 0x7c
175 #define AWAKE_CPU_FREQ_MIN 100000
176 static struct pm_qos_request_list awake_cpu_freq_req;
178 struct dvfs_rail *tegra_cpu_rail;
179 static struct dvfs_rail *tegra_core_rail;
180 static struct clk *tegra_pclk;
181 static const struct tegra_suspend_platform_data *pdata;
182 static enum tegra_suspend_mode current_suspend_mode = TEGRA_SUSPEND_NONE;
184 #if defined(CONFIG_TEGRA_CLUSTER_CONTROL) && INSTRUMENT_CLUSTER_SWITCH
185 enum tegra_cluster_switch_time_id {
186 tegra_cluster_switch_time_id_start = 0,
187 tegra_cluster_switch_time_id_prolog,
188 tegra_cluster_switch_time_id_switch,
189 tegra_cluster_switch_time_id_epilog,
190 tegra_cluster_switch_time_id_max
194 tegra_cluster_switch_times[tegra_cluster_switch_time_id_max];
195 #define tegra_cluster_switch_time(flags, id) \
198 if (flags & TEGRA_POWER_CLUSTER_MASK) { \
199 void __iomem *timer_us = \
200 IO_ADDRESS(TEGRA_TMRUS_BASE); \
201 if (id < tegra_cluster_switch_time_id_max) \
202 tegra_cluster_switch_times[id] = \
209 #define tegra_cluster_switch_time(flags, id) do {} while(0)
212 #ifdef CONFIG_PM_SLEEP
213 static const char *tegra_suspend_name[TEGRA_MAX_SUSPEND_MODE] = {
214 [TEGRA_SUSPEND_NONE] = "none",
215 [TEGRA_SUSPEND_LP2] = "lp2",
216 [TEGRA_SUSPEND_LP1] = "lp1",
217 [TEGRA_SUSPEND_LP0] = "lp0",
/* Return the board-configured CPU power-good delay in microseconds.
 * WARNs once if called before tegra_init_suspend() registered pdata.
 * NOTE(review): interior lines are elided in this view (braces, the
 * !pdata early-return value); code kept byte-identical. */
220 unsigned long tegra_cpu_power_good_time(void)
222 if (WARN_ON_ONCE(!pdata))
225 return pdata->cpu_timer;
/* Return the board-configured CPU power-off delay in microseconds.
 * WARNs once if pdata is not yet registered (elided early-return). */
228 unsigned long tegra_cpu_power_off_time(void)
230 if (WARN_ON_ONCE(!pdata))
233 return pdata->cpu_off_timer;
/* Return the minimum residency (us) required for LP2 to be worthwhile,
 * as provided by the board's suspend platform data. */
236 unsigned long tegra_cpu_lp2_min_residency(void)
238 if (WARN_ON_ONCE(!pdata))
241 return pdata->cpu_lp2_min_residency;
245 * create_suspend_pgtable
247 * Creates a page table with identity mappings of physical memory and IRAM
248 * for use when the MMU is off, in addition to all the regular kernel mappings.
/* Build the private page directory (tegra_pgd) used while the MMU is
 * being turned off/on: identity maps of lowmem and IRAM on top of the
 * regular kernel mappings, so the resume/reset code can run from a
 * 1:1 mapping. tegra_pgd_phys is the value loaded into TTBR; the OR'd
 * 0x4A sets the TTBR attribute bits described by the comment below.
 * NOTE(review): the pgd_alloc failure check and return statement are
 * elided in this view. */
250 static __init int create_suspend_pgtable(void)
252 tegra_pgd = pgd_alloc(&init_mm);
256 /* Only identity-map size of lowmem (high_memory - PAGE_OFFSET) */
257 identity_mapping_add(tegra_pgd, PHYS_OFFSET,
258 PHYS_OFFSET + (unsigned long)high_memory - PAGE_OFFSET);
259 identity_mapping_add(tegra_pgd, IO_IRAM_PHYS,
260 IO_IRAM_PHYS + SECTION_SIZE);
262 /* inner/outer write-back/write-allocate, sharable */
263 tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK) | 0x4A;
269 * alloc_suspend_context
271 * Allocate a non-cacheable page to hold the CPU contexts.
272 * The standard ARM CPU context save functions don't work if there's
273 * an external L2 cache controller (like a PL310) in system.
/* Allocate one non-cacheable (bufferable, XN) page to hold the CPU
 * context across suspend, remap it via vm_map_ram(), and wire the
 * mapping into the private suspend pgd so it stays visible after the
 * TTBR switch. The outer cache is cleaned so the PL310 sees the new
 * page tables. Error paths (elided labels below) unmap/free the page
 * and clear tegra_cpu_context.
 * NOTE(review): pgd/pmd/pte declarations, pgd_present handling and the
 * return statements are elided in this sampled view. */
275 static __init int alloc_suspend_context(void)
277 #if USE_TEGRA_CPU_SUSPEND
278 pgprot_t prot = __pgprot_modify(pgprot_kernel, L_PTE_MT_MASK,
279 L_PTE_MT_BUFFERABLE | L_PTE_XN);
280 struct page *ctx_page;
281 unsigned long ctx_virt = 0;
286 ctx_page = alloc_pages(GFP_KERNEL, 0);
287 if (IS_ERR_OR_NULL(ctx_page))
290 tegra_cpu_context = vm_map_ram(&ctx_page, 1, -1, prot);
291 if (IS_ERR_OR_NULL(tegra_cpu_context))
294 /* Add the context page to our private pgd. */
295 ctx_virt = (unsigned long)tegra_cpu_context;
297 pgd = tegra_pgd + pgd_index(ctx_virt);
298 if (!pgd_present(*pgd))
300 pmd = pmd_offset(pgd, ctx_virt);
303 pte = pte_alloc_kernel(pmd, ctx_virt);
307 set_pte_ext(pte, mk_pte(ctx_page, prot), 0);
309 outer_clean_range(__pa(pmd), __pa(pmd + 1));
315 __free_page(ctx_page);
317 vm_unmap_ram((void*)ctx_virt, 1);
318 tegra_cpu_context = NULL;
325 /* ensures that sufficient time is passed for a register write to
326 * serialize into the 32KHz domain */
327 static void pmc_32kwritel(u32 val, unsigned long offs)
329 writel(val, pmc + offs);
/* Program the PMC CPU power-good / power-off timers, converting the
 * requested microsecond delays into pclk ticks (rounded up via the
 * +999999 before do_div by 1e6). Reprogramming is skipped when both
 * the pclk rate and us_off match the previously written values
 * (cached in tegra_last_pclk / last_us_off).
 * NOTE(review): the rate parameter declaration and the line assigning
 * pclk from rate are elided in this view. */
333 static void set_power_timers(unsigned long us_on, unsigned long us_off,
336 static unsigned long last_us_off = 0;
337 unsigned long long ticks;
338 unsigned long long pclk;
340 if (WARN_ON_ONCE(rate <= 0))
345 if ((rate != tegra_last_pclk) || (us_off != last_us_off)) {
346 ticks = (us_on * pclk) + 999999ull;
347 do_div(ticks, 1000000);
348 writel((unsigned long)ticks, pmc + PMC_CPUPWRGOOD_TIMER);
350 ticks = (us_off * pclk) + 999999ull;
351 do_div(ticks, 1000000);
352 writel((unsigned long)ticks, pmc + PMC_CPUPWROFF_TIMER);
355 tegra_last_pclk = pclk;
356 last_us_off = us_off;
360 * restore_cpu_complex
362 * restores cpu clock setting, clears flow controller
364 * Always called on CPU 0.
/* Undo suspend_cpu_complex(): restore PLLP (Tegra2 only), re-lock PLLX
 * if the CPU complex is not already running from it, restore the CCLK
 * burst policy/divider (unless we are mid cluster-switch), restore the
 * CoreSight clock source, clear the flow-controller power-gate enables
 * for this CPU, and resume the TWD local timer. Always runs on CPU 0.
 * NOTE(review): several lines (lock-enable bit OR, lock-bit poll mask,
 * the twd_ctrl conditional's exact brace structure) are elided here. */
366 static void restore_cpu_complex(u32 mode)
368 int cpu = smp_processor_id();
369 unsigned int reg, policy;
373 /* restore original PLL settings */
374 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
375 writel(tegra_sctx.pllp_misc, clk_rst + CLK_RESET_PLLP_MISC);
376 writel(tegra_sctx.pllp_base, clk_rst + CLK_RESET_PLLP_BASE);
377 writel(tegra_sctx.pllp_outa, clk_rst + CLK_RESET_PLLP_OUTA);
378 writel(tegra_sctx.pllp_outb, clk_rst + CLK_RESET_PLLP_OUTB);
381 /* Is CPU complex already running on PLLX? */
382 reg = readl(clk_rst + CLK_RESET_CCLK_BURST);
383 policy = (reg >> CLK_RESET_CCLK_BURST_POLICY_SHIFT) & 0xF;
385 if (policy == CLK_RESET_CCLK_IDLE_POLICY)
386 reg = (reg >> CLK_RESET_CCLK_IDLE_POLICY_SHIFT) & 0xF;
387 else if (policy == CLK_RESET_CCLK_RUN_POLICY)
388 reg = (reg >> CLK_RESET_CCLK_RUN_POLICY_SHIFT) & 0xF;
392 if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
393 /* restore PLLX settings if CPU is on different PLL */
394 writel(tegra_sctx.pllx_misc, clk_rst + CLK_RESET_PLLX_MISC);
395 writel(tegra_sctx.pllx_base, clk_rst + CLK_RESET_PLLX_BASE);
397 /* wait for PLL stabilization if PLLX was enabled */
398 if (tegra_sctx.pllx_base & (1<<30)) {
399 #if USE_PLL_LOCK_BITS
400 /* Enable lock detector */
401 reg = readl(clk_rst + CLK_RESET_PLLX_MISC);
403 writel(reg, clk_rst + CLK_RESET_PLLX_MISC);
404 while (!(readl(clk_rst + CLK_RESET_PLLX_BASE) &
408 udelay(PLL_POST_LOCK_DELAY);
415 /* Restore original burst policy setting for calls resulting from CPU
416 LP2 in idle or system suspend; keep cluster switch prolog setting
418 if (!(mode & TEGRA_POWER_CLUSTER_MASK)) {
419 writel(tegra_sctx.cclk_divider, clk_rst +
420 CLK_RESET_CCLK_DIVIDER);
421 writel(tegra_sctx.cpu_burst, clk_rst +
422 CLK_RESET_CCLK_BURST);
425 writel(tegra_sctx.clk_csite_src, clk_rst + CLK_RESET_SOURCE_CSITE);
427 /* Do not power-gate CPU 0 when flow controlled */
428 reg = readl(FLOW_CTRL_CPU_CSR(cpu));
429 reg &= ~FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfe bitmap */
430 reg &= ~FLOW_CTRL_CSR_WFI_BITMAP; /* clear wfi bitmap */
431 reg &= ~FLOW_CTRL_CSR_ENABLE; /* clear enable */
432 reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr */
433 reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event */
434 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
436 /* If an immedidate cluster switch is being perfomed, restore the
437 local timer registers. For calls resulting from CPU LP2 in
438 idle or system suspend, the local timer was shut down and
439 timekeeping switched over to the global system timer. In this
440 case keep local timer disabled, and restore only periodic load. */
441 if (!(mode & (TEGRA_POWER_CLUSTER_MASK |
442 TEGRA_POWER_CLUSTER_IMMEDIATE)))
443 tegra_sctx.twd.twd_ctrl = 0;
444 tegra_twd_resume(&tegra_sctx.twd);
448 * suspend_cpu_complex
450 * saves pll state for use by restart_plls, prepares flow controller for
451 * transition to suspend state
453 * Must always be called on cpu 0.
/* Prepare the CPU complex for a low-power transition: park CoreSight
 * on clk_m, snapshot all CPU clock/PLL registers into tegra_sctx for
 * restore_cpu_complex(), save the TWD local timer, arm the flow
 * controller to power-gate this CPU on WFE (Tegra2) or WFI (Tegra3),
 * and clear stale event/intr flags on all other CPUs. Must run on
 * CPU 0.
 * NOTE(review): the declarations of reg/i, the loop's skip-self test,
 * and closing braces are elided in this sampled view. */
455 static void suspend_cpu_complex(u32 mode)
457 int cpu = smp_processor_id();
463 /* switch coresite to clk_m, save off original source */
464 tegra_sctx.clk_csite_src = readl(clk_rst + CLK_RESET_SOURCE_CSITE);
465 writel(3<<30, clk_rst + CLK_RESET_SOURCE_CSITE);
467 tegra_sctx.cpu_burst = readl(clk_rst + CLK_RESET_CCLK_BURST);
468 tegra_sctx.pllx_base = readl(clk_rst + CLK_RESET_PLLX_BASE);
469 tegra_sctx.pllx_misc = readl(clk_rst + CLK_RESET_PLLX_MISC);
470 tegra_sctx.pllp_base = readl(clk_rst + CLK_RESET_PLLP_BASE);
471 tegra_sctx.pllp_outa = readl(clk_rst + CLK_RESET_PLLP_OUTA);
472 tegra_sctx.pllp_outb = readl(clk_rst + CLK_RESET_PLLP_OUTB);
473 tegra_sctx.pllp_misc = readl(clk_rst + CLK_RESET_PLLP_MISC);
474 tegra_sctx.cclk_divider = readl(clk_rst + CLK_RESET_CCLK_DIVIDER);
476 tegra_twd_suspend(&tegra_sctx.twd);
478 reg = readl(FLOW_CTRL_CPU_CSR(cpu));
479 reg &= ~FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfe bitmap */
480 reg &= ~FLOW_CTRL_CSR_WFI_BITMAP; /* clear wfi bitmap */
481 reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */
482 reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event flag */
483 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
484 reg |= FLOW_CTRL_CSR_WFE_CPU0 << cpu; /* enable power gating on wfe */
486 reg |= FLOW_CTRL_CSR_WFI_CPU0 << cpu; /* enable power gating on wfi */
488 reg |= FLOW_CTRL_CSR_ENABLE; /* enable power gating */
489 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
491 for (i = 0; i < num_possible_cpus(); i++) {
494 reg = readl(FLOW_CTRL_CPU_CSR(i));
495 reg |= FLOW_CTRL_CSR_EVENT_FLAG;
496 reg |= FLOW_CTRL_CSR_INTR_FLAG;
497 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(i));
500 tegra_gic_cpu_disable();
501 #ifndef CONFIG_ARCH_TEGRA_2x_SOC
502 /* Tegra3 enters LPx states via WFI - do not propagate legacy IRQs
503 to CPU core to avoid fall through WFI (IRQ-to-flow controller wake
504 path is not affected). */
505 tegra_gic_pass_through_disable();
/* Mark @cpu as having left LP2: clear its bit in tegra_in_lp2 under
 * tegra_lp2_lock and mirror the mask into IRAM for the reset handler
 * (plain writel because IRAM cannot be used with LDREX/STREX). BUGs
 * if the CPU was not previously marked as in LP2. */
509 void tegra_clear_cpu_in_lp2(int cpu)
511 spin_lock(&tegra_lp2_lock);
512 BUG_ON(!cpumask_test_cpu(cpu, &tegra_in_lp2));
513 cpumask_clear_cpu(cpu, &tegra_in_lp2);
515 /* Update the IRAM copy used by the reset handler. The IRAM copy
516 can't use used directly by cpumask_clear_cpu() because it uses
517 LDREX/STREX which requires the addressed location to be inner
518 cacheable and sharable which IRAM isn't. */
519 writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
522 spin_unlock(&tegra_lp2_lock);
/* Mark @cpu as entering LP2 and mirror the mask into IRAM. Returns
 * true (via last_cpu, whose assignment/return lines are elided here)
 * when CPU 0 is the last online CPU to enter LP2 — i.e. the whole
 * cluster can now power down. On Tegra2 the non-boot CPU path also
 * flags itself resettable-soon. */
525 bool tegra_set_cpu_in_lp2(int cpu)
527 bool last_cpu = false;
529 spin_lock(&tegra_lp2_lock);
530 BUG_ON(cpumask_test_cpu(cpu, &tegra_in_lp2));
531 cpumask_set_cpu(cpu, &tegra_in_lp2);
533 /* Update the IRAM copy used by the reset handler. The IRAM copy
534 can't use used directly by cpumask_set_cpu() because it uses
535 LDREX/STREX which requires the addressed location to be inner
536 cacheable and sharable which IRAM isn't. */
537 writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
540 if ((cpu == 0) && cpumask_equal(&tegra_in_lp2, cpu_online_mask))
542 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
544 tegra2_cpu_set_resettable_soon();
547 spin_unlock(&tegra_lp2_lock);
/* Final suspend steps performed by the last CPU entering LP2 from
 * idle (or during a cluster switch): program PMC power requests,
 * set power-good timers, optionally arm the LP2 wake timer, save the
 * CPU complex state, clean the kernel/IO page-table range from L2,
 * and execute tegra_sleep_cpu(). On wake it re-inits the L2 cache,
 * restores the CPU complex, and (optionally) reports cluster-switch
 * timing. Returns the remaining sleep time (remain — its declaration
 * and return are elided in this view).
 * NOTE(review): the pgd declaration used in the outer_clean_range
 * below and several braces/else lines are elided. */
551 unsigned int tegra_idle_lp2_last(unsigned int sleep_time, unsigned int flags)
553 u32 mode; /* hardware + software power mode flags */
557 /* Only the last cpu down does the final suspend steps */
558 mode = readl(pmc + PMC_CTRL);
559 mode |= TEGRA_POWER_CPU_PWRREQ_OE;
560 if (pdata->combined_req)
561 mode &= ~TEGRA_POWER_PWRREQ_OE;
563 mode |= TEGRA_POWER_PWRREQ_OE;
564 mode &= ~TEGRA_POWER_EFFECT_LP0;
565 pmc_32kwritel(mode, PMC_CTRL);
568 tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);
571 * We can use clk_get_rate_all_locked() here, because all other cpus
572 * are in LP2 state and irqs are disabled
574 if (flags & TEGRA_POWER_CLUSTER_MASK) {
575 set_power_timers(pdata->cpu_timer, 0,
576 clk_get_rate_all_locked(tegra_pclk));
577 tegra_cluster_switch_prolog(mode);
579 set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
580 clk_get_rate_all_locked(tegra_pclk));
584 tegra_lp2_set_trigger(sleep_time);
586 cpu_cluster_pm_enter();
587 suspend_cpu_complex(mode);
588 tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_prolog);
591 * No need to flush complete L2. Cleaning kernel and IO mappings
592 * is enough for the LP code sequence that has L2 disabled but
596 outer_clean_range(__pa(pgd + USER_PTRS_PER_PGD),
597 __pa(pgd + PTRS_PER_PGD));
600 tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
602 #ifdef CONFIG_CACHE_L2X0
603 tegra_init_cache(false);
606 tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_switch);
607 restore_cpu_complex(mode);
608 cpu_cluster_pm_exit();
610 remain = tegra_lp2_timer_remain();
612 tegra_lp2_set_trigger(0);
614 if (flags & TEGRA_POWER_CLUSTER_MASK)
615 tegra_cluster_switch_epilog(mode);
617 tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_epilog);
619 #if INSTRUMENT_CLUSTER_SWITCH
620 if (flags & TEGRA_POWER_CLUSTER_MASK) {
621 pr_err("%s: prolog %lu us, switch %lu us, epilog %lu us, total %lu us\n",
622 is_lp_cluster() ? "G=>LP" : "LP=>G",
623 tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog] -
624 tegra_cluster_switch_times[tegra_cluster_switch_time_id_start],
625 tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch] -
626 tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog],
627 tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
628 tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch],
629 tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
630 tegra_cluster_switch_times[tegra_cluster_switch_time_id_start]);
/* Common pre-suspend work for all LP modes: save the MC security
 * window registers into tegra_sctx, back up the IRAM code area into
 * iram_save, and copy the reset vector / SDRAM shutdown code into
 * IRAM in its place. Paired with tegra_common_resume(). */
636 static int tegra_common_suspend(void)
638 void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
640 tegra_sctx.mc[0] = readl(mc + MC_SECURITY_START);
641 tegra_sctx.mc[1] = readl(mc + MC_SECURITY_SIZE);
642 tegra_sctx.mc[2] = readl(mc + MC_SECURITY_CFG2);
644 /* copy the reset vector and SDRAM shutdown code into IRAM */
645 memcpy(iram_save, iram_code, iram_save_size);
646 memcpy(iram_code, tegra_iram_start(), iram_save_size);
/* Common post-resume work: clear the DPD sample enable, restore the
 * MC security window saved in tegra_common_suspend(), trigger an EMC
 * mode write and clear the scratch registers shared with the reset
 * handler (Tegra2 only), and put the original IRAM contents back. */
651 static void tegra_common_resume(void)
653 void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
654 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
655 void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
658 /* Clear DPD sample */
659 writel(0x0, pmc + PMC_DPD_SAMPLE);
661 writel(tegra_sctx.mc[0], mc + MC_SECURITY_START);
662 writel(tegra_sctx.mc[1], mc + MC_SECURITY_SIZE);
663 writel(tegra_sctx.mc[2], mc + MC_SECURITY_CFG2);
664 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
665 /* trigger emc mode write */
666 writel(EMC_MRW_DEV_NONE, emc + EMC_MRW_0);
668 /* clear scratch registers shared by suspend and the reset pen */
669 writel(0x0, pmc + PMC_SCRATCH39);
670 writel(0x0, pmc + PMC_SCRATCH41);
673 memcpy(iram_code, iram_save, iram_save_size);
/* .prepare_late suspend op: mask the statistics-monitor interrupt on
 * Tegra2 so it cannot fire during the final suspend sequence. */
676 static int tegra_suspend_prepare_late(void)
678 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
679 disable_irq(INT_SYS_STATS_MON);
/* .wake suspend op: re-enable the Tegra2 statistics-monitor interrupt
 * masked in tegra_suspend_prepare_late(). */
684 static void tegra_suspend_wake(void)
686 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
687 enable_irq(INT_SYS_STATS_MON);
/* Configure the PMC for the requested suspend mode. For LP0: set up
 * combined power requests, clear SCRATCH39 (AVP resume), enable DPD
 * pad sampling, set the warmboot flag in SCRATCH0, store the LP0
 * vector address in SCRATCH1 and deliberately fall through to the LP1
 * case to write the CPU resume address into SCRATCH41. Finally the
 * power-good timers are programmed against @rate (32 kHz default,
 * pclk for LP2/Tegra3-LP0) and the computed control word written.
 * NOTE(review): reg/boot_flag declarations, the switch(mode) line and
 * several braces are elided in this sampled view. */
691 static void tegra_pm_set(enum tegra_suspend_mode mode)
694 unsigned long rate = 32768;
696 reg = readl(pmc + PMC_CTRL);
697 reg |= TEGRA_POWER_CPU_PWRREQ_OE;
698 if (pdata->combined_req)
699 reg &= ~TEGRA_POWER_PWRREQ_OE;
701 reg |= TEGRA_POWER_PWRREQ_OE;
702 reg &= ~TEGRA_POWER_EFFECT_LP0;
705 case TEGRA_SUSPEND_LP0:
706 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
707 rate = clk_get_rate_all_locked(tegra_pclk);
709 if (pdata->combined_req) {
710 reg |= TEGRA_POWER_PWRREQ_OE;
711 reg &= ~TEGRA_POWER_CPU_PWRREQ_OE;
715 * LP0 boots through the AVP, which then resumes the AVP to
716 * the address in scratch 39, and the cpu to the address in
717 * scratch 41 to tegra_resume
719 writel(0x0, pmc + PMC_SCRATCH39);
721 /* Enable DPD sample to trigger sampling pads data and direction
722 * in which pad will be driven during lp0 mode*/
723 writel(0x1, pmc + PMC_DPD_SAMPLE);
725 /* Set warmboot flag */
726 boot_flag = readl(pmc + PMC_SCRATCH0);
727 pmc_32kwritel(boot_flag | 1, PMC_SCRATCH0);
729 pmc_32kwritel(tegra_lp0_vec_start, PMC_SCRATCH1);
731 reg |= TEGRA_POWER_EFFECT_LP0;
732 /* No break here. LP0 code falls through to write SCRATCH41 */
733 case TEGRA_SUSPEND_LP1:
734 __raw_writel(virt_to_phys(tegra_resume), pmc + PMC_SCRATCH41);
737 case TEGRA_SUSPEND_LP2:
738 rate = clk_get_rate(tegra_pclk);
740 case TEGRA_SUSPEND_NONE:
746 set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer, rate);
748 pmc_32kwritel(reg, PMC_CTRL);
751 static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
752 [TEGRA_SUSPEND_NONE] = "none",
753 [TEGRA_SUSPEND_LP2] = "LP2",
754 [TEGRA_SUSPEND_LP1] = "LP1",
755 [TEGRA_SUSPEND_LP0] = "LP0",
/* .enter suspend op: run board pre-suspend hook, call
 * tegra_suspend_dram() with the current mode, and on return account
 * the time spent suspended (persistent clock delta) to the DVFS rails
 * — core rail charged as "off" only for LP0. Runs board resume hook
 * last. Returns the tegra_suspend_dram() result (elided return).
 * NOTE(review): ret/delta declarations and some braces are elided. */
758 static int tegra_suspend_enter(suspend_state_t state)
762 struct timespec ts_entry, ts_exit;
764 if (pdata && pdata->board_suspend)
765 pdata->board_suspend(current_suspend_mode, TEGRA_SUSPEND_BEFORE_PERIPHERAL);
767 read_persistent_clock(&ts_entry);
769 ret = tegra_suspend_dram(current_suspend_mode, 0);
771 pr_info("Aborting suspend, tegra_suspend_dram error=%d\n", ret);
775 read_persistent_clock(&ts_exit);
777 if (timespec_compare(&ts_exit, &ts_entry) > 0) {
778 delta = timespec_to_ktime(timespec_sub(ts_exit, ts_entry));
780 tegra_dvfs_rail_pause(tegra_cpu_rail, delta, false);
781 if (current_suspend_mode == TEGRA_SUSPEND_LP0)
782 tegra_dvfs_rail_pause(tegra_core_rail, delta, false);
784 tegra_dvfs_rail_pause(tegra_core_rail, delta, true);
788 if (pdata && pdata->board_resume)
789 pdata->board_resume(current_suspend_mode, TEGRA_RESUME_AFTER_PERIPHERAL);
/* Debug aid before LP0/LP1 entry: warn about any powergate partition
 * (from the SoC-specific mask below) still powered, since those should
 * have been gated by their drivers before system suspend. CPU and L2
 * partitions are excluded — they power off later in the sequence. */
794 static void tegra_suspend_check_pwr_stats(void)
796 /* cpus and l2 are powered off later */
797 unsigned long pwrgate_partid_mask =
798 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
799 (1 << TEGRA_POWERGATE_HEG) |
800 (1 << TEGRA_POWERGATE_SATA) |
801 (1 << TEGRA_POWERGATE_3D1) |
803 (1 << TEGRA_POWERGATE_3D) |
804 (1 << TEGRA_POWERGATE_VENC) |
805 (1 << TEGRA_POWERGATE_PCIE) |
806 (1 << TEGRA_POWERGATE_VDEC) |
807 (1 << TEGRA_POWERGATE_MPE);
811 for (partid = 0; partid < TEGRA_NUM_POWERGATE; partid++)
812 if ((1 << partid) & pwrgate_partid_mask)
813 if (tegra_powergate_is_powered(partid))
814 pr_warning("partition %s is left on before suspend\n",
815 tegra_powergate_get_name(partid));
/* Core system-suspend entry: validate @mode (downgrading LP0 to LP1
 * when the wake configuration can't support it), save IRAM/MC state,
 * run board hooks, perform mode-specific prep (LP0: record wake
 * cluster in SCRATCH4, save MC + reset handler state; LP1: flag the
 * IRAM LP1 mask), suspend the CPU complex and sleep via
 * tegra_sleep_cpu()/tegra_sleep_core(). The resume half mirrors this:
 * cache re-init, mode-specific restore, CPU complex restore, and for
 * combined-request LP0 a two-step PMC_CTRL sequence to hand the core
 * power request back. Returns 0 or an error (return lines elided).
 * NOTE(review): many lines (tegra_pm_set call, braces, local decls)
 * are elided in this sampled view. */
820 int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags)
824 if (WARN_ON(mode <= TEGRA_SUSPEND_NONE ||
825 mode >= TEGRA_MAX_SUSPEND_MODE)) {
830 if ((mode == TEGRA_SUSPEND_LP0) && !tegra_pm_irq_lp0_allowed()) {
831 pr_info("LP0 not used due to unsupported wakeup events\n");
832 mode = TEGRA_SUSPEND_LP1;
835 if ((mode == TEGRA_SUSPEND_LP0) || (mode == TEGRA_SUSPEND_LP1))
836 tegra_suspend_check_pwr_stats();
838 tegra_common_suspend();
842 if (pdata && pdata->board_suspend)
843 pdata->board_suspend(mode, TEGRA_SUSPEND_BEFORE_CPU);
848 cpu_cluster_pm_enter();
850 if (mode == TEGRA_SUSPEND_LP0) {
851 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
852 u32 reg = readl(pmc + PMC_SCRATCH4);
854 reg |= PMC_SCRATCH4_WAKE_CLUSTER_MASK;
856 reg &= (~PMC_SCRATCH4_WAKE_CLUSTER_MASK);
857 pmc_32kwritel(reg, PMC_SCRATCH4);
859 tegra_lp0_suspend_mc();
860 tegra_cpu_reset_handler_save();
863 else if (mode == TEGRA_SUSPEND_LP1)
864 *iram_cpu_lp1_mask = 1;
866 suspend_cpu_complex(flags);
872 if (mode == TEGRA_SUSPEND_LP2)
873 tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
875 tegra_sleep_core(PHYS_OFFSET - PAGE_OFFSET);
877 tegra_init_cache(true);
879 if (mode == TEGRA_SUSPEND_LP0) {
880 tegra_cpu_reset_handler_restore();
881 tegra_lp0_resume_mc();
882 } else if (mode == TEGRA_SUSPEND_LP1)
883 *iram_cpu_lp1_mask = 0;
885 restore_cpu_complex(flags);
887 /* for platforms where the core & CPU power requests are
888 * combined as a single request to the PMU, transition out
889 * of LP0 state by temporarily enabling both requests
891 if (mode == TEGRA_SUSPEND_LP0 && pdata->combined_req) {
893 reg = readl(pmc + PMC_CTRL);
894 reg |= TEGRA_POWER_CPU_PWRREQ_OE;
895 pmc_32kwritel(reg, PMC_CTRL);
896 reg &= ~TEGRA_POWER_PWRREQ_OE;
897 pmc_32kwritel(reg, PMC_CTRL);
900 cpu_cluster_pm_exit();
903 if (pdata && pdata->board_resume)
904 pdata->board_resume(mode, TEGRA_RESUME_AFTER_CPU);
908 tegra_common_resume();
915 * Function pointers to optional board specific function
917 void (*tegra_deep_sleep)(int);
918 EXPORT_SYMBOL(tegra_deep_sleep);
/* .prepare suspend op: when entering LP0 and a board deep-sleep hook
 * is registered, invoke it (hook-call line elided in this view). */
920 static int tegra_suspend_prepare(void)
922 if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
/* .finish suspend op: optionally boost the CPU frequency right after
 * resume (pdata->cpu_resume_boost), then undo the deep-sleep hook for
 * LP0 (hook-call line elided in this view). */
927 static void tegra_suspend_finish(void)
929 if (pdata && pdata->cpu_resume_boost) {
930 int ret = tegra_suspended_target(pdata->cpu_resume_boost);
931 pr_info("Tegra: resume CPU boost to %u KHz: %s (%d)\n",
932 pdata->cpu_resume_boost, ret ? "Failed" : "OK", ret);
935 if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
/* Platform suspend callbacks registered via suspend_set_ops() in
 * tegra_init_suspend(). */
939 static const struct platform_suspend_ops tegra_suspend_ops = {
940 .valid = suspend_valid_only_mem,
941 .prepare = tegra_suspend_prepare,
942 .finish = tegra_suspend_finish,
943 .prepare_late = tegra_suspend_prepare_late,
944 .wake = tegra_suspend_wake,
945 .enter = tegra_suspend_enter,
/* sysfs show for /sys/power/suspend/mode: print the current suspend
 * mode name followed by a newline. (start declaration and the final
 * length return are elided in this view.) */
948 static ssize_t suspend_mode_show(struct kobject *kobj,
949 struct kobj_attribute *attr, char *buf)
952 char *end = buf + PAGE_SIZE;
954 start += scnprintf(start, end - start, "%s ", \
955 tegra_suspend_name[current_suspend_mode]);
956 start += scnprintf(start, end - start, "\n");
/* sysfs store for /sys/power/suspend/mode: parse the first
 * whitespace-terminated token from @buf and match it against
 * tegra_suspend_name[]. "none" and "lp2" are rejected as suspend
 * states. On a match, current_suspend_mode is updated.
 * NOTE(review): len declaration, name_ptr init, return statements and
 * closing braces are elided in this sampled view. */
961 static ssize_t suspend_mode_store(struct kobject *kobj,
962 struct kobj_attribute *attr,
963 const char *buf, size_t n)
966 const char *name_ptr;
967 enum tegra_suspend_mode new_mode;
970 while (*name_ptr && !isspace(*name_ptr))
972 len = name_ptr - buf;
975 /* TEGRA_SUSPEND_NONE not allowed as suspend state */
976 if (!(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_NONE], len))
977 || !(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_LP2], len))) {
978 pr_info("Illegal tegra suspend state: %s\n", buf);
982 for (new_mode = TEGRA_SUSPEND_NONE; \
983 new_mode < TEGRA_MAX_SUSPEND_MODE; ++new_mode) {
984 if (!strncmp(buf, tegra_suspend_name[new_mode], len)) {
985 current_suspend_mode = new_mode;
994 static struct kobj_attribute suspend_mode_attribute =
995 __ATTR(mode, 0644, suspend_mode_show, suspend_mode_store);
997 static struct kobject *suspend_kobj;
/* syscore suspend hook: log the target state and, for LP0, switch the
 * CPU into the mode required for LP0 entry. */
999 static int tegra_pm_enter_suspend(void)
1001 pr_info("Entering suspend state %s\n", lp_state[current_suspend_mode]);
1002 if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1003 tegra_lp0_cpu_mode(true);
/* syscore resume hook: undo the LP0 CPU mode switch, then log exit. */
1007 static void tegra_pm_enter_resume(void)
1009 if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1010 tegra_lp0_cpu_mode(false);
1011 pr_info("Exited suspend state %s\n", lp_state[current_suspend_mode]);
/* syscore ops wiring the LP0 CPU-mode switch around system suspend. */
1014 static struct syscore_ops tegra_pm_enter_syscore_ops = {
1015 .suspend = tegra_pm_enter_suspend,
1016 .resume = tegra_pm_enter_resume,
/* Register the suspend-entry syscore ops at subsys_initcall time. */
1019 static __init int tegra_pm_enter_syscore_init(void)
1021 register_syscore_ops(&tegra_pm_enter_syscore_ops);
1024 subsys_initcall(tegra_pm_enter_syscore_init);
/* One-time suspend subsystem init, called from board code with the
 * platform's suspend configuration. Responsibilities, in order:
 *  - look up DVFS rails, add the awake CPU-freq PM QoS request, and
 *    get pclk;
 *  - sanity-downgrade the requested suspend mode (no CONFIG_PM_SLEEP,
 *    pgtable/context allocation failure, Tegra3 A01's LP0 erratum,
 *    missing bootloader LP0 vector, IRAM save-buffer failure);
 *  - relocate the LP0 vector out of the bootloader area into
 *    cache-line-aligned kernel memory when requested;
 *  - clear the LP2-sync scratch registers (FIXME: Tegra2-only) and
 *    program PMC power-request polarity/enable bits and core timers;
 *  - register platform_suspend_ops and create the
 *    /sys/power/suspend/mode attribute; publish the IRAM cpu lp1/lp2
 *    mask pointers and record current_suspend_mode.
 * NOTE(review): pdata assignment, reg declaration, several braces,
 * kfree/iounmap error paths and #ifdef partners are elided in this
 * sampled view — code kept byte-identical. */
1027 void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat)
1032 tegra_cpu_rail = tegra_dvfs_get_rail_by_name("vdd_cpu");
1033 tegra_core_rail = tegra_dvfs_get_rail_by_name("vdd_core");
1034 pm_qos_add_request(&awake_cpu_freq_req, PM_QOS_CPU_FREQ_MIN,
1035 AWAKE_CPU_FREQ_MIN);
1037 tegra_pclk = clk_get_sys(NULL, "pclk");
1038 BUG_ON(IS_ERR(tegra_pclk));
1043 if (plat->suspend_mode == TEGRA_SUSPEND_LP2)
1044 plat->suspend_mode = TEGRA_SUSPEND_LP0;
1046 #ifndef CONFIG_PM_SLEEP
1047 if (plat->suspend_mode != TEGRA_SUSPEND_NONE) {
1048 pr_warning("%s: Suspend requires CONFIG_PM_SLEEP -- "
1049 "disabling suspend\n", __func__);
1050 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1053 if (create_suspend_pgtable() < 0) {
1054 pr_err("%s: PGD memory alloc failed -- LP0/LP1/LP2 unavailable\n",
1056 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1060 if (alloc_suspend_context() < 0) {
1061 pr_err("%s: CPU context alloc failed -- LP0/LP1/LP2 unavailable\n",
1063 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1067 if ((tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) &&
1068 (tegra_get_revision() == TEGRA_REVISION_A01) &&
1069 (plat->suspend_mode == TEGRA_SUSPEND_LP0)) {
1070 /* Tegra 3 A01 supports only LP1 */
1071 pr_warning("%s: Suspend mode LP0 is not supported on A01 "
1072 "-- disabling LP0\n", __func__);
1073 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1075 if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && tegra_lp0_vec_size &&
1076 tegra_lp0_vec_relocate) {
1077 unsigned char *reloc_lp0;
1080 reloc_lp0 = kmalloc(tegra_lp0_vec_size + L1_CACHE_BYTES - 1,
1082 WARN_ON(!reloc_lp0);
1084 pr_err("%s: Failed to allocate reloc_lp0\n",
1089 orig = ioremap(tegra_lp0_vec_start, tegra_lp0_vec_size);
1092 pr_err("%s: Failed to map tegra_lp0_vec_start %08lx\n",
1093 __func__, tegra_lp0_vec_start);
1098 tmp = (unsigned long) reloc_lp0;
1099 tmp = (tmp + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
1100 reloc_lp0 = (unsigned char *)tmp;
1101 memcpy(reloc_lp0, orig, tegra_lp0_vec_size);
1103 tegra_lp0_vec_start = virt_to_phys(reloc_lp0);
1107 if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && !tegra_lp0_vec_size) {
1108 pr_warning("%s: Suspend mode LP0 requested, no lp0_vec "
1109 "provided by bootlader -- disabling LP0\n",
1111 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1114 iram_save_size = tegra_iram_end() - tegra_iram_start();
1116 iram_save = kmalloc(iram_save_size, GFP_KERNEL);
1117 if (!iram_save && (plat->suspend_mode >= TEGRA_SUSPEND_LP1)) {
1118 pr_err("%s: unable to allocate memory for SDRAM self-refresh "
1119 "-- LP0/LP1 unavailable\n", __func__);
1120 plat->suspend_mode = TEGRA_SUSPEND_LP2;
1123 /* !!!FIXME!!! THIS IS TEGRA2 ONLY */
1124 /* Initialize scratch registers used for CPU LP2 synchronization */
1125 writel(0, pmc + PMC_SCRATCH37);
1126 writel(0, pmc + PMC_SCRATCH38);
1127 writel(0, pmc + PMC_SCRATCH39);
1128 writel(0, pmc + PMC_SCRATCH41);
1130 /* Always enable CPU power request; just normal polarity is supported */
1131 reg = readl(pmc + PMC_CTRL);
1132 BUG_ON(reg & TEGRA_POWER_CPU_PWRREQ_POLARITY);
1133 reg |= TEGRA_POWER_CPU_PWRREQ_OE;
1134 pmc_32kwritel(reg, PMC_CTRL);
1136 /* Configure core power request and system clock control if LP0
1138 __raw_writel(pdata->core_timer, pmc + PMC_COREPWRGOOD_TIMER);
1139 __raw_writel(pdata->core_off_timer, pmc + PMC_COREPWROFF_TIMER);
1141 reg = readl(pmc + PMC_CTRL);
1143 if (!pdata->sysclkreq_high)
1144 reg |= TEGRA_POWER_SYSCLK_POLARITY;
1146 reg &= ~TEGRA_POWER_SYSCLK_POLARITY;
1148 if (!pdata->corereq_high)
1149 reg |= TEGRA_POWER_PWRREQ_POLARITY;
1151 reg &= ~TEGRA_POWER_PWRREQ_POLARITY;
1153 /* configure output inverters while the request is tristated */
1154 pmc_32kwritel(reg, PMC_CTRL);
1156 /* now enable requests */
1157 reg |= TEGRA_POWER_SYSCLK_OE;
1158 if (!pdata->combined_req)
1159 reg |= TEGRA_POWER_PWRREQ_OE;
1160 pmc_32kwritel(reg, PMC_CTRL);
1162 if (pdata->suspend_mode == TEGRA_SUSPEND_LP0)
1163 tegra_lp0_suspend_init();
1165 suspend_set_ops(&tegra_suspend_ops);
1167 /* Create /sys/power/suspend/type */
1168 suspend_kobj = kobject_create_and_add("suspend", power_kobj);
1170 if (sysfs_create_file(suspend_kobj, \
1171 &suspend_mode_attribute.attr))
1172 pr_err("%s: sysfs_create_file suspend type failed!\n",
1176 iram_cpu_lp2_mask = tegra_cpu_lp2_mask;
1177 iram_cpu_lp1_mask = tegra_cpu_lp1_mask;
1180 if (plat->suspend_mode == TEGRA_SUSPEND_NONE)
1181 tegra_lp2_in_idle(false);
1183 current_suspend_mode = plat->suspend_mode;
1186 unsigned long debug_uart_port_base = 0;
1187 EXPORT_SYMBOL(debug_uart_port_base);
/* syscore suspend: save the debug UART state (LCR, MCR, IER with DLAB
 * cleared, DLL/DLM with DLAB set) into tegra_sctx.uart[] so the port
 * can be reprogrammed after resume. Registers are at 4-byte stride
 * (32-bit APB bus). No-op when no debug UART base was registered.
 * NOTE(review): uart/lcr declarations and return lines are elided. */
1189 static int tegra_debug_uart_suspend(void)
1194 if (!debug_uart_port_base)
1197 uart = IO_ADDRESS(debug_uart_port_base);
1199 lcr = readb(uart + UART_LCR * 4);
1201 tegra_sctx.uart[0] = lcr;
1202 tegra_sctx.uart[1] = readb(uart + UART_MCR * 4);
1205 writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1207 tegra_sctx.uart[2] = readb(uart + UART_IER * 4);
1210 writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1212 tegra_sctx.uart[3] = readb(uart + UART_DLL * 4);
1213 tegra_sctx.uart[4] = readb(uart + UART_DLM * 4);
1215 writeb(lcr, uart + UART_LCR * 4);
/* syscore resume: restore the debug UART from tegra_sctx.uart[] in
 * the reverse order of tegra_debug_uart_suspend(), additionally
 * re-enabling the FIFOs with low trigger levels. No-op when no debug
 * UART base was registered. */
1220 static void tegra_debug_uart_resume(void)
1225 if (!debug_uart_port_base)
1228 uart = IO_ADDRESS(debug_uart_port_base);
1230 lcr = tegra_sctx.uart[0];
1232 writeb(tegra_sctx.uart[1], uart + UART_MCR * 4);
1235 writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1237 writeb(UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_01 | UART_FCR_R_TRIG_01,
1238 uart + UART_FCR * 4);
1240 writeb(tegra_sctx.uart[2], uart + UART_IER * 4);
1243 writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1245 writeb(tegra_sctx.uart[3], uart + UART_DLL * 4);
1246 writeb(tegra_sctx.uart[4], uart + UART_DLM * 4);
1248 writeb(lcr, uart + UART_LCR * 4);
/* syscore ops preserving the debug UART across suspend/resume. */
1251 static struct syscore_ops tegra_debug_uart_syscore_ops = {
1252 .suspend = tegra_debug_uart_suspend,
1253 .resume = tegra_debug_uart_resume,
1256 struct clk *debug_uart_clk = NULL;
1257 EXPORT_SYMBOL(debug_uart_clk);
/* Gate the console UART clock on suspend, but only when the console
 * itself is being suspended (no_console_suspend disables this). */
1259 void tegra_console_uart_suspend(void)
1261 if (console_suspend_enabled && debug_uart_clk)
1262 clk_disable(debug_uart_clk);
/* Re-enable the console UART clock on resume; mirror of
 * tegra_console_uart_suspend(). */
1265 void tegra_console_uart_resume(void)
1267 if (console_suspend_enabled && debug_uart_clk)
1268 clk_enable(debug_uart_clk);
/* Register the debug UART save/restore syscore ops at arch_initcall
 * time. */
1271 static int tegra_debug_uart_syscore_init(void)
1273 register_syscore_ops(&tegra_debug_uart_syscore_ops);
1276 arch_initcall(tegra_debug_uart_syscore_init);
1278 #ifdef CONFIG_HAS_EARLYSUSPEND
/* earlysuspend hook: drop the awake CPU-freq QoS floor when the
 * screen turns off, letting cpufreq scale fully down. */
1279 static void pm_early_suspend(struct early_suspend *h)
1281 pm_qos_update_request(&awake_cpu_freq_req, PM_QOS_DEFAULT_VALUE);
/* late-resume hook: reinstate the AWAKE_CPU_FREQ_MIN QoS floor once
 * the screen is back on. */
1284 static void pm_late_resume(struct early_suspend *h)
1286 pm_qos_update_request(&awake_cpu_freq_req, (s32)AWAKE_CPU_FREQ_MIN);
/* earlysuspend registration glue for the QoS floor toggling above. */
1289 static struct early_suspend pm_early_suspender = {
1290 .suspend = pm_early_suspend,
1291 .resume = pm_late_resume,
/* Register the earlysuspend handlers at late_initcall time. */
1294 static int pm_init_wake_behavior(void)
1296 register_early_suspend(&pm_early_suspender);
1300 late_initcall(pm_init_wake_behavior);