ARM: tegra: PMC DT support
1 /*
2  * arch/arm/mach-tegra/pm.c
3  *
4  * CPU complex suspend & resume functions for Tegra SoCs
5  *
6  * Copyright (c) 2009-2013, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/kernel.h>
24 #include <linux/ctype.h>
25 #include <linux/init.h>
26 #include <linux/io.h>
27 #include <linux/sched.h>
28 #include <linux/smp.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/cpumask.h>
32 #include <linux/delay.h>
33 #include <linux/cpu_pm.h>
34 #include <linux/err.h>
35 #include <linux/debugfs.h>
37 #include <linux/suspend.h>
38 #include <linux/slab.h>
39 #include <linux/serial_reg.h>
40 #include <linux/seq_file.h>
41 #include <linux/uaccess.h>
42 #include <linux/syscore_ops.h>
44 #include <linux/clk/tegra.h>
45 #include <linux/export.h>
46 #include <linux/vmalloc.h>
47 #include <linux/memblock.h>
48 #include <linux/console.h>
49 #include <linux/tegra_audio.h>
50
51 #include <trace/events/power.h>
52 #include <trace/events/nvsecurity.h>
53
54 #include <asm/cacheflush.h>
55 #include <asm/idmap.h>
56 #include <asm/localtimer.h>
57 #include <asm/pgalloc.h>
58 #include <asm/pgtable.h>
59 #include <asm/tlbflush.h>
60 #include <asm/suspend.h>
61 #include <asm/smp_plat.h>
62
63 #include <mach/irqs.h>
64 #include <mach/powergate.h>
65 #include <mach/hardware.h>
66
67 #include "board.h"
68 #include "clock.h"
69 #include "common.h"
70 #include "cpuidle.h"
71 #include "fuse.h"
72 #include "gic.h"
73 #include "iomap.h"
74 #include "pm.h"
75 #include "pm-irq.h"
76 #include "reset.h"
77 #include "pmc.h"
78 #include "sleep.h"
79 #include "timer.h"
80 #include "dvfs.h"
81 #include "cpu-tegra.h"
82 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
83 #include "tegra14_scratch.h"
84 #endif
85
86 #define CREATE_TRACE_POINTS
87 #include <trace/events/nvpower.h>
88
89 struct suspend_context {
90         /*
91          * The next 7 values are referenced by offset in __restart_plls
92          * in headsmp-t2.S, and should not be moved
93          */
94         u32 pllx_misc;
95         u32 pllx_base;
96         u32 pllp_misc;
97         u32 pllp_base;
98         u32 pllp_outa;
99         u32 pllp_outb;
100         u32 pll_timeout;
101
102         u32 cpu_burst;
103         u32 clk_csite_src;
104         u32 cclk_divider;
105
106         u32 mc[3];
107         u8 uart[5];
108
109         struct tegra_twd_context twd;
110 };
112
113 #ifdef CONFIG_PM_SLEEP
114 phys_addr_t tegra_pgd_phys;     /* pgd used by hotplug & LP2 bootup */
115 static pgd_t *tegra_pgd;
116 static DEFINE_SPINLOCK(tegra_lp2_lock);
117 static cpumask_t tegra_in_lp2;
118 static cpumask_t *iram_cpu_lp2_mask;
119 static unsigned long *iram_cpu_lp1_mask;
120 static unsigned long *iram_mc_clk_mask;
121 static u8 *iram_save;
122 static unsigned long iram_save_size;
123 static void __iomem *iram_code = IO_ADDRESS(TEGRA_IRAM_CODE_AREA);
124 static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
125 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
126 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
127 static void __iomem *tert_ictlr = IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE);
128 #endif
129 static void __iomem *tmrus_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE);
130 static int tegra_last_pclk;
131 static u64 resume_time;
132 static u64 resume_entry_time;
133 static u64 suspend_time;
134 static u64 suspend_entry_time;
135 #endif
136
137 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
138 static void update_pmc_registers(unsigned long rate);
139 #endif
140
141 struct suspend_context tegra_sctx;
142 #if defined(CONFIG_CRYPTO_DEV_TEGRA_SE) && defined(CONFIG_ARCH_TEGRA_14x_SOC)
143 extern struct device *get_se_device(void);
144 extern int se_suspend(struct device *dev, bool pooling);
145 extern struct device *get_smmu_device(void);
146 extern int tegra_smmu_resume(struct device *dev);
147 extern int tegra_smmu_suspend(struct device *dev);
148 #endif
149
150 #define TEGRA_POWER_PWRREQ_POLARITY     (1 << 8)   /* core power request polarity */
151 #define TEGRA_POWER_PWRREQ_OE           (1 << 9)   /* core power request enable */
152 #define TEGRA_POWER_SYSCLK_POLARITY     (1 << 10)  /* sys clk polarity */
153 #define TEGRA_POWER_SYSCLK_OE           (1 << 11)  /* system clock enable */
154 #define TEGRA_POWER_PWRGATE_DIS         (1 << 12)  /* power gate disabled */
155 #define TEGRA_POWER_EFFECT_LP0          (1 << 14)  /* enter LP0 when CPU pwr gated */
156 #define TEGRA_POWER_CPU_PWRREQ_POLARITY (1 << 15)  /* CPU power request polarity */
157 #define TEGRA_POWER_CPU_PWRREQ_OE       (1 << 16)  /* CPU power request enable */
158 #define TEGRA_POWER_CPUPWRGOOD_EN       (1 << 19)  /* CPU power good enable */
159
160 #define TEGRA_DPAD_ORIDE_SYS_CLK_REQ    (1 << 21)
161
162 #define PMC_CTRL                0x0
163 #define PMC_CTRL_LATCH_WAKEUPS  (1 << 5)
164 #define PMC_WAKE_MASK           0xc
165 #define PMC_WAKE_LEVEL          0x10
166 #define PMC_DPAD_ORIDE          0x1C
167 #define PMC_WAKE_DELAY          0xe0
168 #define PMC_DPD_SAMPLE          0x20
169 #if defined(CONFIG_ARCH_TEGRA_14x_SOC) || defined(CONFIG_ARCH_TEGRA_12x_SOC)
170 #define PMC_DPD_ENABLE          0x24
171 #endif
172 #define PMC_IO_DPD_REQ          0x1B8
173 #define PMC_IO_DPD2_REQ         0x1C0
174
175
176 #define PMC_WAKE_STATUS         0x14
177 #define PMC_SW_WAKE_STATUS      0x18
178 #define PMC_COREPWRGOOD_TIMER   0x3c
179 #define PMC_CPUPWRGOOD_TIMER    0xc8
180 #define PMC_CPUPWROFF_TIMER     0xcc
181 #define PMC_COREPWROFF_TIMER    PMC_WAKE_DELAY
182
183 #define PMC_PWRGATE_TOGGLE      0x30
184 #define PWRGATE_TOGGLE_START    (1 << 8)
185 #define UN_PWRGATE_CPU          \
186         (PWRGATE_TOGGLE_START | TEGRA_CPU_POWERGATE_ID(TEGRA_POWERGATE_CPU))
187
188 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
189 #define PMC_SCRATCH4_WAKE_CLUSTER_MASK  (1<<31)
190 #endif
191
192 #define CLK_RESET_CCLK_BURST    0x20
193 #define CLK_RESET_CCLK_DIVIDER  0x24
194 #define CLK_RESET_PLLC_BASE     0x80
195 #define CLK_RESET_PLLM_BASE     0x90
196 #define CLK_RESET_PLLX_BASE     0xe0
197 #define CLK_RESET_PLLX_MISC     0xe4
198 #define CLK_RESET_PLLP_BASE     0xa0
199 #define CLK_RESET_PLLP_OUTA     0xa4
200 #define CLK_RESET_PLLP_OUTB     0xa8
201 #define CLK_RESET_PLLP_MISC     0xac
202
203 #define CLK_RESET_SOURCE_CSITE  0x1d4
204
205 #define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
206 #define CLK_RESET_CCLK_RUN_POLICY_SHIFT    4
207 #define CLK_RESET_CCLK_IDLE_POLICY_SHIFT   0
208 #define CLK_RESET_CCLK_IDLE_POLICY         1
209 #define CLK_RESET_CCLK_RUN_POLICY          2
210 #define CLK_RESET_CCLK_BURST_POLICY_PLLM   3
211 #define CLK_RESET_CCLK_BURST_POLICY_PLLX   8
212
213 #define EMC_MRW_0               0x0e8
214 #define EMC_MRW_DEV_SELECTN     30
215 #define EMC_MRW_DEV_NONE        (3 << EMC_MRW_DEV_SELECTN)
216
217 #define MC_SECURITY_START       0x6c
218 #define MC_SECURITY_SIZE        0x70
219 #define MC_SECURITY_CFG2        0x7c
220
221 #ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
222 static struct clk *tegra_dfll;
223 #endif
224 static struct clk *tegra_pclk;
225 static struct tegra_suspend_platform_data *pdata;
226 static enum tegra_suspend_mode current_suspend_mode = TEGRA_SUSPEND_NONE;
227
228 #if defined(CONFIG_TEGRA_CLUSTER_CONTROL) && INSTRUMENT_CLUSTER_SWITCH
229 static unsigned long
230                 tegra_cluster_switch_times[tegra_cluster_switch_time_id_max];
231 struct tegra_cluster_switch_time_stats {
232         unsigned long sum;
233         unsigned long avg;
234         unsigned long exp_avg;
235         unsigned long max;
236         int cnt;
237 };
238
239 static struct tegra_cluster_switch_time_stats lp2g_stats;
240 static struct tegra_cluster_switch_time_stats g2lp_stats;
241
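/*
 * tegra_cluster_switch_time
 *
 * Records a timestamp (in usec) for the given cluster-switch stage. When the
 * final stage is recorded, the total switch duration feeds the LP->G or G->LP
 * statistics: the maximum observed time, a windowed sum over
 * CLUSTER_SWITCH_AVG_SAMPLES samples, and an exponentially weighted average
 * of those windows.
 */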
242 void tegra_cluster_switch_time(unsigned int flags, int id)
243 {
244         unsigned long t;
245         struct tegra_cluster_switch_time_stats *stats;
246
247         if (!(flags & TEGRA_POWER_CLUSTER_MASK) ||
248             (id >= tegra_cluster_switch_time_id_max))
249                 return;
250
251         tegra_cluster_switch_times[id] = tegra_read_usec_raw();
252         wmb();
253         if (id != tegra_cluster_switch_time_id_end)
254                 return;
255
256         stats = flags & TEGRA_POWER_CLUSTER_G ? &lp2g_stats : &g2lp_stats;
257
258         t = tegra_cluster_switch_times[tegra_cluster_switch_time_id_end] -
259                 tegra_cluster_switch_times[tegra_cluster_switch_time_id_start];
260         if (stats->max < t)
261                 stats->max = t;
262
263         stats->sum += t;
264         stats->cnt++;
265         if (stats->cnt < CLUSTER_SWITCH_AVG_SAMPLES)
266                 return;
267
268         stats->avg = stats->sum;
269         stats->cnt = stats->sum = 0;
270         if (!stats->exp_avg) {
271                 stats->exp_avg = stats->avg;    /* 1st window sample */
272                 return;
273         }
274         stats->exp_avg = (stats->exp_avg * (CLUSTER_SWITCH_AVG_SAMPLES - 1) +
275                           stats->avg) >> CLUSTER_SWITCH_TIME_AVG_SHIFT;
276 }
277 #endif
278
279 #ifdef CONFIG_PM_SLEEP
280 static const char *tegra_suspend_name[TEGRA_MAX_SUSPEND_MODE] = {
281         [TEGRA_SUSPEND_NONE]    = "none",
282         [TEGRA_SUSPEND_LP2]     = "lp2",
283         [TEGRA_SUSPEND_LP1]     = "lp1",
284         [TEGRA_SUSPEND_LP0]     = "lp0",
285 };
286
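/*
 * Resume/suspend time logging: timestamps are read from the free-running
 * 1 usec timer (TIMERUS). If the 32-bit counter wrapped between entry and
 * exit, the exit value is extended past 2^32 so the difference stays
 * positive.
 */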
287 void tegra_log_resume_time(void)
288 {
289         u64 resume_end_time = readl(tmrus_reg_base + TIMERUS_CNTR_1US);
290
291         if (resume_entry_time > resume_end_time)
292                 resume_end_time |= 1ull<<32;
293         resume_time = resume_end_time - resume_entry_time;
294 }
295
296 void tegra_log_suspend_time(void)
297 {
298         suspend_entry_time = readl(tmrus_reg_base + TIMERUS_CNTR_1US);
299 }
300
301 static void tegra_get_suspend_time(void)
302 {
303         u64 suspend_end_time;
304         suspend_end_time = readl(tmrus_reg_base + TIMERUS_CNTR_1US);
305
306         if (suspend_entry_time > suspend_end_time)
307                 suspend_end_time |= 1ull<<32;
308         suspend_time = suspend_end_time - suspend_entry_time;
309 }
310
311 unsigned long tegra_cpu_power_good_time(void)
312 {
313         if (WARN_ON_ONCE(!pdata))
314                 return 5000;
315
316         return pdata->cpu_timer;
317 }
318
319 unsigned long tegra_cpu_power_off_time(void)
320 {
321         if (WARN_ON_ONCE(!pdata))
322                 return 5000;
323
324         return pdata->cpu_off_timer;
325 }
326
327 unsigned long tegra_cpu_lp2_min_residency(void)
328 {
329         if (WARN_ON_ONCE(!pdata))
330                 return 2000;
331
332         return pdata->cpu_lp2_min_residency;
333 }
334
335 unsigned long tegra_mc_clk_stop_min_residency(void)
336 {
337         return 20000;
338 }
339
340 #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE
341 #define TEGRA_MIN_RESIDENCY_VMIN_FMIN   2000
342 #define TEGRA_MIN_RESIDENCY_NCPU_SLOW   2000
343 #define TEGRA_MIN_RESIDENCY_NCPU_FAST   13000
344 #define TEGRA_MIN_RESIDENCY_CRAIL       20000
345
346 unsigned long tegra_min_residency_vmin_fmin(void)
347 {
348         return pdata && pdata->min_residency_vmin_fmin
349                         ? pdata->min_residency_vmin_fmin
350                         : TEGRA_MIN_RESIDENCY_VMIN_FMIN;
351 }
352
353 unsigned long tegra_min_residency_ncpu(void)
354 {
355         if (is_lp_cluster()) {
356                 return pdata && pdata->min_residency_ncpu_slow
357                         ? pdata->min_residency_ncpu_slow
358                         : TEGRA_MIN_RESIDENCY_NCPU_SLOW;
359         } else
360                 return pdata && pdata->min_residency_ncpu_fast
361                         ? pdata->min_residency_ncpu_fast
362                         : TEGRA_MIN_RESIDENCY_NCPU_FAST;
363 }
364
365 unsigned long tegra_min_residency_crail(void)
366 {
367         return pdata && pdata->min_residency_crail
368                         ? pdata->min_residency_crail
369                         : TEGRA_MIN_RESIDENCY_CRAIL;
370 }
371
372 bool tegra_crail_can_start_early(void)
373 {
374         return pdata && pdata->crail_up_early;
375 }
376 #endif
377
378 static void suspend_cpu_dfll_mode(unsigned int flags)
379 {
380 #ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
381         /* If DFLL is used as CPU clock source go to open loop mode */
382         if (!(flags & TEGRA_POWER_CLUSTER_MASK)) {
383                 if (!is_lp_cluster() && tegra_dfll &&
384                     tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
385                         tegra_clk_cfg_ex(tegra_dfll, TEGRA_CLK_DFLL_LOCK, 0);
386         }
387
388         /* Suspend dfll bypass (safe rail down) on LP or if DFLL is not used */
389         if (pdata && pdata->suspend_dfll_bypass &&
390             (!tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail) || is_lp_cluster()))
391                 pdata->suspend_dfll_bypass();
392 #endif
393 }
394
395 static void resume_cpu_dfll_mode(unsigned int flags)
396 {
397 #ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
398         /* If DFLL is not used and we resume on the G cluster, restore bypass mode */
399         if (pdata && pdata->resume_dfll_bypass && !is_lp_cluster() &&
400             !tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
401                 pdata->resume_dfll_bypass();
402
403         /* If DFLL is used as CPU clock source restore closed loop mode */
404         if (!(flags & TEGRA_POWER_CLUSTER_MASK)) {
405                 if (!is_lp_cluster() && tegra_dfll &&
406                     tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
407                         tegra_clk_cfg_ex(tegra_dfll, TEGRA_CLK_DFLL_LOCK, 1);
408         }
409 #endif
410 }
411
412 /*
413  * create_suspend_pgtable
414  *
415  * Creates a page table with identity mappings of physical memory and IRAM
416  * for use when the MMU is off, in addition to all the regular kernel mappings.
417  */
418 static __init int create_suspend_pgtable(void)
419 {
420         tegra_pgd = pgd_alloc(&init_mm);
421         if (!tegra_pgd)
422                 return -ENOMEM;
423
424         /* Only identity-map size of lowmem (high_memory - PAGE_OFFSET) */
425         identity_mapping_add(tegra_pgd, phys_to_virt(PHYS_OFFSET),
426                 high_memory, 0);
427         identity_mapping_add(tegra_pgd, IO_IRAM_VIRT,
428                 IO_IRAM_VIRT + SECTION_SIZE, 0);
429
430 #if defined(CONFIG_ARM_LPAE)
431         tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK);
432 #else
433         /* inner/outer write-back/write-allocate, sharable */
434         tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK) | 0x4A;
435 #endif
436
437         return 0;
438 }
439
440 /* ensures that sufficient time has passed for a register write to
441  * serialize into the 32KHz domain */
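/*
 * One 32KHz period is ~30.5us, so the 130us delay below spans a bit more
 * than four periods of the 32KHz domain mentioned above.
 */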
442 static void pmc_32kwritel(u32 val, unsigned long offs)
443 {
444         writel(val, pmc + offs);
445         udelay(130);
446 }
447
448 #if !defined(CONFIG_OF) || !defined(CONFIG_COMMON_CLK)
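/*
 * set_power_timers
 *
 * Converts the requested CPU power-good and power-off times from
 * microseconds to pclk ticks, rounding up:
 *
 *     ticks = (us * pclk + 999999) / 1000000
 *
 * and programs PMC_CPUPWRGOOD_TIMER / PMC_CPUPWROFF_TIMER. The register
 * writes are skipped when neither the pclk rate nor the off time has
 * changed since the last call.
 */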
449 static void set_power_timers(unsigned long us_on, unsigned long us_off,
450                              long rate)
451 {
452         static unsigned long last_us_off = 0;
453         unsigned long long ticks;
454         unsigned long long pclk;
455
456         if (WARN_ON_ONCE(rate <= 0))
457                 pclk = 100000000;
458         else
459                 pclk = rate;
460
461         if ((rate != tegra_last_pclk) || (us_off != last_us_off)) {
462                 ticks = (us_on * pclk) + 999999ull;
463                 do_div(ticks, 1000000);
464                 writel((unsigned long)ticks, pmc + PMC_CPUPWRGOOD_TIMER);
465
466                 ticks = (us_off * pclk) + 999999ull;
467                 do_div(ticks, 1000000);
468                 writel((unsigned long)ticks, pmc + PMC_CPUPWROFF_TIMER);
469                 wmb();
470         }
471         tegra_last_pclk = pclk;
472         last_us_off = us_off;
473 }
474 #endif
475
476 void tegra_limit_cpu_power_timers(unsigned long us_on, unsigned long us_off)
477 {
478         /* make sure the power timers do not exceed the specified limits */
479         set_power_timers(us_on, us_off, clk_get_min_rate(tegra_pclk));
480 }
481
482 void (*tegra_tear_down_cpu)(void);
483
484 /*
485  * restore_cpu_complex
486  *
487  * restores cpu clock setting, clears flow controller
488  *
489  * Always called on CPU 0.
490  */
491 static void restore_cpu_complex(u32 mode)
492 {
493         int cpu = cpu_logical_map(smp_processor_id());
494         unsigned int reg;
495 #if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
496         unsigned int policy;
497 #endif
498
499 /*
500  * On Tegra11x, PLLX and the CPU burst policy are either preserved across LP2
501  * or restored by the common clock suspend/resume procedures, so they don't
502  * need to be handled here.
503  */
504 #if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
505         /* Is CPU complex already running on PLLX? */
506         reg = readl(clk_rst + CLK_RESET_CCLK_BURST);
507         policy = (reg >> CLK_RESET_CCLK_BURST_POLICY_SHIFT) & 0xF;
508
509         if (policy == CLK_RESET_CCLK_IDLE_POLICY)
510                 reg = (reg >> CLK_RESET_CCLK_IDLE_POLICY_SHIFT) & 0xF;
511         else if (policy == CLK_RESET_CCLK_RUN_POLICY)
512                 reg = (reg >> CLK_RESET_CCLK_RUN_POLICY_SHIFT) & 0xF;
513         else
514                 BUG();
515
516         if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
517                 /* restore PLLX settings if CPU is on different PLL */
518                 writel(tegra_sctx.pllx_misc, clk_rst + CLK_RESET_PLLX_MISC);
519                 writel(tegra_sctx.pllx_base, clk_rst + CLK_RESET_PLLX_BASE);
520
521                 /* wait for PLL stabilization if PLLX was enabled */
522                 if (tegra_sctx.pllx_base & (1<<30)) {
523 #if USE_PLL_LOCK_BITS
524                         /* Enable lock detector */
525                         reg = readl(clk_rst + CLK_RESET_PLLX_MISC);
526                         reg |= 1<<18;
527                         writel(reg, clk_rst + CLK_RESET_PLLX_MISC);
528                         while (!(readl(clk_rst + CLK_RESET_PLLX_BASE) &
529                                  (1<<27)))
530                                 cpu_relax();
531
532                         udelay(PLL_POST_LOCK_DELAY);
533 #else
534                         udelay(300);
535 #endif
536                 }
537         }
538
539         /* Restore original burst policy setting for calls resulting from CPU
540            LP2 in idle or system suspend; keep cluster switch prolog setting
541            intact. */
542         if (!(mode & TEGRA_POWER_CLUSTER_MASK)) {
543                 writel(tegra_sctx.cclk_divider, clk_rst +
544                        CLK_RESET_CCLK_DIVIDER);
545                 writel(tegra_sctx.cpu_burst, clk_rst +
546                        CLK_RESET_CCLK_BURST);
547         }
548 #endif
549         writel(tegra_sctx.clk_csite_src, clk_rst + CLK_RESET_SOURCE_CSITE);
550
551         /* Do not power-gate CPU 0 when flow controlled */
552         reg = readl(FLOW_CTRL_CPU_CSR(cpu));
553         reg &= ~FLOW_CTRL_CSR_WFE_BITMAP;       /* clear wfe bitmap */
554         reg &= ~FLOW_CTRL_CSR_WFI_BITMAP;       /* clear wfi bitmap */
555         reg &= ~FLOW_CTRL_CSR_ENABLE;           /* clear enable */
556         reg |= FLOW_CTRL_CSR_INTR_FLAG;         /* clear intr */
557         reg |= FLOW_CTRL_CSR_EVENT_FLAG;        /* clear event */
558         flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
559
560         /* If an immediate cluster switch is being performed, restore the
561            local timer registers. For calls resulting from CPU LP2 in
562            idle or system suspend, the local timer was shut down and
563            timekeeping switched over to the global system timer. In this
564            case keep local timer disabled, and restore only periodic load. */
565 #ifdef CONFIG_HAVE_ARM_TWD
566         if (!(mode & (TEGRA_POWER_CLUSTER_MASK |
567                       TEGRA_POWER_CLUSTER_IMMEDIATE))) {
568                 tegra_sctx.twd.twd_ctrl = 0;
569         }
570         tegra_twd_resume(&tegra_sctx.twd);
571 #endif
572 }
573
574 /*
575  * suspend_cpu_complex
576  *
577  * saves pll state for use by restart_plls, prepares flow controller for
578  * transition to suspend state
579  *
580  * Must always be called on cpu 0.
581  */
582 static void suspend_cpu_complex(u32 mode)
583 {
584         int cpu = cpu_logical_map(smp_processor_id());
585         unsigned int reg;
586         int i;
587
588         BUG_ON(cpu != 0);
589
590         /* switch coresite to clk_m, save off original source */
591         tegra_sctx.clk_csite_src = readl(clk_rst + CLK_RESET_SOURCE_CSITE);
592         writel(3<<30, clk_rst + CLK_RESET_SOURCE_CSITE);
593
594         tegra_sctx.cpu_burst = readl(clk_rst + CLK_RESET_CCLK_BURST);
595         tegra_sctx.pllx_base = readl(clk_rst + CLK_RESET_PLLX_BASE);
596         tegra_sctx.pllx_misc = readl(clk_rst + CLK_RESET_PLLX_MISC);
597         tegra_sctx.pllp_base = readl(clk_rst + CLK_RESET_PLLP_BASE);
598         tegra_sctx.pllp_outa = readl(clk_rst + CLK_RESET_PLLP_OUTA);
599         tegra_sctx.pllp_outb = readl(clk_rst + CLK_RESET_PLLP_OUTB);
600         tegra_sctx.pllp_misc = readl(clk_rst + CLK_RESET_PLLP_MISC);
601         tegra_sctx.cclk_divider = readl(clk_rst + CLK_RESET_CCLK_DIVIDER);
602
603 #ifdef CONFIG_HAVE_ARM_TWD
604         tegra_twd_suspend(&tegra_sctx.twd);
605 #endif
606
607         reg = readl(FLOW_CTRL_CPU_CSR(cpu));
608         reg &= ~FLOW_CTRL_CSR_WFE_BITMAP;       /* clear wfe bitmap */
609         reg &= ~FLOW_CTRL_CSR_WFI_BITMAP;       /* clear wfi bitmap */
610         reg |= FLOW_CTRL_CSR_INTR_FLAG;         /* clear intr flag */
611         reg |= FLOW_CTRL_CSR_EVENT_FLAG;        /* clear event flag */
612 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
613         reg |= FLOW_CTRL_CSR_WFE_CPU0 << cpu;   /* enable power gating on wfe */
614 #else
615         reg |= FLOW_CTRL_CSR_WFI_CPU0 << cpu;   /* enable power gating on wfi */
616 #endif
617         reg |= FLOW_CTRL_CSR_ENABLE;            /* enable power gating */
618         flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(cpu));
619
620         for (i = 0; i < num_possible_cpus(); i++) {
621                 if (i == cpu)
622                         continue;
623                 reg = readl(FLOW_CTRL_CPU_CSR(i));
624                 reg |= FLOW_CTRL_CSR_EVENT_FLAG;
625                 reg |= FLOW_CTRL_CSR_INTR_FLAG;
626                 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(i));
627         }
628
629         tegra_gic_cpu_disable(true);
630 }
631
632 void tegra_clear_cpu_in_pd(int cpu)
633 {
634         spin_lock(&tegra_lp2_lock);
635         BUG_ON(!cpumask_test_cpu(cpu, &tegra_in_lp2));
636         cpumask_clear_cpu(cpu, &tegra_in_lp2);
637
638         /* Update the IRAM copy used by the reset handler. The IRAM copy
639            can't be used directly by cpumask_clear_cpu() because it uses
640            LDREX/STREX which requires the addressed location to be inner
641            cacheable and sharable which IRAM isn't. */
642         writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
643         dsb();
644
645         spin_unlock(&tegra_lp2_lock);
646 }
647
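/*
 * tegra_set_cpu_in_pd
 *
 * Marks @cpu as entering powerdown and mirrors the updated mask into IRAM
 * for the reset handler. Returns true only on CPU 0 when every online CPU
 * is now in LP2, i.e. when the caller is the last CPU down and may perform
 * the cluster-level power-down.
 */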
648 bool tegra_set_cpu_in_pd(int cpu)
649 {
650         bool last_cpu = false;
651
652         spin_lock(&tegra_lp2_lock);
653         BUG_ON(cpumask_test_cpu(cpu, &tegra_in_lp2));
654         cpumask_set_cpu(cpu, &tegra_in_lp2);
655
656         /* Update the IRAM copy used by the reset handler. The IRAM copy
657            can't be used directly by cpumask_set_cpu() because it uses
658            LDREX/STREX which requires the addressed location to be inner
659            cacheable and sharable which IRAM isn't. */
660         writel(tegra_in_lp2.bits[0], iram_cpu_lp2_mask);
661         dsb();
662
663         if ((cpu == 0) && cpumask_equal(&tegra_in_lp2, cpu_online_mask))
664                 last_cpu = true;
665 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
666         else if (cpu == 1)
667                 tegra2_cpu_set_resettable_soon();
668 #endif
669
670         spin_unlock(&tegra_lp2_lock);
671         return last_cpu;
672 }
673
674 static void tegra_sleep_core(enum tegra_suspend_mode mode,
675                              unsigned long v2p)
676 {
677 #ifdef CONFIG_TEGRA_USE_SECURE_KERNEL
678         outer_flush_range(__pa(&tegra_resume_timestamps_start),
679                           __pa(&tegra_resume_timestamps_end));
680
681         if (mode == TEGRA_SUSPEND_LP0) {
682                 trace_smc_sleep_core(NVSEC_SMC_START);
683
684                 tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE3,
685                                   virt_to_phys(tegra_resume));
686         } else {
687                 trace_smc_sleep_core(NVSEC_SMC_START);
688
689                 tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE6,
690                                   (TEGRA_RESET_HANDLER_BASE +
691                                    tegra_cpu_reset_handler_offset));
692         }
693
694         trace_smc_sleep_core(NVSEC_SMC_DONE);
695 #endif
696         tegra_get_suspend_time();
697 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
698         cpu_suspend(v2p, tegra2_sleep_core_finish);
699 #else
700         cpu_suspend(v2p, tegra3_sleep_core_finish);
701 #endif
702 }
703
704 static inline void tegra_sleep_cpu(unsigned long v2p)
705 {
706         cpu_suspend(v2p, tegra_sleep_cpu_finish);
707 }
708
709 static inline void tegra_stop_mc_clk(unsigned long v2p)
710 {
711 #ifdef CONFIG_TEGRA_USE_SECURE_KERNEL
712         outer_flush_range(__pa(&tegra_resume_timestamps_start),
713                           __pa(&tegra_resume_timestamps_end));
714         trace_smc_sleep_core(NVSEC_SMC_START);
715
716         tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE5,
717                           (TEGRA_RESET_HANDLER_BASE +
718                            tegra_cpu_reset_handler_offset));
719
720         trace_smc_sleep_core(NVSEC_SMC_DONE);
721 #endif
722         cpu_suspend(v2p, tegra3_stop_mc_clk_finish);
723 }
724
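/*
 * tegra_idle_power_down_last
 *
 * Runs on the last CPU entering LP2. It programs the PMC power-request
 * outputs and the CPU power-good/off timers, handles the cluster-switch
 * prolog when a switch was requested, optionally arms a wake trigger for
 * @sleep_time, performs the required cache maintenance, and then enters the
 * low-power state (stopping the MC clock on T148 when allowed). On return
 * it restores the CPU complex and reports the time remaining on the
 * power-down wake timer.
 */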
725 unsigned int tegra_idle_power_down_last(unsigned int sleep_time,
726                                         unsigned int flags)
727 {
728         u32 reg;
729         unsigned int remain;
730
731         /* Only the last CPU to go down performs the final suspend steps */
732         reg = readl(pmc + PMC_CTRL);
733         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
734         if (pdata->combined_req)
735                 reg &= ~TEGRA_POWER_PWRREQ_OE;
736         else
737                 reg |= TEGRA_POWER_PWRREQ_OE;
738
739         reg &= ~TEGRA_POWER_EFFECT_LP0;
740         writel(reg, pmc + PMC_CTRL);
741
742         /*
743          * We can use clk_get_rate_all_locked() here, because all other cpus
744          * are in LP2 state and irqs are disabled
745          */
746         suspend_cpu_dfll_mode(flags);
747         if (flags & TEGRA_POWER_CLUSTER_MASK) {
748                 if (is_idle_task(current))
749                         trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_START);
750                 else
751                         trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_START);
752 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
753                 set_power_timers(pdata->cpu_timer, 2);
754 #else
755                 set_power_timers(pdata->cpu_timer, 2,
756                         clk_get_rate_all_locked(tegra_pclk));
757 #endif
758                 if (flags & TEGRA_POWER_CLUSTER_G) {
759                         /*
760                          * To reduce the vdd_cpu ramp-up latency on an
761                          * LP->G transition, enable the vdd_cpu rail
762                          * before the transition.
763                          */
764                         if (!tegra_crail_can_start_early() && is_lp_cluster()) {
765 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
766                                 reg = readl(FLOW_CTRL_CPU_PWR_CSR);
767                                 reg |= FLOW_CTRL_CPU_PWR_CSR_RAIL_ENABLE;
768                                 writel(reg, FLOW_CTRL_CPU_PWR_CSR);
769 #else
770                                 writel(UN_PWRGATE_CPU,
771                                        pmc + PMC_PWRGATE_TOGGLE);
772 #endif
773                         }
774                 }
775                 tegra_cluster_switch_prolog(flags);
776         } else {
777 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
778                 set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer);
779 #else
780                 set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
781                         clk_get_rate_all_locked(tegra_pclk));
782 #endif
783 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
784                 reg = readl(FLOW_CTRL_CPU_CSR(0));
785                 reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
786                 if (is_lp_cluster()) {
787                         /* for LP cluster, there is no option for rail gating */
788                         if ((flags & TEGRA_POWER_CLUSTER_PART_MASK) ==
789                                                 TEGRA_POWER_CLUSTER_PART_MASK)
790                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_EMU;
791                         else if (flags)
792                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
793                 }
794                 else {
795                         if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL)
796                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
797                         if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
798                                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
799                 }
800                 writel(reg, FLOW_CTRL_CPU_CSR(0));
801 #endif
802         }
803
804         if (sleep_time)
805                 tegra_pd_set_trigger(sleep_time);
806
807         cpu_cluster_pm_enter();
808         suspend_cpu_complex(flags);
809         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_prolog);
810 #if defined(CONFIG_CACHE_L2X0)
811 #if defined(CONFIG_TEGRA_USE_SECURE_KERNEL)
812         flush_cache_all();
813         outer_disable();
814 #elif !defined(CONFIG_ARCH_TEGRA_14x_SOC)
815         tegra_resume_l2_init = 1;
816         __cpuc_flush_dcache_area(&tegra_resume_l2_init, sizeof(unsigned long));
817         outer_flush_range(__pa(&tegra_resume_l2_init),
818                           __pa(&tegra_resume_l2_init) + sizeof(unsigned long));
819 #endif
820 #endif
821
822         /* T148: Check for mem_req and mem_req_soon only if this is
823          * the MC clock-stop state.
824          */
825         if (flags & TEGRA_POWER_STOP_MC_CLK) {
826 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
827                 u32 val;
828
829                 /* Check if mem_req or mem_req_soon is asserted, or if a
830                  * voice call is active; if so, skip SDRAM self-refresh
831                  * and just do CPU power-gating.
832                  */
833                 val = readl(pmc + PMC_IPC_STS);
834                 if ((val & (PMC_IPC_STS_MEM_REQ | PMC_IPC_STS_MEM_REQ_SOON)) ||
835                         tegra_is_voice_call_active()) {
836
837                         /* Reset the LP1 and MC clock masks if we are skipping SDRAM
838                          * self-refresh.
839                          */
840                         *iram_cpu_lp1_mask = 0;
841                         *iram_mc_clk_mask = 0;
842                         writel(0, pmc + PMC_SCRATCH41);
843
844                         tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
845                 } else {
846                         /* Clear mem_sts since SDRAM will not be accessible
847                          * to BBC in this state.
848                          */
849                         val = PMC_IPC_CLR_MEM_STS;
850                         writel(val, pmc + PMC_IPC_CLR);
851
852                         tegra_stop_mc_clk(PHYS_OFFSET - PAGE_OFFSET);
853                 }
854 #else
855                 /* If this is not T148, we do not have to
856                  * check mem_req and mem_req_soon.
857                  */
858                 tegra_stop_mc_clk(PHYS_OFFSET - PAGE_OFFSET);
859 #endif
860         } else {
861                 tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
862         }
863
864 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
865         tegra_init_cache(true);
866 #elif defined(CONFIG_TEGRA_USE_SECURE_KERNEL)
867         tegra_init_cache(false);
868 #endif
869
870 #if defined(CONFIG_TRUSTED_FOUNDATIONS)
871 #ifndef CONFIG_ARCH_TEGRA_11x_SOC
872         trace_smc_wake(tegra_resume_smc_entry_time, NVSEC_SMC_START);
873         trace_smc_wake(tegra_resume_smc_exit_time, NVSEC_SMC_DONE);
874 #endif
875 #endif
876
877         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_switch);
878         restore_cpu_complex(flags);
879         cpu_cluster_pm_exit();
880
881         remain = tegra_pd_timer_remain();
882         if (sleep_time)
883                 tegra_pd_set_trigger(0);
884
885         if (flags & TEGRA_POWER_CLUSTER_MASK) {
886                 tegra_cluster_switch_epilog(flags);
887                 if (is_idle_task(current))
888                         trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_DONE);
889                 else
890                         trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_DONE);
891         }
892         resume_cpu_dfll_mode(flags);
893         tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_epilog);
894
895 #if INSTRUMENT_CLUSTER_SWITCH
896         if (flags & TEGRA_POWER_CLUSTER_MASK) {
897                 pr_debug("%s: prolog %lu us, switch %lu us, epilog %lu us, total %lu us\n",
898                         is_lp_cluster() ? "G=>LP" : "LP=>G",
899                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog] -
900                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_start],
901                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch] -
902                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog],
903                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
904                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_switch],
905                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_epilog] -
906                         tegra_cluster_switch_times[tegra_cluster_switch_time_id_start]);
907         }
908 #endif
909         return remain;
910 }
911
912 void tegra_mc_clk_prepare(void)
913 {
914         /* copy the reset vector and SDRAM shutdown code into IRAM */
915         memcpy(iram_save, iram_code, iram_save_size);
916         memcpy(iram_code, tegra_iram_start(), iram_save_size);
917
918         *iram_cpu_lp1_mask = 1;
919         *iram_mc_clk_mask = 1;
920
921         __raw_writel(virt_to_phys(tegra_resume), pmc + PMC_SCRATCH41);
922         wmb();
923 }
924
925 void tegra_mc_clk_finish(void)
926 {
927         /* restore IRAM */
928         memcpy(iram_code, iram_save, iram_save_size);
929         *iram_cpu_lp1_mask = 0;
930         *iram_mc_clk_mask = 0;
931         writel(0, pmc + PMC_SCRATCH41);
932 }
933
934 #ifdef CONFIG_TEGRA_LP1_LOW_COREVOLTAGE
935 int tegra_is_lp1_suspend_mode(void)
936 {
937         return (current_suspend_mode == TEGRA_SUSPEND_LP1);
938 }
939 #endif
940
941 static int tegra_common_suspend(void)
942 {
943         void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
944
945         tegra_sctx.mc[0] = readl(mc + MC_SECURITY_START);
946         tegra_sctx.mc[1] = readl(mc + MC_SECURITY_SIZE);
947         tegra_sctx.mc[2] = readl(mc + MC_SECURITY_CFG2);
948
949 #ifdef CONFIG_TEGRA_LP1_LOW_COREVOLTAGE
950         if (pdata && pdata->lp1_lowvolt_support) {
951                 u32 lp1_core_lowvolt =
952                         (tegra_is_voice_call_active() ||
953                         tegra_dvfs_rail_get_thermal_floor(tegra_core_rail)) ?
954                         pdata->lp1_core_volt_low_cold << 8 :
955                         pdata->lp1_core_volt_low << 8;
956
957                 lp1_core_lowvolt |= pdata->core_reg_addr;
958                 memcpy(tegra_lp1_register_core_lowvolt(), &lp1_core_lowvolt, 4);
959         }
960 #endif
961
962         /* copy the reset vector and SDRAM shutdown code into IRAM */
963         memcpy(iram_save, iram_code, iram_save_size);
964         memcpy(iram_code, tegra_iram_start(), iram_save_size);
965
966         return 0;
967 }
968
969 static void tegra_common_resume(void)
970 {
971         void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
972 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
973         void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
974 #endif
975
976 #if defined(CONFIG_ARCH_TEGRA_14x_SOC) || defined(CONFIG_ARCH_TEGRA_12x_SOC)
977         /* Clear DPD Enable */
978         writel(0x0, pmc + PMC_DPD_ENABLE);
979 #endif
980
981         writel(tegra_sctx.mc[0], mc + MC_SECURITY_START);
982         writel(tegra_sctx.mc[1], mc + MC_SECURITY_SIZE);
983         writel(tegra_sctx.mc[2], mc + MC_SECURITY_CFG2);
984 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
985         /* trigger emc mode write */
986         writel(EMC_MRW_DEV_NONE, emc + EMC_MRW_0);
987         /* clear scratch registers shared by suspend and the reset pen */
988         writel(0x0, pmc + PMC_SCRATCH39);
989 #endif
990         writel(0x0, pmc + PMC_SCRATCH41);
991
992         /* restore IRAM */
993         memcpy(iram_code, iram_save, iram_save_size);
994 }
995
996 static int tegra_suspend_prepare_late(void)
997 {
998 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
999         disable_irq(INT_SYS_STATS_MON);
1000 #endif
1001         return 0;
1002 }
1003
1004 static void tegra_suspend_wake(void)
1005 {
1006 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
1007         enable_irq(INT_SYS_STATS_MON);
1008 #endif
1009 }
1010
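/*
 * tegra_pm_set
 *
 * Programs the PMC for the selected suspend mode: sets the core/CPU power
 * request enables (honoring the combined-request configuration), and for
 * LP0 additionally enables DPD pad sampling, requests IO deep power-down on
 * SoCs that support it, and sets the warmboot flag and LP0 vector in the
 * scratch registers. For LP0 and LP1 the resume address is written to
 * PMC_SCRATCH41. Finally the power timers and PMC_CTRL are updated.
 */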
1011 static void tegra_pm_set(enum tegra_suspend_mode mode)
1012 {
1013         u32 reg, boot_flag;
1014         unsigned long rate = 32768;
1015
1016         reg = readl(pmc + PMC_CTRL);
1017         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
1018         if (pdata->combined_req)
1019                 reg &= ~TEGRA_POWER_PWRREQ_OE;
1020         else
1021                 reg |= TEGRA_POWER_PWRREQ_OE;
1022         reg &= ~TEGRA_POWER_EFFECT_LP0;
1023
1024         switch (mode) {
1025         case TEGRA_SUSPEND_LP0:
1026 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
1027                 rate = clk_get_rate_all_locked(tegra_pclk);
1028 #endif
1029                 if (pdata->combined_req) {
1030                         reg |= TEGRA_POWER_PWRREQ_OE;
1031                         reg &= ~TEGRA_POWER_CPU_PWRREQ_OE;
1032                 }
1033
1034 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
1035                 /*
1036                  * LP0 boots through the AVP, which then resumes the AVP to
1037                  * the address in scratch 39, and the cpu to the address in
1038                  * scratch 41 to tegra_resume
1039                  */
1040                 writel(0x0, pmc + PMC_SCRATCH39);
1041 #endif
1042
1043                 /* Enable DPD sample to trigger sampling of pad data and the
1044                  * direction in which pads will be driven during LP0 mode */
1045                 writel(0x1, pmc + PMC_DPD_SAMPLE);
1046 #if !defined(CONFIG_ARCH_TEGRA_3x_SOC) && !defined(CONFIG_ARCH_TEGRA_2x_SOC)
1047 #if defined(CONFIG_ARCH_TEGRA_11x_SOC) || defined(CONFIG_ARCH_TEGRA_12x_SOC)
1048                 writel(0x800fdfff, pmc + PMC_IO_DPD_REQ);
1049 #else
1050                 writel(0x800fffff, pmc + PMC_IO_DPD_REQ);
1051 #endif
1052                 writel(0x80001fff, pmc + PMC_IO_DPD2_REQ);
1053 #endif
1054
1055 #ifdef CONFIG_ARCH_TEGRA_11x_SOC
1056                 /* this is needed only for T11x, not for other chips */
1057                 reg &= ~TEGRA_POWER_CPUPWRGOOD_EN;
1058 #endif
1059
1060                 /* Set warmboot flag */
1061                 boot_flag = readl(pmc + PMC_SCRATCH0);
1062                 pmc_32kwritel(boot_flag | 1, PMC_SCRATCH0);
1063
1064                 pmc_32kwritel(tegra_lp0_vec_start, PMC_SCRATCH1);
1065
1066                 reg |= TEGRA_POWER_EFFECT_LP0;
1067                 /* No break here. LP0 code falls through to write SCRATCH41 */
1068         case TEGRA_SUSPEND_LP1:
1069                 __raw_writel(virt_to_phys(tegra_resume), pmc + PMC_SCRATCH41);
1070                 wmb();
1071                 break;
1072         case TEGRA_SUSPEND_LP2:
1073                 rate = clk_get_rate(tegra_pclk);
1074                 break;
1075         case TEGRA_SUSPEND_NONE:
1076                 return;
1077         default:
1078                 BUG();
1079         }
1080
1081 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
1082         set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer);
1083 #else
1084         set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer, rate);
1085 #endif
1086
1087         pmc_32kwritel(reg, PMC_CTRL);
1088 }
1089
1090 static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
1091         [TEGRA_SUSPEND_NONE] = "none",
1092         [TEGRA_SUSPEND_LP2] = "LP2",
1093         [TEGRA_SUSPEND_LP1] = "LP1",
1094         [TEGRA_SUSPEND_LP0] = "LP0",
1095 };
1096
1097 #if defined(CONFIG_CRYPTO_DEV_TEGRA_SE) && defined(CONFIG_ARCH_TEGRA_14x_SOC)
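/*
 * save_se_context
 *
 * Saves the SE (security engine) context during suspend on Tegra14x SoCs:
 * the SMMU is briefly resumed so se_suspend() can run in polling mode, and
 * the SMMU is then suspended again. Errors are logged and the result is
 * returned to the caller.
 */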
1098 static int save_se_context(void)
1099 {
1100         struct device *smmu_dev, *se_dev;
1101         int ret = 0;
1102
1103         smmu_dev = get_smmu_device();
1104         if (!smmu_dev) {
1105                 pr_info("Failed to get smmu device\n");
1106                 goto save_fail;
1107         }
1108
1109         se_dev = get_se_device();
1110         if (!se_dev) {
1111                 pr_info("Failed to get SE device\n");
1112                 goto save_fail;
1113         }
1114
1115         /* smmu resume needs to be called
1116          * before the se_suspend() operation */
1117         ret = tegra_smmu_resume(smmu_dev);
1118         if (ret) {
1119                 pr_info("Failed to resume smmu device\n");
1120                 goto save_fail;
1121         }
1122
1123         ret = se_suspend(se_dev, true);
1124         if (ret) {
1125                 pr_info("Failed to suspend SE device\n");
1126                 goto save_fail;
1127         }
1128
1129         ret = tegra_smmu_suspend(smmu_dev);
1130         if (ret) {
1131                 pr_info("Failed to suspend smmu device\n");
1132                 goto save_fail;
1133         }
1134
1135 save_fail:
1136         return ret;
1137 }
1138 #endif
1139
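/*
 * tegra_suspend_enter
 *
 * platform_suspend_ops .enter callback: invokes the board suspend hook,
 * suspends DRAM via tegra_suspend_dram(), saves the SE context on Tegra14x,
 * and measures the time spent suspended with the persistent clock so the
 * DVFS rail statistics can be credited for that interval.
 */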
1140 static int tegra_suspend_enter(suspend_state_t state)
1141 {
1142         int ret = 0;
1143         ktime_t delta;
1144         struct timespec ts_entry, ts_exit;
1145
1146         if (pdata && pdata->board_suspend)
1147                 pdata->board_suspend(current_suspend_mode, TEGRA_SUSPEND_BEFORE_PERIPHERAL);
1148
1149         read_persistent_clock(&ts_entry);
1150
1151         ret = tegra_suspend_dram(current_suspend_mode, 0);
1152         if (ret) {
1153                 pr_info("Aborting suspend, tegra_suspend_dram error=%d\n", ret);
1154                 goto abort_suspend;
1155         }
1156
1157 #if defined(CONFIG_CRYPTO_DEV_TEGRA_SE) && defined(CONFIG_ARCH_TEGRA_14x_SOC)
1158         ret = save_se_context();
1159         if (ret) {
1160                 pr_info("Failed to save SE context\n");
1161                 goto abort_suspend;
1162         }
1163 #endif
1164
1165         read_persistent_clock(&ts_exit);
1166
1167         if (timespec_compare(&ts_exit, &ts_entry) > 0) {
1168                 delta = timespec_to_ktime(timespec_sub(ts_exit, ts_entry));
1169
1170                 tegra_dvfs_rail_pause(tegra_cpu_rail, delta, false);
1171                 if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1172                         tegra_dvfs_rail_pause(tegra_core_rail, delta, false);
1173                 else
1174                         tegra_dvfs_rail_pause(tegra_core_rail, delta, true);
1175         }
1176
1177 abort_suspend:
1178         if (pdata && pdata->board_resume)
1179                 pdata->board_resume(current_suspend_mode, TEGRA_RESUME_AFTER_PERIPHERAL);
1180
1181         return ret;
1182 }
1183
1184 static void tegra_suspend_check_pwr_stats(void)
1185 {
1186         /* cpus and l2 are powered off later */
1187         unsigned long pwrgate_partid_mask =
1188 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
1189                 (1 << TEGRA_POWERGATE_HEG)      |
1190                 (1 << TEGRA_POWERGATE_SATA)     |
1191                 (1 << TEGRA_POWERGATE_3D1)      |
1192 #endif
1193                 (1 << TEGRA_POWERGATE_3D)       |
1194                 (1 << TEGRA_POWERGATE_VENC)     |
1195                 (1 << TEGRA_POWERGATE_PCIE)     |
1196                 (1 << TEGRA_POWERGATE_VDEC)     |
1197                 (1 << TEGRA_POWERGATE_MPE);
1198
1199         int partid;
1200
1201         for (partid = 0; partid < TEGRA_NUM_POWERGATE; partid++)
1202                 if ((1 << partid) & pwrgate_partid_mask)
1203                         if (tegra_powergate_is_powered(partid))
1204                                 pr_debug("partition %s is left on before suspend\n",
1205                                         tegra_powergate_get_name(partid));
1206
1207         return;
1208 }
1209
1210 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
1211 /* This is the opposite of the LP1BB related PMC setup that occurs
1212  * during suspend.
1213  */
1214 static void tegra_disable_lp1bb_interrupt(void)
1215 {
1216         unsigned reg;
1217         /* mem_req = 0 was set as an interrupt during LP1BB entry.
1218          * It has to be disabled now
1219          */
1220         reg = readl(pmc + PMC_CTRL2);
1221         reg &= ~(PMC_CTRL2_WAKE_DET_EN);
1222         pmc_32kwritel(reg, PMC_CTRL2);
1223
1224         /* Program mem_req NOT to be a wake event */
1225         reg = readl(pmc + PMC_WAKE2_MASK);
1226         reg &= ~(PMC_WAKE2_BB_MEM_REQ);
1227         pmc_32kwritel(reg, PMC_WAKE2_MASK);
1228
1229         reg = PMC_WAKE2_BB_MEM_REQ;
1230         pmc_32kwritel(reg, PMC_WAKE2_STATUS);
1231
1232         /* Set up the LIC to NOT accept pmc_wake events as interrupts */
1233         reg = TRI_ICTLR_PMC_WAKE_INT;
1234         writel(reg, tert_ictlr + TRI_ICTLR_CPU_IER_CLR);
1235 }
1236 #endif
1237
1238 static void tegra_suspend_powergate_control(int partid, bool turn_off)
1239 {
1240         if (turn_off)
1241                 tegra_powergate_partition(partid);
1242         else
1243                 tegra_unpowergate_partition(partid);
1244 }
1245
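/*
 * tegra_suspend_dram
 *
 * Core suspend entry point for LP0/LP1/LP2. It demotes LP0 to LP1 when a
 * requested wake event cannot be handled in LP0, optionally power-gates the
 * VDE partition for LP1, programs the PMC via tegra_pm_set(), saves the CPU
 * complex state, flushes the caches, and enters the low-power state through
 * tegra_sleep_cpu()/tegra_sleep_core(). The reverse steps are performed
 * after wakeup before returning.
 */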
1246 int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags)
1247 {
1248         int err = 0;
1249         u32 scratch37 = 0xDEADBEEF;
1250         u32 reg;
1251
1252 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
1253         u32 enter_state = 0;
1254 #endif
1255         bool tegra_suspend_vde_powergated = false;
1256
1257         if (WARN_ON(mode <= TEGRA_SUSPEND_NONE ||
1258                 mode >= TEGRA_MAX_SUSPEND_MODE)) {
1259                 err = -ENXIO;
1260                 goto fail;
1261         }
1262
1263 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
1264         update_pmc_registers(tegra_lp1bb_emc_min_rate_get());
1265 #endif
1266
1267         if (tegra_is_voice_call_active()) {
1268                 /* backup the current value of scratch37 */
1269                 scratch37 = readl(pmc + PMC_SCRATCH37);
1270
1271                 /* If voice call is active, set a flag in PMC_SCRATCH37 */
1272                 reg = TEGRA_POWER_LP1_AUDIO;
1273                 pmc_32kwritel(reg, PMC_SCRATCH37);
1274         }
1275
1276         if ((mode == TEGRA_SUSPEND_LP0) && !tegra_pm_irq_lp0_allowed()) {
1277                 pr_info("LP0 not used due to unsupported wakeup events\n");
1278                 mode = TEGRA_SUSPEND_LP1;
1279         }
1280
1281         if ((mode == TEGRA_SUSPEND_LP0) || (mode == TEGRA_SUSPEND_LP1))
1282                 tegra_suspend_check_pwr_stats();
1283
1284         /* turn off VDE partition in LP1 */
1285         if (mode == TEGRA_SUSPEND_LP1 &&
1286                 tegra_powergate_is_powered(TEGRA_POWERGATE_VDEC)) {
1287                 pr_info("turning off partition %s in LP1\n",
1288                         tegra_powergate_get_name(TEGRA_POWERGATE_VDEC));
1289                 tegra_suspend_powergate_control(TEGRA_POWERGATE_VDEC, true);
1290                 tegra_suspend_vde_powergated = true;
1291         }
1292
1293         tegra_common_suspend();
1294
1295         tegra_pm_set(mode);
1296
1297         if (pdata && pdata->board_suspend)
1298                 pdata->board_suspend(mode, TEGRA_SUSPEND_BEFORE_CPU);
1299
1300         local_fiq_disable();
1301
1302 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
1303         tegra_smp_save_power_mask();
1304 #endif
1305
1306         trace_cpu_suspend(CPU_SUSPEND_START, tegra_rtc_read_ms());
1307
1308         if (mode == TEGRA_SUSPEND_LP0) {
1309 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
1310                 reg = readl(pmc + PMC_SCRATCH4);
1311                 if (is_lp_cluster())
1312                         reg |= PMC_SCRATCH4_WAKE_CLUSTER_MASK;
1313                 else
1314                         reg &= (~PMC_SCRATCH4_WAKE_CLUSTER_MASK);
1315                 pmc_32kwritel(reg, PMC_SCRATCH4);
1316 #endif
1317                 tegra_tsc_suspend();
1318                 tegra_lp0_suspend_mc();
1319                 tegra_cpu_reset_handler_save();
1320                 tegra_tsc_wait_for_suspend();
1321                 if (!tegra_cpu_is_asim())
1322                         tegra_smp_clear_power_mask();
1323         }
1324
1325 #if !defined(CONFIG_ARCH_TEGRA_14x_SOC)
1326         if (mode == TEGRA_SUSPEND_LP1)
1327 #endif
1328                 *iram_cpu_lp1_mask = 1;
1329
1330         suspend_cpu_complex(flags);
1331
1332 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
1333         /* In case of LP0/LP1, program external power gating accordingly */
1334         if (mode == TEGRA_SUSPEND_LP0 || mode == TEGRA_SUSPEND_LP1) {
1335                 reg = readl(FLOW_CTRL_CPU_CSR(0));
1336                 if (is_lp_cluster())
1337                         reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU; /* Non CPU */
1338                 else
1339                         reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;  /* CRAIL */
1340                 flowctrl_writel(reg, FLOW_CTRL_CPU_CSR(0));
1341         }
1342 #endif
1343
1344         flush_cache_all();
1345         outer_disable();
1346
1347         if (mode == TEGRA_SUSPEND_LP2)
1348                 tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
1349         else
1350                 tegra_sleep_core(mode, PHYS_OFFSET - PAGE_OFFSET);
1351
1352         resume_entry_time = 0;
1353         if (mode != TEGRA_SUSPEND_LP0)
1354                 resume_entry_time = readl(tmrus_reg_base + TIMERUS_CNTR_1US);
1355
1356         tegra_init_cache(true);
1357
1358 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
1359         reg = readl(pmc + PMC_LP_STATE_SCRATCH_REG);
1360         enter_state = (reg >> PMC_LP_STATE_BIT_OFFSET) & PMC_LP_STATE_BIT_MASK;
1361         /* If we actually entered either LP1 or LP1BB, restore the
1362          * power mask and disable the mem_req interrupt in the PMC
1363          */
1364         if (enter_state) {
1365                 pr_info("Exited state is LP1/LP1BB\n");
1366                 tegra_disable_lp1bb_interrupt();
1367                 tegra_smp_restore_power_mask();
1368         }
1369 #endif
1370
1371 #if defined(CONFIG_TEGRA_USE_SECURE_KERNEL)
1372 #ifndef CONFIG_ARCH_TEGRA_11x_SOC
1373         trace_smc_wake(tegra_resume_smc_entry_time, NVSEC_SMC_START);
1374         trace_smc_wake(tegra_resume_smc_exit_time, NVSEC_SMC_DONE);
1375 #endif
1376
1377         if (mode == TEGRA_SUSPEND_LP0) {
1378                 trace_secureos_init(tegra_resume_entry_time,
1379                         NVSEC_SUSPEND_EXIT_DONE);
1380         }
1381 #endif
1382
1383         if (mode == TEGRA_SUSPEND_LP0) {
1384
1385                 /* CPUPWRGOOD_EN is not enabled in HW, so this is disabled;
1386                  * otherwise it causes issues in cluster switch after LP0:
1387 #ifdef CONFIG_ARCH_TEGRA_11x_SOC
1388                 reg = readl(pmc+PMC_CTRL);
1389                 reg |= TEGRA_POWER_CPUPWRGOOD_EN;
1390                 pmc_32kwritel(reg, PMC_CTRL);
1391 #endif
1392                 */
1393
1394                 tegra_tsc_resume();
1395                 tegra_cpu_reset_handler_restore();
1396                 tegra_lp0_resume_mc();
1397                 tegra_tsc_wait_for_resume();
1398         }
1399
1400 #if !defined(CONFIG_ARCH_TEGRA_14x_SOC)
1401         if (mode == TEGRA_SUSPEND_LP1)
1402 #endif
1403                 *iram_cpu_lp1_mask = 0;
1404
1405         /* if scratch37 was clobbered during LP1, restore it */
1406         if (scratch37 != 0xDEADBEEF)
1407                 pmc_32kwritel(scratch37, PMC_SCRATCH37);
1408
1409         restore_cpu_complex(flags);
1410
1411         /* for platforms where the core & CPU power requests are
1412          * combined as a single request to the PMU, transition out
1413          * of LP0 state by temporarily enabling both requests
1414          */
1415         if (mode == TEGRA_SUSPEND_LP0 && pdata->combined_req) {
1416                 reg = readl(pmc + PMC_CTRL);
1417                 reg |= TEGRA_POWER_CPU_PWRREQ_OE;
1418                 pmc_32kwritel(reg, PMC_CTRL);
1419                 reg &= ~TEGRA_POWER_PWRREQ_OE;
1420                 pmc_32kwritel(reg, PMC_CTRL);
1421         }
1422
1423         if (pdata && pdata->board_resume)
1424                 pdata->board_resume(mode, TEGRA_RESUME_AFTER_CPU);
1425
1426         trace_cpu_suspend(CPU_SUSPEND_DONE, tegra_rtc_read_ms());
1427
1428         local_fiq_enable();
1429
1430         tegra_common_resume();
1431
1432         /* turn on VDE partition in LP1 */
1433         if (mode == TEGRA_SUSPEND_LP1 && tegra_suspend_vde_powergated) {
1434                 pr_info("turning on partition %s in LP1\n",
1435                         tegra_powergate_get_name(TEGRA_POWERGATE_VDEC));
1436                 tegra_suspend_powergate_control(TEGRA_POWERGATE_VDEC, false);
1437         }
1438
1439 fail:
1440         return err;
1441 }
1442
1443 /*
1444  * Function pointers to optional board specific function
1445  */
1446 void (*tegra_deep_sleep)(int);
1447 EXPORT_SYMBOL(tegra_deep_sleep);
1448
1449 static int tegra_suspend_prepare(void)
1450 {
1451         if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
1452                 tegra_deep_sleep(1);
1453         return 0;
1454 }
1455
1456 static void tegra_suspend_finish(void)
1457 {
1458         if (pdata && pdata->cpu_resume_boost) {
1459                 int ret = tegra_suspended_target(pdata->cpu_resume_boost);
1460                 pr_info("Tegra: resume CPU boost to %u KHz: %s (%d)\n",
1461                         pdata->cpu_resume_boost, ret ? "Failed" : "OK", ret);
1462         }
1463
1464         if ((current_suspend_mode == TEGRA_SUSPEND_LP0) && tegra_deep_sleep)
1465                 tegra_deep_sleep(0);
1466 }
1467
1468 static const struct platform_suspend_ops tegra_suspend_ops = {
1469         .valid          = suspend_valid_only_mem,
1470         .prepare        = tegra_suspend_prepare,
1471         .finish         = tegra_suspend_finish,
1472         .prepare_late   = tegra_suspend_prepare_late,
1473         .wake           = tegra_suspend_wake,
1474         .enter          = tegra_suspend_enter,
1475 };
1476
1477 static ssize_t suspend_mode_show(struct kobject *kobj,
1478                                         struct kobj_attribute *attr, char *buf)
1479 {
1480         char *start = buf;
1481         char *end = buf + PAGE_SIZE;
1482
1483         start += scnprintf(start, end - start, "%s ", \
1484                                 tegra_suspend_name[current_suspend_mode]);
1485         start += scnprintf(start, end - start, "\n");
1486
1487         return start - buf;
1488 }
1489
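/*
 * suspend_mode_store
 *
 * sysfs store handler for the "mode" attribute: matches the first
 * whitespace-delimited token against the tegra_suspend_name[] table and
 * updates current_suspend_mode. "none" and "lp2" are rejected; unrecognized
 * strings are silently ignored (the write still reports success).
 */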
1490 static ssize_t suspend_mode_store(struct kobject *kobj,
1491                                         struct kobj_attribute *attr,
1492                                         const char *buf, size_t n)
1493 {
1494         int len;
1495         const char *name_ptr;
1496         enum tegra_suspend_mode new_mode;
1497
1498         name_ptr = buf;
1499         while (*name_ptr && !isspace(*name_ptr))
1500                 name_ptr++;
1501         len = name_ptr - buf;
1502         if (!len)
1503                 goto bad_name;
1504         /* TEGRA_SUSPEND_NONE and TEGRA_SUSPEND_LP2 are not valid suspend states */
1505         if (!(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_NONE], len))
1506                 || !(strncmp(buf, tegra_suspend_name[TEGRA_SUSPEND_LP2], len))) {
1507                 pr_info("Illegal tegra suspend state: %s\n", buf);
1508                 goto bad_name;
1509         }
1510
1511         for (new_mode = TEGRA_SUSPEND_NONE;
1512                         new_mode < TEGRA_MAX_SUSPEND_MODE; ++new_mode) {
1513                 if (!strncmp(buf, tegra_suspend_name[new_mode], len)) {
1514                         current_suspend_mode = new_mode;
1515                         break;
1516                 }
1517         }
1518
1519 bad_name:
1520         return n;
1521 }
1522
1523 static struct kobj_attribute suspend_mode_attribute =
1524         __ATTR(mode, 0644, suspend_mode_show, suspend_mode_store);
1525
1526 static ssize_t suspend_resume_time_show(struct kobject *kobj,
1527                                         struct kobj_attribute *attr,
1528                                         char *buf)
1529 {
1530         return sprintf(buf, "%ums\n", ((u32)resume_time / 1000));
1531 }
1532
1533 static struct kobj_attribute suspend_resume_time_attribute =
1534         __ATTR(resume_time, 0444, suspend_resume_time_show, NULL);
1535
1536 static ssize_t suspend_time_show(struct kobject *kobj,
1537                                         struct kobj_attribute *attr,
1538                                         char *buf)
1539 {
1540         return sprintf(buf, "%ums\n", ((u32)suspend_time / 1000));
1541 }
1542
1543 static struct kobj_attribute suspend_time_attribute =
1544         __ATTR(suspend_time, 0444, suspend_time_show, NULL);
1545
1546 static struct kobject *suspend_kobj;
1547
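/*
 * syscore hooks run late in suspend and early in resume, with only the
 * boot CPU alive.  They switch CPU DFLL clocking around the transition
 * (suspend_cpu_dfll_mode()/resume_cpu_dfll_mode()) and, for LP0,
 * notify the cluster code through tegra_lp0_cpu_mode().
 */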
1548 static int tegra_pm_enter_suspend(void)
1549 {
1550         pr_info("Entering suspend state %s\n", lp_state[current_suspend_mode]);
1551         suspend_cpu_dfll_mode(0);
1552         if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1553                 tegra_lp0_cpu_mode(true);
1554         return 0;
1555 }
1556
1557 static void tegra_pm_enter_resume(void)
1558 {
1559         if (current_suspend_mode == TEGRA_SUSPEND_LP0)
1560                 tegra_lp0_cpu_mode(false);
1561         resume_cpu_dfll_mode(0);
1562         pr_info("Exited suspend state %s\n", lp_state[current_suspend_mode]);
1563 }
1564
1565 static void tegra_pm_enter_shutdown(void)
1566 {
1567         suspend_cpu_dfll_mode(0);
1568         pr_info("Shutting down tegra ...\n");
1569 }
1570
1571 static struct syscore_ops tegra_pm_enter_syscore_ops = {
1572         .suspend = tegra_pm_enter_suspend,
1573         .resume = tegra_pm_enter_resume,
1574         .shutdown = tegra_pm_enter_shutdown,
1575 };
1576
1577 static __init int tegra_pm_enter_syscore_init(void)
1578 {
1579         register_syscore_ops(&tegra_pm_enter_syscore_ops);
1580         return 0;
1581 }
1582 subsys_initcall(tegra_pm_enter_syscore_init);
1583 #endif
1584
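/*
 * Build the suspend platform data and program the PMC.  When PMC device
 * tree data is available it is merged with the board file: values that
 * disagree fall back to the board value (with a warning), and fields
 * that have no DT binding yet are copied straight from the board data.
 * The PMC power-request/sysclk polarities and timers are then set up,
 * and the suspend_ops and /sys/power/suspend nodes are registered.
 */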
1585 void __init tegra_init_suspend(struct tegra_suspend_platform_data *plat)
1586 {
1587         u32 reg;
1588         u32 mode;
1589         struct pmc_pm_data *pm_dat;
1590         bool is_board_pdata = true;
1591
1592 #ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
1593         tegra_dfll = clk_get_sys(NULL, "dfll_cpu");
1594         BUG_ON(IS_ERR(tegra_dfll));
1595 #endif
1596         tegra_pclk = clk_get_sys(NULL, "pclk");
1597         BUG_ON(IS_ERR(tegra_pclk));
1598
1599         /* create the pdata from DT information */
1600         pm_dat = tegra_get_pm_data();
1601         if (pm_dat) {
1602                 pr_info("%s: PMC DT data found, building suspend data from DT\n", __func__);
1603                 is_board_pdata = false;
1604                 pdata = kzalloc(sizeof(struct tegra_suspend_platform_data),
1605                         GFP_KERNEL);
1606                 if (pm_dat->combined_req != plat->combined_req) {
1607                         pr_err("PMC DT attribute combined_req=%d, board value=%d\n",
1608                                 pm_dat->combined_req, plat->combined_req);
1609                         pdata->combined_req = plat->combined_req;
1610                 } else {
1611                         pdata->combined_req = pm_dat->combined_req;
1612                 }
1613                 if (pm_dat->sysclkreq_high != plat->sysclkreq_high) {
1614                         pr_err("PMC DT attribute sysclkreq_high=%d, board value=%d\n",
1615                                 pm_dat->sysclkreq_high, plat->sysclkreq_high);
1616                         pdata->sysclkreq_high = plat->sysclkreq_high;
1617                 } else {
1618                         pdata->sysclkreq_high = pm_dat->sysclkreq_high;
1619                 }
1620                 if (pm_dat->corereq_high != plat->corereq_high) {
1621                         pr_err("PMC DT attribute corereq_high=%d, board value=%d\n",
1622                                 pm_dat->corereq_high, plat->corereq_high);
1623                         pdata->corereq_high = plat->corereq_high;
1624                 } else {
1625                         pdata->corereq_high = pm_dat->corereq_high;
1626                 }
1627                 if (pm_dat->cpu_off_time != plat->cpu_off_timer) {
1628                         pr_err("PMC DT attribute cpu_off_timer=%d, board value=%ld\n",
1629                                 pm_dat->cpu_off_time, plat->cpu_off_timer);
1630                         pdata->cpu_off_timer = plat->cpu_off_timer;
1631                 } else {
1632                         pdata->cpu_off_timer = pm_dat->cpu_off_time;
1633                 }
1634                 if (pm_dat->cpu_good_time != plat->cpu_timer) {
1635                         pr_err("PMC DT attribute cpu_timer=%d, board value=%ld\n",
1636                                 pm_dat->cpu_good_time, plat->cpu_timer);
1637                         pdata->cpu_timer = plat->cpu_timer;
1638                 } else {
1639                         pdata->cpu_timer = pm_dat->cpu_good_time;
1640                 }
1641                 if (pm_dat->suspend_mode != plat->suspend_mode) {
1642                         pr_err("PMC DT attribute suspend_mode=%d, board value=%d\n",
1643                                 pm_dat->suspend_mode, plat->suspend_mode);
1644                         pdata->suspend_mode = plat->suspend_mode;
1645                 } else {
1646                         pdata->suspend_mode = pm_dat->suspend_mode;
1647                 }
1648                 /* FIXME: pmc_pm_data fields to be reused
1649                  *      core_osc_time, core_pmu_time, core_off_time
1650                  *      units of above fields is uSec while
1651                  *      platform data values are in ticks
1652                  */
1653                 /* FIXME: pmc_pm_data unused by downstream code
1654                  *      cpu_pwr_good_en, lp0_vec_size, lp0_vec_phy_addr
1655                  */
1656                 /* FIXME: add missing DT bindings taken from platform data */
1657                 pdata->core_timer = plat->core_timer;
1658                 pdata->core_off_timer = plat->core_off_timer;
1659                 pdata->board_suspend = plat->board_suspend;
1660                 pdata->board_resume = plat->board_resume;
1661                 pdata->sysclkreq_gpio = plat->sysclkreq_gpio;
1662                 pdata->cpu_lp2_min_residency = plat->cpu_lp2_min_residency;
1663                 pdata->cpu_resume_boost = plat->cpu_resume_boost;
1664 #ifdef CONFIG_TEGRA_LP1_LOW_COREVOLTAGE
1665                 pdata->lp1_lowvolt_support = plat->lp1_lowvolt_support;
1666                 pdata->i2c_base_addr = plat->i2c_base_addr;
1667                 pdata->pmuslave_addr = plat->pmuslave_addr;
1668                 pdata->core_reg_addr = plat->core_reg_addr;
1669                 pdata->lp1_core_volt_low_cold = plat->lp1_core_volt_low_cold;
1670                 pdata->lp1_core_volt_low = plat->lp1_core_volt_low;
1671                 pdata->lp1_core_volt_high = plat->lp1_core_volt_high;
1672 #endif
1673 #ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE
1674                 pdata->min_residency_vmin_fmin = plat->min_residency_vmin_fmin;
1675                 pdata->min_residency_ncpu_slow = plat->min_residency_ncpu_slow;
1676                 pdata->min_residency_ncpu_fast = plat->min_residency_ncpu_fast;
1677                 pdata->min_residency_crail = plat->min_residency_crail;
1678 #endif
1679                 pdata->min_residency_mc_clk = plat->min_residency_mc_clk;
1680                 pdata->usb_vbus_internal_wake = plat->usb_vbus_internal_wake;
1681                 pdata->usb_id_internal_wake = plat->usb_id_internal_wake;
1682         } else {
1683                 pr_info("%s: no PMC DT data, using board suspend data\n", __func__);
1684                 pdata = plat;
1685         }
1686         (void)reg;
1687         (void)mode;
1688
1689         if (plat->suspend_mode == TEGRA_SUSPEND_LP2)
1690                 plat->suspend_mode = TEGRA_SUSPEND_LP0;
1691
1692 #ifndef CONFIG_PM_SLEEP
1693         if (plat->suspend_mode != TEGRA_SUSPEND_NONE) {
1694                 pr_warning("%s: Suspend requires CONFIG_PM_SLEEP -- "
1695                            "disabling suspend\n", __func__);
1696                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1697         }
1698 #else
1699         if (create_suspend_pgtable() < 0) {
1700                 pr_err("%s: PGD memory alloc failed -- LP0/LP1/LP2 unavailable\n",
1701                                 __func__);
1702                 plat->suspend_mode = TEGRA_SUSPEND_NONE;
1703                 goto fail;
1704         }
1705
1706         if ((tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) &&
1707             (tegra_revision == TEGRA_REVISION_A01) &&
1708             (plat->suspend_mode == TEGRA_SUSPEND_LP0)) {
1709                 /* Tegra 3 A01 supports only LP1 */
1710                 pr_warning("%s: Suspend mode LP0 is not supported on A01 "
1711                            "-- disabling LP0\n", __func__);
1712                 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1713         }
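        /*
         * Relocate the bootloader-provided LP0 resume vector into a
         * cache-line aligned kernel buffer, presumably so the original
         * region can be reclaimed; tegra_lp0_vec_start is updated to the
         * new physical address.
         */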
1714         if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && tegra_lp0_vec_size &&
1715                 tegra_lp0_vec_relocate) {
1716                 unsigned char *reloc_lp0;
1717                 unsigned long tmp;
1718                 void __iomem *orig;
1719                 reloc_lp0 = kmalloc(tegra_lp0_vec_size + L1_CACHE_BYTES - 1,
1720                                         GFP_KERNEL);
1721                 WARN_ON(!reloc_lp0);
1722                 if (!reloc_lp0) {
1723                         pr_err("%s: Failed to allocate reloc_lp0\n",
1724                                 __func__);
1725                         goto out;
1726                 }
1727
1728                 orig = ioremap(tegra_lp0_vec_start, tegra_lp0_vec_size);
1729                 WARN_ON(!orig);
1730                 if (!orig) {
1731                         pr_err("%s: Failed to map tegra_lp0_vec_start %08lx\n",
1732                                 __func__, tegra_lp0_vec_start);
1733                         kfree(reloc_lp0);
1734                         goto out;
1735                 }
1736
1737                 tmp = (unsigned long) reloc_lp0;
1738                 tmp = (tmp + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
1739                 reloc_lp0 = (unsigned char *)tmp;
1740                 memcpy(reloc_lp0, orig, tegra_lp0_vec_size);
1741                 iounmap(orig);
1742                 tegra_lp0_vec_start = virt_to_phys(reloc_lp0);
1743         }
1744
1745 out:
1746         if (plat->suspend_mode == TEGRA_SUSPEND_LP0 && !tegra_lp0_vec_size) {
1747                 pr_warning("%s: Suspend mode LP0 requested, no lp0_vec "
1748                            "provided by bootloader -- disabling LP0\n",
1749                            __func__);
1750                 plat->suspend_mode = TEGRA_SUSPEND_LP1;
1751         }
1752
1753         iram_save_size = tegra_iram_end() - tegra_iram_start();
1754
1755         iram_save = kmalloc(iram_save_size, GFP_KERNEL);
1756         if (!iram_save && (plat->suspend_mode >= TEGRA_SUSPEND_LP1)) {
1757                 pr_err("%s: unable to allocate memory for SDRAM self-refresh "
1758                        "-- LP0/LP1 unavailable\n", __func__);
1759                 plat->suspend_mode = TEGRA_SUSPEND_LP2;
1760         }
1761
1762 #ifdef CONFIG_TEGRA_LP1_LOW_COREVOLTAGE
1763         if (pdata->lp1_lowvolt_support) {
1764                 u32 lp1_core_lowvolt, lp1_core_highvolt;
1765                 memcpy(tegra_lp1_register_pmuslave_addr(), &pdata->pmuslave_addr, 4);
1766                 memcpy(tegra_lp1_register_i2c_base_addr(), &pdata->i2c_base_addr, 4);
1767
1768                 lp1_core_lowvolt = 0;
1769                 lp1_core_lowvolt = (pdata->lp1_core_volt_low << 8) | pdata->core_reg_addr;
1770                 memcpy(tegra_lp1_register_core_lowvolt(), &lp1_core_lowvolt, 4);
1771
1772                 lp1_core_highvolt = 0;
1773                 lp1_core_highvolt = (pdata->lp1_core_volt_high << 8) | pdata->core_reg_addr;
1774                 memcpy(tegra_lp1_register_core_highvolt(), &lp1_core_highvolt, 4);
1775         }
1776 #endif
1777         /* !!!FIXME!!! THIS IS TEGRA2 ONLY */
1778         /* Initialize scratch registers used for CPU LP2 synchronization */
1779         writel(0, pmc + PMC_SCRATCH37);
1780         writel(0, pmc + PMC_SCRATCH38);
1781 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
1782         writel(0, pmc + PMC_SCRATCH39);
1783 #endif
1784         writel(0, pmc + PMC_SCRATCH41);
1785
1786         /* Always enable CPU power request; only normal polarity is supported */
1787         reg = readl(pmc + PMC_CTRL);
1788         BUG_ON(reg & TEGRA_POWER_CPU_PWRREQ_POLARITY);
1789         reg |= TEGRA_POWER_CPU_PWRREQ_OE;
1790         pmc_32kwritel(reg, PMC_CTRL);
1791
1792         /* Configure core power request and system clock control if LP0
1793          * is supported */
1794         __raw_writel(pdata->core_timer, pmc + PMC_COREPWRGOOD_TIMER);
1795         __raw_writel(pdata->core_off_timer, pmc + PMC_COREPWROFF_TIMER);
1796
1797         reg = readl(pmc + PMC_CTRL);
1798
1799         if (!pdata->sysclkreq_high)
1800                 reg |= TEGRA_POWER_SYSCLK_POLARITY;
1801         else
1802                 reg &= ~TEGRA_POWER_SYSCLK_POLARITY;
1803
1804         if (!pdata->corereq_high)
1805                 reg |= TEGRA_POWER_PWRREQ_POLARITY;
1806         else
1807                 reg &= ~TEGRA_POWER_PWRREQ_POLARITY;
1808
1809         /* configure output inverters while the request is tristated */
1810         pmc_32kwritel(reg, PMC_CTRL);
1811
1812         /* now enable requests */
1813         reg |= TEGRA_POWER_SYSCLK_OE;
1814         if (!pdata->combined_req)
1815                 reg |= TEGRA_POWER_PWRREQ_OE;
1816         pmc_32kwritel(reg, PMC_CTRL);
1817
1818         if (pdata->sysclkreq_gpio) {
1819                 reg = readl(pmc + PMC_DPAD_ORIDE);
1820                 reg &= ~TEGRA_DPAD_ORIDE_SYS_CLK_REQ;
1821                 pmc_32kwritel(reg, PMC_DPAD_ORIDE);
1822         }
1823
1824         if (pdata->suspend_mode == TEGRA_SUSPEND_LP0)
1825                 tegra_lp0_suspend_init();
1826
1827         suspend_set_ops(&tegra_suspend_ops);
1828
1829         /* Create /sys/power/suspend/mode */
1830         suspend_kobj = kobject_create_and_add("suspend", power_kobj);
1831         if (suspend_kobj) {
1832                 if (sysfs_create_file(suspend_kobj,
1833                                                 &suspend_mode_attribute.attr))
1834                         pr_err("%s: sysfs_create_file suspend mode failed!\n",
1835                                                                 __func__);
1836                 if (sysfs_create_file(suspend_kobj,
1837                                         &suspend_resume_time_attribute.attr))
1838                         pr_err("%s: sysfs_create_file resume_time failed!\n",
1839                                                                 __func__);
1840                 if (sysfs_create_file(suspend_kobj,
1841                                         &suspend_time_attribute.attr))
1842                         pr_err("%s: sysfs_create_file suspend_time failed!\n",
1843                                                                 __func__);
1844         }
1845
1846         iram_cpu_lp2_mask = tegra_cpu_lp2_mask;
1847         iram_cpu_lp1_mask = tegra_cpu_lp1_mask;
1848         iram_mc_clk_mask = tegra_mc_clk_mask;
1849
1850         /* clear any IO DPD settings left behind by the bootloader */
1851         tegra_bl_io_dpd_cleanup();
1852
1853 fail:
1854 #endif
1855         if (plat->suspend_mode == TEGRA_SUSPEND_NONE)
1856                 tegra_pd_in_idle(false);
1857
1858         current_suspend_mode = plat->suspend_mode;
1859 }
1860
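/*
 * LP1BB helpers (apparently the LP1 variant used while the baseband is
 * active): callers use these to pin the EMC rate window and the minimum
 * core voltage applied in that state; the rate getter falls back to
 * 204 MHz when nothing has been set.
 */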
1861 void tegra_lp1bb_suspend_emc_rate(unsigned long emc_min, unsigned long emc_max)
1862 {
1863         pdata->lp1bb_emc_rate_min = emc_min;
1864         pdata->lp1bb_emc_rate_max = emc_max;
1865 }
1866
1867 void tegra_lp1bb_suspend_mv_set(int mv)
1868 {
1869         if (WARN_ON_ONCE(!pdata))
1870                 return;
1871
1872         pdata->lp1bb_core_volt_min = mv;
1873 }
1874
1875 unsigned long tegra_lp1bb_emc_min_rate_get(void)
1876 {
1877         if (WARN_ON_ONCE(!pdata) || !pdata->lp1bb_emc_rate_min)
1878                 return 204000000;
1879
1880         return pdata->lp1bb_emc_rate_min;
1881 }
1882
1883 unsigned long debug_uart_port_base = 0;
1884 EXPORT_SYMBOL(debug_uart_port_base);
1885
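/*
 * Debug UART context save/restore across suspend.  Tegra UART registers
 * are on a 32-bit stride, hence the "* 4" offsets; LCR, MCR, IER and the
 * DLL/DLM divisor latch are saved here and written back, together with a
 * FIFO re-enable, on resume.
 */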
1886 static int tegra_debug_uart_suspend(void)
1887 {
1888         void __iomem *uart;
1889         u32 lcr;
1890
1891         if (!debug_uart_port_base)
1892                 return 0;
1893
1894         uart = IO_ADDRESS(debug_uart_port_base);
1895
1896         lcr = readb(uart + UART_LCR * 4);
1897
1898         tegra_sctx.uart[0] = lcr;
1899         tegra_sctx.uart[1] = readb(uart + UART_MCR * 4);
1900
1901         /* DLAB = 0 */
1902         writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1903
1904         tegra_sctx.uart[2] = readb(uart + UART_IER * 4);
1905
1906         /* DLAB = 1 */
1907         writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1908
1909         tegra_sctx.uart[3] = readb(uart + UART_DLL * 4);
1910         tegra_sctx.uart[4] = readb(uart + UART_DLM * 4);
1911
1912         writeb(lcr, uart + UART_LCR * 4);
1913
1914         return 0;
1915 }
1916
1917 static void tegra_debug_uart_resume(void)
1918 {
1919         void __iomem *uart;
1920         u32 lcr;
1921
1922         if (!debug_uart_port_base)
1923                 return;
1924
1925         uart = IO_ADDRESS(debug_uart_port_base);
1926
1927         lcr = tegra_sctx.uart[0];
1928
1929         writeb(tegra_sctx.uart[1], uart + UART_MCR * 4);
1930
1931         /* DLAB = 0 */
1932         writeb(lcr & ~UART_LCR_DLAB, uart + UART_LCR * 4);
1933
1934         writeb(UART_FCR_ENABLE_FIFO | UART_FCR_T_TRIG_01 | UART_FCR_R_TRIG_01,
1935                         uart + UART_FCR * 4);
1936
1937         writeb(tegra_sctx.uart[2], uart + UART_IER * 4);
1938
1939         /* DLAB = 1 */
1940         writeb(lcr | UART_LCR_DLAB, uart + UART_LCR * 4);
1941
1942         writeb(tegra_sctx.uart[3], uart + UART_DLL * 4);
1943         writeb(tegra_sctx.uart[4], uart + UART_DLM * 4);
1944
1945         writeb(lcr, uart + UART_LCR * 4);
1946 }
1947
1948 static struct syscore_ops tegra_debug_uart_syscore_ops = {
1949         .suspend = tegra_debug_uart_suspend,
1950         .resume = tegra_debug_uart_resume,
1951 };
1952
1953 struct clk *debug_uart_clk = NULL;
1954 EXPORT_SYMBOL(debug_uart_clk);
1955
1956 void tegra_console_uart_suspend(void)
1957 {
1958         if (console_suspend_enabled && debug_uart_clk)
1959                 tegra_clk_disable_unprepare(debug_uart_clk);
1960 }
1961
1962 void tegra_console_uart_resume(void)
1963 {
1964         if (console_suspend_enabled && debug_uart_clk)
1965                 tegra_clk_prepare_enable(debug_uart_clk);
1966 }
1967
1968 static int tegra_debug_uart_syscore_init(void)
1969 {
1970         register_syscore_ops(&tegra_debug_uart_syscore_ops);
1971         return 0;
1972 }
1973 arch_initcall(tegra_debug_uart_syscore_init);
1974
1975 #if defined(CONFIG_ARCH_TEGRA_14x_SOC)
1976 static inline bool pmc_write_check(int index, int bit_position)
1977 {
1978         if (pmc_write_bitmap[index] & (1 << bit_position))
1979                 return true;
1980         else
1981                 return false;
1982 }
1983
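/*
 * Copy what appears to be the warmboot (WB0) parameter block for the
 * selected instance from DRAM into the PMC scratch registers.
 * pmc_write_bitmap marks, bit by bit, which 32-bit words of the block
 * actually need to be mirrored.
 */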
1984 static void update_pmc_registers(unsigned long rate)
1985 {
1986         u32 i, j;
1987         int instance = 1;
1988
1989         /* FIXME: convert rate to instance */
1990
1991         /* Based on index, we select that block of scratches */
1992         u32 base2 = (tegra_wb0_params_address + (instance - 1) *
1993                 tegra_wb0_params_block_size);
1994         void __iomem *base = ioremap(base2, tegra_wb0_params_block_size);

        /* bail out if the scratch parameter block could not be mapped */
        if (WARN_ON(!base))
                return;
1995
1996 #define copy_dram_to_pmc(index, bit)    \
1997         pmc_32kwritel(readl(base + PMC_REGISTER_OFFSET(index, bit)), \
1998                 PMC_REGISTER_OFFSET(index, bit) + PMC_SCRATCH0)
1999
2000
2001         /* Iterate through the bitmap, and copy those registers
2002          * which are marked in the bitmap
2003          */
2004         for (i = 0, j = 0; j < ARRAY_SIZE(pmc_write_bitmap);) {
2005                 if (pmc_write_bitmap[j] == 0) {
2006                         j++;
2007                         i = 0;
2008                         continue;
2009                 }
2010
2011                 if (pmc_write_check(j, i))
2012                         copy_dram_to_pmc(j, i);
2013
2014                 if (++i >= (sizeof(pmc_write_bitmap[0]) * 8)) {
2015                         i = 0;
2016                         j++;
2017                 }
2018         }
2019
2020 #undef copy_dram_to_pmc
2021         iounmap(base);
2022 }
2023 #endif
2024
2025 #ifdef CONFIG_ARM_ARCH_TIMER
2026
2027 static u32 tsc_suspend_start;
2028 static u32 tsc_resume_start;
2029
2030 #define pmc_writel(value, reg) \
2031                 writel(value, pmc + (reg))
2032 #define pmc_readl(reg) \
2033                 readl(pmc + (reg))
2034
2035 #define PMC_DPD_ENABLE                  0x24
2036 #define PMC_DPD_ENABLE_TSC_MULT_ENABLE  (1 << 1)
2037
2038 #define PMC_TSC_MULT                    0x2b4
2039 #define PMC_TSC_MULT_FREQ_STS           (1 << 16)
2040
2041 #define TSC_TIMEOUT_US                  32
2042
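/*
 * Arch timer (TSC) hand-off around suspend: TSC_MULT_ENABLE is set in
 * PMC_DPD_ENABLE before entering suspend and cleared again on resume;
 * the wait helpers then poll PMC_TSC_MULT_FREQ_STS, bounded by
 * TSC_TIMEOUT_US of the 1 us timer, for the hardware to acknowledge
 * the change.
 */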
2043 void tegra_tsc_suspend(void)
2044 {
2045         if (arch_timer_initialized) {
2046                 u32 reg = pmc_readl(PMC_DPD_ENABLE);
2047                 BUG_ON(reg & PMC_DPD_ENABLE_TSC_MULT_ENABLE);
2048                 reg |= PMC_DPD_ENABLE_TSC_MULT_ENABLE;
2049                 pmc_writel(reg, PMC_DPD_ENABLE);
2050                 tsc_suspend_start = timer_readl(TIMERUS_CNTR_1US);
2051         }
2052 }
2053
2054 void tegra_tsc_resume(void)
2055 {
2056         if (arch_timer_initialized) {
2057                 u32 reg = pmc_readl(PMC_DPD_ENABLE);
2058                 BUG_ON(!(reg & PMC_DPD_ENABLE_TSC_MULT_ENABLE));
2059                 reg &= ~PMC_DPD_ENABLE_TSC_MULT_ENABLE;
2060                 pmc_writel(reg, PMC_DPD_ENABLE);
2061                 tsc_resume_start = timer_readl(TIMERUS_CNTR_1US);
2062         }
2063 }
2064
2065 void tegra_tsc_wait_for_suspend(void)
2066 {
2067         if (arch_timer_initialized) {
2068                 while ((timer_readl(TIMERUS_CNTR_1US) - tsc_suspend_start) <
2069                         TSC_TIMEOUT_US) {
2070                         if (pmc_readl(PMC_TSC_MULT) & PMC_TSC_MULT_FREQ_STS)
2071                                 break;
2072                         cpu_relax();
2073                 }
2074         }
2075 }
2076
2077 void tegra_tsc_wait_for_resume(void)
2078 {
2079         if (arch_timer_initialized) {
2080                 while ((timer_readl(TIMERUS_CNTR_1US) - tsc_resume_start) <
2081                         TSC_TIMEOUT_US) {
2082                         if (!(pmc_readl(PMC_TSC_MULT) & PMC_TSC_MULT_FREQ_STS))
2083                                 break;
2084                         cpu_relax();
2085                 }
2086         }
2087 }
2088 #endif
2089
2090 #if defined(CONFIG_DEBUG_FS) && INSTRUMENT_CLUSTER_SWITCH
2091
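/*
 * debugfs reporting of G<->LP cluster switch latency, collected when
 * INSTRUMENT_CLUSTER_SWITCH is enabled and exposed (with debugfs
 * typically mounted at /sys/kernel/debug) as
 * tegra_pm_core/cluster_switch_stats.
 */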
2092 static void cluster_switch_stats_show(
2093         struct seq_file *s, struct tegra_cluster_switch_time_stats *stats)
2094 {
2095         seq_printf(s, "%u-samples average:           %lu\n",
2096                    CLUSTER_SWITCH_AVG_SAMPLES,
2097                    stats->avg >> CLUSTER_SWITCH_TIME_AVG_SHIFT);
2098         seq_printf(s, "exponential average:          %lu\n",
2099                    stats->exp_avg >> CLUSTER_SWITCH_TIME_AVG_SHIFT);
2100         seq_printf(s, "maximum since boot:           %lu\n\n", stats->max);
2101 }
2102
2103
2104 static int tegra_cluster_switch_stats_show(struct seq_file *s, void *data)
2105 {
2106         seq_printf(s, "G=>LP cluster switch timing:  (us)\n");
2107         cluster_switch_stats_show(s, &g2lp_stats);
2108         seq_printf(s, "LP=>G cluster switch timing:  (us)\n");
2109         cluster_switch_stats_show(s, &lp2g_stats);
2110         return 0;
2111 }
2112
2113 static int tegra_cluster_switch_stats_open(
2114         struct inode *inode, struct file *file)
2115 {
2116         return single_open(file, tegra_cluster_switch_stats_show,
2117                            inode->i_private);
2118 }
2119
2120 static const struct file_operations tegra_cluster_switch_stats_ops = {
2121         .open           = tegra_cluster_switch_stats_open,
2122         .read           = seq_read,
2123         .llseek         = seq_lseek,
2124         .release        = single_release,
2125 };
2126
2127 static int __init tegra_pm_core_debug_init(void)
2128 {
2129         struct dentry *dir, *d;
2130
2131         dir = debugfs_create_dir("tegra_pm_core", NULL);
2132         if (!dir)
2133                 return -ENOMEM;
2134
2135         d = debugfs_create_file("cluster_switch_stats", S_IRUGO, dir, NULL,
2136                 &tegra_cluster_switch_stats_ops);
2137         if (!d)
2138                 return -ENOMEM;
2139
2140         return 0;
2141 }
2142
2143 late_initcall(tegra_pm_core_debug_init);
2144 #endif