First version
[3rdparty/ote_partner/tlk.git] / platform / tegra / common / pm.c
1 /*
2  * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <err.h>
25 #include <errno.h>
26 #include <debug.h>
27 #include <assert.h>
28 #include <malloc.h>
29 #include <string.h>
30 #include <platform.h>
31 #include <arch.h>
32 #include <stdlib.h>
33 #include <lib/heap.h>
34 #include <arch/outercache.h>
35 #include <platform/memmap.h>
36 #include <platform/tzrammap.h>
37 #include <platform/irqs.h>
38 #include <arch/arm.h>
39 #include <arch/arm/mmu.h>
40 #include <platform/platform_p.h>
41 #include <ote_intf.h>
42
/* size of the heap-allocated monitor-mode stack (bytes) */
#define MONITOR_MODE_STACK_SZ   4096

/* referenced APBDEV_PMC_SECURE registers (offsets from TEGRA_PMC_BASE) */
#define PMC_SECURE_DISABLE2     0x2c4
#define PMC_SECURE_SCRATCH22    0x338

/*
 * The PSCI spec from ARM states the following for power mgmt:
 * SMC         (r0) - CPU_SUSPEND  = 0x84000001
 * power_state (r1) - Bits [0:15]  = StateID
 *                    Bit  [16]    = StateType <0=Stdby, 1=PwrDn>
 *                    Bits [17:23] = MBZ
 *                    Bits [24:25] = AffinityLevel <0=cpu, 1=cluster>
 *                    Bits [26:31] = MBZ
 * entry_addr  (r2) - CPU wake up addr
 * context_id  (r3) - args in r0 when cpu wakes up from pwrdn state and enters
 *                    exception level of caller
 * returns     (r0) - SUCCESS/INVALID PARAM
 */
/* power_state (r1) encodings this file recognizes; StateID distinguishes them */
#define LP0                             ((1 << 16) | (1 << 24) | 1)
#define LP1                             ((1 << 16) | 2)
#define LP1_MC_CLK_STOP                 ((1 << 16) | 3)
#define LP2_CLUSTER_PWR_DN              ((1 << 16) | (1 << 24) | 4)
#define LP2_CLUSTER_PWR_DN_LEGACY       ((1 << 16) | 4)
#define LP2_NO_FLUSH_LEGACY             ((1 << 16) | 5)

/* symbols provided by monitor asm/linker script (addresses, not values) */
extern unsigned int monitor_vector_base;
extern unsigned long mon_stack_top;
extern unsigned long _ns_resume_addr;
extern unsigned long boot_secondary_cpu_addr;

/* location in NS to boot cpus */
unsigned long _ns_addr_secondary_cpus = 0;

/* last low-power state entered (CPU_IN_LP0/LP1/LP2); set by suspend SMCs */
unsigned int cpu_power_down_mode = 0;

/* tracks if we need to load resume handlers into tzram */
static bool load_tzram_lp1_resume_handler = true;
81
/*
 * Record the monitor-mode stack pointer into mon_stack_top so the
 * resume path can restore it.  The saved value is adjusted upward by
 * sizeof(struct tz_monitor_frame) to account for the frame currently
 * on the monitor stack (i.e. the value sp will have once that frame
 * is popped).  Briefly switches to monitor mode (0x16) to read sp.
 */
static void pm_save_monitor_stack(void)
{
	/*
	 * Save the (adjusted) value of the monitor stack after the
	 * next frame is popped.
	 */
	__asm__ volatile (
		"mrs    r1, cpsr        \n" // save current mode
		"cps    #0x16           \n" // change to monitor mode
		"mov    %0, sp          \n" // save current mon sp
		"add    %0, %0, %1      \n" // account for current frame
		"msr    cpsr_c, r1      \n" // restore previous mode (control bits only)
		: "=r" (mon_stack_top)
		: "I" (sizeof(struct tz_monitor_frame))
		: "r1" );
}
98
99 static void pm_set_monitor_stack(void)
100 {
101         void *stack_top_mon;
102         int stack_size = MONITOR_MODE_STACK_SZ;
103
104         stack_top_mon = heap_alloc(stack_size, 0);
105         if (stack_top_mon)
106         {
107                 __asm__ volatile (
108                         "mrs    r2, cpsr        \n" // save current mode
109                         "cps    #0x16           \n" // change to monitor mode
110                         "mov    sp, %0          \n" // set mon_sp
111                         "add    sp, sp, %1      \n" // set mon_sp
112                         "msr    cpsr, r2        \n" // restore previous mode
113                         : : "r" (stack_top_mon),
114                             "r" (stack_size / 2) : "memory"
115                 );
116         } else {
117                 panic("no memory available for monitor stack");
118         }
119
120         memset(stack_top_mon, 0, stack_size);
121         mon_stack_top = (unsigned long)(stack_top_mon + (stack_size / 2));
122 }
123
/*
 * Program MVBAR (Monitor Vector Base Address Register,
 * cp15 c12/c0/1) with the given address.
 */
static void pm_set_mvbar(unsigned int val)
{
	__asm__ volatile ("mcr p15, 0, %0, c12, c0, 1" : : "r" (val));
}
131
132 static void pm_set_reset_vector(unsigned long vector_addr)
133 {
134         uint32_t evp_cpu_reset = TEGRA_EXCEPTION_VECTORS_BASE + 0x100;
135         uint32_t sb_ctrl = TEGRA_SB_BASE;
136         uint32_t reg;
137
138         /* set new reset vector */
139         *(volatile uint32_t *)evp_cpu_reset = (uint32_t)vector_addr;
140         __asm__ volatile ("dmb" : : : "memory");
141
142         /* dummy read to ensure successful write */
143         reg = *(volatile uint32_t *)evp_cpu_reset;
144
145         /* lock reset vector */
146         reg = *(volatile uint32_t *)sb_ctrl;
147         reg |= 2;
148         *(volatile uint32_t *)sb_ctrl = reg;
149         __asm__ volatile ("dmb" : : : "memory");
150 }
151
/*
 * Widen normal-world permissions in NSACR (cp15 c1/c1/2):
 *  - 0x00000C00 sets bits [11:10], non-secure access to CP10/CP11
 *    (VFP/NEON).
 *  - 0x00060000 sets bits [18:17]; per the comment below these grant
 *    NS the ability to enable SMP and lock TLB entries
 *    (NOTE(review): bit meanings are CPU-specific — confirm against
 *    the core's TRM).
 */
static void pm_set_nsacr(void)
{
	/* let normal world enable SMP, lock TLB, access CP10/11 */
	__asm__ volatile (
		"mrc    p15, 0, r0, c1, c1, 2           @ NSACR \n"
		"orr    r0, r0, #(0x00000C00)                   \n"
		"orr    r0, r0, #(0x00060000)                   \n"
		"mcr    p15, 0, r0, c1, c1, 2           @ NSACR \n"
		::: "r0"
	);
}
163
164 void pm_early_init(void)
165 {
166         /* set monitor vector base address (use vaddr, since MMU's enabled) */
167         pm_set_mvbar((unsigned int)&monitor_vector_base);
168
169         /* populate the reset vector to boot all the secondary cpus */
170         pm_set_reset_vector(virtual_to_physical(boot_secondary_cpu_addr));
171 }
172
/* LP1 resume code: start/end markers of the relocatable handler blob
 * (asm/linker-provided; only their addresses are meaningful) */
extern uint32_t _lp1_resume;
extern uint32_t _lp1_resume_end;

/* byte size of the LP1 resume handler copied into TZRAM */
#define LP1_RESUME_HANDLER_SIZE \
	((uint32_t)&_lp1_resume_end - (uint32_t)&_lp1_resume)
179
180 static void pm_load_tzram_lp1_resume_handler(void)
181 {
182         ASSERT(LP1_RESUME_HANDLER_SIZE < TEGRA_TZRAM_SIZE);
183
184         memcpy((void *)(TZRAM_LP1_RESUME_HANDLER),
185                (void *)&_lp1_resume,
186                LP1_RESUME_HANDLER_SIZE);
187 }
188
189 static void pm_save_lp1_context(void)
190 {
191         /* store any state needed for lp1 resume to tzram */
192         TZRAM_STORE(TZRAM_BOOT_SECONDARY_CPU_ADDR,
193                 virtual_to_physical(boot_secondary_cpu_addr));
194         TZRAM_STORE(TZRAM_MON_STACK_TOP, virtual_to_physical(mon_stack_top));
195         TZRAM_STORE(TZRAM_NS_RESUME_ADDR, _ns_resume_addr);
196         TZRAM_STORE(TZRAM_MVBAR, virtual_to_physical(&monitor_vector_base));
197
198         cpu_copy_context((void *)TZRAM_CPU_CONTEXT);
199 }
200
201 void pm_init(void)
202 {
203         extern unsigned long mon_p2v_offset;
204         extern uint32_t _boot_secondary_phys_base;
205         extern uint32_t __load_phys_size;
206         uint32_t reg;
207
208         pm_early_init();
209
210         /* set monitor vector stack */
211         pm_set_monitor_stack();
212
213         /* set normal world access in NSACR */
214         pm_set_nsacr();
215
216         /* save mmu setup used to bring up secondary cpus */
217         cpu_save_context();
218
219         /* save off values to help with v-to-p operations */
220         _boot_secondary_phys_base = __load_phys_base;
221         mon_p2v_offset = (VMEMBASE - __load_phys_base);
222
223         /* install the cpu resume handler to PMC_SEC_SCRATCH22 */
224         reg = readl(TEGRA_PMC_BASE + PMC_SECURE_DISABLE2);
225         writel(reg & ~(1 << 28), TEGRA_PMC_BASE + PMC_SECURE_DISABLE2); /* unlock */
226
227         writel(virtual_to_physical(boot_secondary_cpu_addr), TEGRA_PMC_BASE + PMC_SECURE_SCRATCH22);
228
229         reg = readl(TEGRA_PMC_BASE + PMC_SECURE_DISABLE2);
230         writel(reg | 1 << 28, TEGRA_PMC_BASE + PMC_SECURE_DISABLE2);    /* lock */
231
232         platform_init_memory(__load_phys_base, __load_phys_size);
233         platform_config_interrupts();
234 }
235
236 /*
237  * Stubs for routines not used by all platforms
238  */
239 __WEAK void pm_handle_smc_l2(unsigned int smc)
240 {
241         dprintf(CRITICAL, "stubbed L2 SMC handler (shouldn't have been issued)\n");
242         return;
243 }
244
245 __WEAK void pm_handle_smc_deep_sleep(void)
246 {
247         return;
248 }
249
250 /*
251  * Suspend-related SMCs.
252  */
/*
 * Prepare for an LP0 suspend.  Statement order is the contract here:
 * the reset vector and all saved state must be in place before the
 * final dcache flush, which must come last so everything written above
 * reaches memory before power-down.
 */
static void pm_handle_lp0_suspend_smc(struct tz_monitor_frame *frame)
{
	cpu_power_down_mode = CPU_IN_LP0;

	/* store passed in non-secure resume handler address (PSCI entry_addr, r2) */
	_ns_resume_addr = frame->r[2];

	/* set our LP0 reset vector */
	pm_set_reset_vector(virtual_to_physical(boot_secondary_cpu_addr));

	/* save off current state */
	cpu_save_context();
	pm_save_monitor_stack();

	/* need to reload LP1 handler into tzram before next LP1 suspend */
	load_tzram_lp1_resume_handler = true;

	/* handle any chip-specific steps */
	pm_handle_smc_deep_sleep();

	platform_disable_debug_intf();

	/* flush dcache last */
	flush_dcache_all();
}
278
/*
 * Prepare for an LP1 suspend.  The resume path executes out of TZRAM,
 * so the reset vector points at the TZRAM handler and all state that
 * handler needs is stashed in TZRAM first.  Statement order matters:
 * the dcache flush must come last so everything above reaches memory.
 */
static void pm_handle_lp1_suspend_smc(struct tz_monitor_frame *frame)
{
	cpu_power_down_mode = CPU_IN_LP1;

	/* store passed in non-secure resume handler address (PSCI entry_addr, r2) */
	_ns_resume_addr = frame->r[2];

	/* set our LP1 reset vector (the handler copy living in TZRAM) */
	pm_set_reset_vector(TZRAM_LP1_RESUME_HANDLER);

	/* save off current state */
	cpu_save_context();
	pm_save_monitor_stack();

	/* save off state needed by LP1 resume handler */
	TZRAM_STORE(TZRAM_CPU_AVOID_CLKM_SWITCH,
		frame->r[1] == LP1_MC_CLK_STOP);
	TZRAM_STORE(TZRAM_BOOT_SECONDARY_CPU_ADDR,
		virtual_to_physical(boot_secondary_cpu_addr));
	pm_save_lp1_context();

	/* load LP1 resume handler to TZRAM if necessary
	 * (skipped when still resident from a previous LP1 cycle) */
	if (load_tzram_lp1_resume_handler) {
		pm_load_tzram_lp1_resume_handler();
		load_tzram_lp1_resume_handler = false;
	}

	/* flush dcache last */
	flush_dcache_all();
}
309
/*
 * Prepare for an LP2 suspend (cpu/cluster power-down; DRAM stays up,
 * so no NS resume address or TZRAM staging is needed).  The frame
 * parameter is currently unused but kept for signature parity with
 * the other suspend handlers.  Dcache flush must come last.
 */
static void pm_handle_lp2_suspend_smc(struct tz_monitor_frame *frame)
{
	cpu_power_down_mode = CPU_IN_LP2;

	/* set our LP2 reset vector */
	pm_set_reset_vector(virtual_to_physical(boot_secondary_cpu_addr));

	/* save off current state */
	cpu_save_context();
	pm_save_monitor_stack();

	/* flush dcache last */
	flush_dcache_all();
}
324
325 /*
326  * System related SMCs handled on the current idle stack.
327  * These should be simple operations that can't block.
328  */
329 void pm_handle_platform_smc(struct tz_monitor_frame *frame)
330 {
331         int error = 0;
332
333         switch (frame->r[0]) {
334                 case SMC_SIP_L2_MANAGEMENT:
335                         pm_handle_smc_l2(frame->r[1]);
336                         break;
337
338                 case SMC_SIP_CPU_RESET_VECTOR_LEGACY:
339                 case SMC_SIP_CPU_RESET_VECTOR:
340                         _ns_addr_secondary_cpus = frame->r[1];
341                         if (frame->r[1] == 0)
342                                 _ns_addr_secondary_cpus = frame->r[2];
343 #if defined(ARM_USE_CPU_CACHING)
344                         platform_clean_invalidate_cache_range(
345                                 (vaddr_t)&_ns_addr_secondary_cpus,
346                                 sizeof(_ns_addr_secondary_cpus));
347 #endif
348                         platform_enable_debug_intf();
349                         break;
350
351                 case SMC_SIP_DEVICE_SUSPEND:
352                         switch (frame->r[1]) {
353                         case LP2_CLUSTER_PWR_DN:
354                         case LP2_CLUSTER_PWR_DN_LEGACY:
355                         case LP2_NO_FLUSH_LEGACY:
356                                 pm_handle_lp2_suspend_smc(frame);
357                                 break;
358                         case LP1:
359                         case LP1_MC_CLK_STOP:
360                                 pm_handle_lp1_suspend_smc(frame);
361                                 break;
362                         case LP0:
363                                 pm_handle_lp0_suspend_smc(frame);
364                                 break;
365                         default:
366                                 error = -EINVAL;
367                                 break;
368                         }
369                         break;
370         }
371         frame->r[0] = error;
372 }