ARM: Tegra: Add CONFIG_TEGRA_USE_SECURE_KERNEL
diff --git a/arch/arm/mach-tegra/headsmp.S b/arch/arm/mach-tegra/headsmp.S
index ec988b9..06091dd 100644
@@ -3,7 +3,7 @@
  *
  * CPU initialization routines for Tegra SoCs
  *
- * Copyright (c) 2009-2011, NVIDIA Corporation.
+ * Copyright (c) 2009-2013, NVIDIA CORPORATION.  All rights reserved.
  * Copyright (c) 2011 Google, Inc.
  * Author: Colin Cross <ccross@android.com>
  *         Gary King <gking@nvidia.com>
 #include <linux/linkage.h>
 #include <linux/init.h>
 
+#include <asm/assembler.h>
 #include <asm/cache.h>
 #include <asm/page.h>
+#include <asm/hardware/cache-l2x0.h>
 
 #include "flowctrl.h"
 #include "iomap.h"
-#include "reset.h"
 #include "sleep.h"
-
-#define DEBUG_CPU_RESET_HANDLER        0       /* Non-zero enables debug code */
+#include "reset.h"
 
 #define APB_MISC_GP_HIDREV     0x804
 #define PMC_SCRATCH41  0x140
 
-#define RESET_DATA(x)  ((TEGRA_RESET_##x)*4)
-
-        .section ".text.head", "ax"
-       __CPUINIT
-
-/*
- *   The secondary kernel init calls v7_flush_dcache_all before it enables
- *   the L1; however, the L1 comes out of reset in an undefined state, so
- *   the clean + invalidate performed by v7_flush_dcache_all causes a bunch
- *   of cache lines with uninitialized data and uninitialized tags to get
- *   written out to memory, which does really unpleasant things to the main
- *   processor.  We fix this by performing an invalidate, rather than a
- *   clean + invalidate, before jumping into the kernel.
- */
-ENTRY(v7_invalidate_l1)
-        mov     r0, #0
-        mcr     p15, 2, r0, c0, c0, 0
-        mrc     p15, 1, r0, c0, c0, 0
-
-        ldr     r1, =0x7fff
-        and     r2, r1, r0, lsr #13
-
-        ldr     r1, =0x3ff
-
-        and     r3, r1, r0, lsr #3  @ NumWays - 1
-        add     r2, r2, #1          @ NumSets
-
-        and     r0, r0, #0x7
-        add     r0, r0, #4          @ SetShift
-
-        clz     r1, r3              @ WayShift
-        add     r4, r3, #1          @ NumWays
-1:      sub     r2, r2, #1          @ NumSets--
-        mov     r3, r4              @ Temp = NumWays
-2:      subs    r3, r3, #1          @ Temp--
-        mov     r5, r3, lsl r1
-        mov     r6, r2, lsl r0
-        orr     r5, r5, r6          @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
-        mcr     p15, 0, r5, c7, c6, 2
-        bgt     2b
-        cmp     r2, #0
-        bgt     1b
-        dsb
-        isb
-        mov     pc, lr
-ENDPROC(v7_invalidate_l1)
+#define DEBUG_CPU_RESET_HANDLER        0       /* Non-zero enables debug code */
 
+#define RESET_DATA_PHYS (TEGRA_RESET_HANDLER_BASE \
+       + __tegra_cpu_reset_handler_data - __tegra_cpu_reset_handler_start)
 
 #ifdef CONFIG_SMP
-/* 
+/*
  *     tegra_secondary_startup
  *
  *      Initial secondary processor boot vector; jumps to kernel's
  *      secondary_startup routine. Used for initial boot and hotplug
  *      of secondary CPUs.
  */
+       __CPUINIT
 ENTRY(tegra_secondary_startup)
-        bl      v7_invalidate_l1
-        b       secondary_startup
+       bl      __invalidate_cpu_state
+
+	/*
+	 * Enable user space perf counter access. The PMU configuration
+	 * registers are only accessible from the secure state, so this
+	 * must be done here.
+	 */
+	mrc	p15, 0, r0, c9, c12, 0	@ PMCR
+	lsr	r0, #11
+	and	r0, r0, #0x1f		@ PMCR.N: number of event counters
+	mov	r1, #1
+	lsl	r1, r1, r0
+	sub	r1, r1, #1		@ mask of all event counters
+	movt	r1, #0x8000		@ plus the cycle counter
+	mcr	p15, 0, r1, c9, c14, 2	@ PMINTENCLR: no overflow interrupts
+	mov	r0, #1
+	mcr	p15, 0, r0, c9, c14, 0	@ PMUSERENR: allow user space access
+
+       b       secondary_startup
 ENDPROC(tegra_secondary_startup)
 #endif
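For reference, a minimal C sketch of the perf-counter sequence added above (illustrative only, not part of the patch; the function name is an assumption and the CP15 accesses are written as inline asm):

#include <linux/types.h>

/*
 * Mirror of the assembly in tegra_secondary_startup: read PMCR.N, mask
 * overflow interrupts for every event counter plus the cycle counter via
 * PMINTENCLR, then set PMUSERENR.EN so user space can read the PMU.
 * As the comment above notes, this only works from the secure state.
 */
static void enable_user_pmu_access(void)
{
	u32 pmcr, nr_counters, mask;

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (pmcr));	/* PMCR */
	nr_counters = (pmcr >> 11) & 0x1f;				/* PMCR.N */
	mask = ((1U << nr_counters) - 1) | (1U << 31);			/* + cycle counter */
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (mask));	/* PMINTENCLR */
	asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (1));	/* PMUSERENR */
}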
 
+        .section ".text.head", "ax"
 #ifdef CONFIG_PM_SLEEP
 /*
  *     tegra_resume
@@ -105,32 +78,262 @@ ENDPROC(tegra_secondary_startup)
  *       re-enabling sdram.
  */
 ENTRY(tegra_resume)
-       bl      tegra_invalidate_l1
+#ifdef CONFIG_TEGRA_USE_SECURE_KERNEL
+       mov32   r1, TEGRA_TMRUS_BASE
+       ldr     r0, [r1]
+       adr     r1, tegra_resume_entry_time
+       str     r0, [r1]
+#endif
+
+       bl      __invalidate_cpu_state
 
        cpu_id  r0
+#ifndef CONFIG_TEGRA_VIRTUAL_CPUID
        cmp     r0, #0                          @ CPU0?
-       bne     tegra_cpu_resume_phys           @ no
+       bne     cpu_resume                      @ no
+#endif
 
 #ifndef CONFIG_ARCH_TEGRA_2x_SOC
        @ Clear the flow controller flags for this CPU.
-       mov32   r2, TEGRA_FLOW_CTRL_BASE+8      @ CPU0 CSR
-       ldr     r1, [r2]
+	cpu_to_csr_reg	r1, r0
+	mov32	r2, TEGRA_FLOW_CTRL_BASE
+	add	r2, r2, r1			@ point r2 at this CPU's CSR
+	ldr	r1, [r2]
        orr     r1, r1, #(1 << 15) | (1 << 14)  @ write to clear event & intr
-       movw    r0, #0x0FFD     @ enable, cluster_switch, immed, & bitmaps
+       movw    r0, #0x3FFD     @ enable, enable_ext, cluster_switch, immed, & bitmaps
        bic     r1, r1, r0
        str     r1, [r2]
-#endif
+#endif /* !CONFIG_ARCH_TEGRA_2x_SOC */
 
+#if defined(CONFIG_HAVE_ARM_SCU)
        /* enable SCU */
        mov32   r0, TEGRA_ARM_PERIF_BASE
        ldr     r1, [r0]
        orr     r1, r1, #1
+#ifdef CONFIG_ARCH_TEGRA_14x_SOC
+       orr     r1, r1, #8
+#endif
        str     r1, [r0]
-
-       b       tegra_cpu_resume_phys
+#endif /* CONFIG_HAVE_ARM_SCU */
+
+#ifdef CONFIG_TEGRA_USE_SECURE_KERNEL
+#ifndef CONFIG_ARCH_TEGRA_11x_SOC
+       mov32   r1, TEGRA_TMRUS_BASE
+       ldr     r0, [r1]
+       adr     r1, tegra_resume_smc_entry_time
+       str     r0, [r1]
+
+       /* wake up */
+       mov     r0, #0x00000003
+       bl      tegra_generic_smc
+
+       mov32   r1, TEGRA_TMRUS_BASE
+       ldr     r0, [r1]
+       adr     r1, tegra_resume_smc_exit_time
+       str     r0, [r1]
+#endif /* !CONFIG_ARCH_TEGRA_11x_SOC */
+#endif /* CONFIG_TEGRA_USE_SECURE_KERNEL */
+
+#ifdef CONFIG_CACHE_L2X0
+#if !defined(CONFIG_TEGRA_USE_SECURE_KERNEL) && \
+               !defined(CONFIG_ARCH_TEGRA_14x_SOC)
+       adr     r0, tegra_resume_l2_init
+       ldr     r1, [r0]
+       tst     r1, #1
+       beq     no_l2_init
+       /* Enable L2 */
+       bic     r1, #1
+       str     r1, [r0]
+       mov32   r3, TEGRA_ARM_PL310_BASE
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
+       mov32   r0, 0x331                       /* tag latency */
+       mov32   r1, 0x441                       /* data latency */
+#elif defined(CONFIG_ARCH_TEGRA_3x_SOC) || defined(CONFIG_ARCH_TEGRA_14x_SOC)
+#ifdef CONFIG_TEGRA_SILICON_PLATFORM
+       mov32   r0, TEGRA_FLOW_CTRL_BASE + 0x2c /* FLOW_CTRL_CLUSTER_CONTROL */
+       mov32   r2, RESET_DATA_PHYS
+       ldr     r1, [r0]
+       tst     r1, #1                          /* 0 == G, 1 == LP */
+       ldrne   r0, [r2, #RESET_DATA(C1_L2_TAG_LATENCY)]
+       ldrne   r1, [r2, #RESET_DATA(C1_L2_DATA_LATENCY)]
+       ldreq   r0, [r2, #RESET_DATA(C0_L2_TAG_LATENCY)]
+       ldreq   r1, [r2, #RESET_DATA(C0_L2_DATA_LATENCY)]
+#else /* !CONFIG_TEGRA_SILICON_PLATFORM */
+	mov32	r0, 0x770			/* tag latency */
+	mov32	r1, 0x770			/* data latency */
+#endif /* ?CONFIG_TEGRA_SILICON_PLATFORM */
+#endif /* CONFIG_ARCH_TEGRA_3x_SOC || CONFIG_ARCH_TEGRA_14x_SOC */
+       str     r0, [r3, #L2X0_TAG_LATENCY_CTRL]
+       str     r1, [r3, #L2X0_DATA_LATENCY_CTRL]
+#ifndef CONFIG_TEGRA_FPGA_PLATFORM
+#ifdef CONFIG_ARCH_TEGRA_14x_SOC
+       mov32   r0, 0x40000007  /* Enable double line fill */
+#else
+       mov     r0, #7
+#endif
+       str     r0, [r3, #L2X0_PREFETCH_CTRL]
+#endif /* !CONFIG_TEGRA_FPGA_PLATFORM */
+       mov     r0, #3
+       str     r0, [r3, #L2X0_POWER_CTRL]
+       /* figure out aux ctrl */
+       ldr     r2, [r3, #L2X0_CACHE_TYPE]
+       and     r2, r2, #0x700
+       lsl     r2, r2, #(17-8)
+       mov32   r4, 0x7C400001
+       orr     r2, r2, r4
+       ldr     r4, [r3, #L2X0_AUX_CTRL]
+       mov32   r5, 0x8200c3fe
+       and     r4, r4, r5
+       orr     r2, r2, r4
+       str     r2, [r3, #L2X0_AUX_CTRL]
+       mov     r2, #1
+       str     r2, [r3, #L2X0_CTRL]
+#endif /* ?CONFIG_TEGRA_USE_SECURE_KERNEL */
+#endif /* CONFIG_CACHE_L2X0 */
+no_l2_init:
+       b       cpu_resume
 ENDPROC(tegra_resume)
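A hedged C rendering of the PL310 aux-control setup performed just above (illustration only; the function name and the mapped pl310 base are assumptions, and the latency, prefetch and power-control writes are omitted):

#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>

static void tegra_l2x0_reenable(void __iomem *pl310)
{
	u32 aux;

	/* Derive the way-size field of AUX_CTRL[19:17] from the cache type
	 * register, OR in the fixed bits 0x7c400001, and carry over the
	 * bits selected by the 0x8200c3fe mask from the current value. */
	aux  = (readl_relaxed(pl310 + L2X0_CACHE_TYPE) & 0x700) << (17 - 8);
	aux |= 0x7c400001;
	aux |= readl_relaxed(pl310 + L2X0_AUX_CTRL) & 0x8200c3fe;
	writel_relaxed(aux, pl310 + L2X0_AUX_CTRL);
	writel_relaxed(1, pl310 + L2X0_CTRL);	/* enable the L2 */
}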
+
+#ifdef CONFIG_TEGRA_USE_SECURE_KERNEL
+       .globl tegra_resume_timestamps_start
+#ifndef CONFIG_ARCH_TEGRA_11x_SOC
+       .globl tegra_resume_smc_entry_time
+       .globl tegra_resume_smc_exit_time
+#endif /* !CONFIG_ARCH_TEGRA_11x_SOC */
+       .globl tegra_resume_entry_time
+       .globl tegra_resume_timestamps_end
+tegra_resume_timestamps_start:
+#ifndef CONFIG_ARCH_TEGRA_11x_SOC
+tegra_resume_smc_entry_time:
+       .long   0
+tegra_resume_smc_exit_time:
+       .long   0
+#endif /* !CONFIG_ARCH_TEGRA_11x_SOC */
+tegra_resume_entry_time:
+       .long   0
+tegra_resume_timestamps_end:
+#endif /* CONFIG_TEGRA_USE_SECURE_KERNEL */
+#ifdef CONFIG_CACHE_L2X0
+       .globl tegra_resume_l2_init
+tegra_resume_l2_init:
+       .long 0
+#endif /* CONFIG_CACHE_L2X0 */
+#endif /* CONFIG_PM_SLEEP */
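The timestamp words above hold raw reads of TMRUS, Tegra's free-running 1 MHz microsecond counter, so differences between them are directly in microseconds. A hedged sketch of the equivalent read from C (the helper name is an assumption; IO_ADDRESS and TEGRA_TMRUS_BASE come from iomap.h):

#include <linux/io.h>
#include "iomap.h"

static inline u32 tegra_read_usec_cntr(void)
{
	/* offset 0 of the TMRUS block is the 1 us up-counter */
	return readl(IO_ADDRESS(TEGRA_TMRUS_BASE));
}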
+
+/*
+ *     __invalidate_cpu_state
+ *
+ *       Invalidates volatile CPU state (SCU tags, caches, branch address
+ *       arrays, exclusive monitor, etc.) so that they can be safely
+ *       enabled. Instruction caching and branch prediction are enabled.
+ *
+ *	  Cortex-A15 has an integrated SCU in the L2 memory system, so we
+ *	  only need to set the correct L2 cache data RAM latency and
+ *	  enable the i-cache and branch prediction.
+ */
+__invalidate_cpu_state:
+       clrex
+       mov     r0, #0
+       mcr     p15, 0, r0, c1, c0, 1   @ disable SMP, prefetch, broadcast
+       isb
+	mrc	p15, 0, r0, c0, c0, 0	@ main ID register
+	ubfx	r1, r0, #4, #28		@ drop the revision field
+	ldr	r0, =0x00f0000
+	bic	r1, r1, r0		@ drop the variant field
+	ldr	r0, =0x410fc09
+	teq	r1, r0			@ Cortex-A9?
+	beq	cortex_a9
+
+       mrc     p15, 0x1, r0, c15, c0, 3        @ L2 prefetch control reg
+       tst     r0, #0x1000
+       orreq   r0, r0, #0x1000                 @ disable throttling
+       mcreq   p15, 0x1, r0, c15, c0, 3
+
+	/* This is only needed for cluster 0 with the integrated L2 cache */
+	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR
+	ubfx	r0, r0, #8, #4			@ cluster id
+	tst	r0, #1
+	bne	__enable_i_cache_branch_pred	@ skip on the LP cluster
+	mrc	p15, 0x1, r0, c9, c0, 2		@ L2CTLR
+	and	r1, r0, #7			@ L2 data RAM latency
+	cmp	r1, #2
+	beq	__enable_i_cache_branch_pred	@ already programmed
+	bic	r0, r0, #7
+	orr	r0, r0, #2			@ set data RAM latency
+	mcr	p15, 0x1, r0, c9, c0, 2
+       mrc     p15, 0x1, r0, c15, c0, 0        @ L2 ACTLR
+       orr     r0, r0, #0x80                   @ hazard detection timeout
+       mcr     p15, 0x1, r0, c15, c0, 0
+__enable_i_cache_branch_pred:
+       mov     r0, #0x1800
+       mcr     p15, 0, r0, c1, c0, 0   @ enable branch prediction, i-cache
+       mov     pc, lr
+       /* no fall through, just return to the caller */
+
+cortex_a9:
+       /* Following is for Cortex-A9 */
+       mcr     p15, 0, r0, c7, c5, 0   @ invalidate BTAC, i-cache
+       mcr     p15, 0, r0, c7, c5, 6   @ invalidate branch pred array
+       mcr     p15, 0, r0, c8, c5, 0   @ invalidate instruction TLB
+       mcr     p15, 0, r0, c8, c6, 0   @ invalidate data TLB
+       mcr     p15, 0, r0, c8, c7, 0   @ invalidate unified TLB
+       dsb
+       isb
+
+#if defined(CONFIG_HAVE_ARM_SCU)
+       cpu_id  r0
+       cmp     r0, #0
+       mov32   r1, (TEGRA_ARM_PERIF_BASE + 0xC)
+       movne   r0, r0, lsl #2
+       movne   r2, #0xf
+       movne   r2, r2, lsl r0
+       strne   r2, [r1]                @ invalidate SCU tags for CPU
 #endif
 
+       dsb
+       mov     r0, #0x1800
+       mcr     p15, 0, r0, c1, c0, 0   @ enable branch prediction, i-cache
+       isb
+
+	/* Invalidate the L1 d-cache during initial CPU boot (corrupts r0-r6) */
+
+       mov     r0, #0
+       mcr     p15, 2, r0, c0, c0, 0
+       mrc     p15, 1, r0, c0, c0, 0
+
+       movw    r1, #0x7fff
+       and     r2, r1, r0, lsr #13
+
+       movw    r1, #0x3ff
+
+       and     r3, r1, r0, lsr #3      @ NumWays - 1
+       add     r2, r2, #1      @ NumSets
+
+       and     r0, r0, #0x7
+       add     r0, r0, #4      @ SetShift
+
+       clz     r1, r3          @ WayShift
+       add     r4, r3, #1      @ NumWays
+1:     sub     r2, r2, #1      @ NumSets--
+       mov     r3, r4          @ Temp = NumWays
+2:     subs    r3, r3, #1      @ Temp--
+       mov     r5, r3, lsl r1
+       mov     r6, r2, lsl r0
+       orr     r5, r5, r6      @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+       mcr     p15, 0, r5, c7, c6, 2
+       bgt     2b
+       cmp     r2, #0
+       bgt     1b
+       dsb
+       isb
+       mov     pc, lr
+
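The set/way loop above is the stock ARMv7 L1 D-cache invalidate (it takes over from the standalone v7_invalidate_l1 routine removed by this patch). A hedged C transcription for documentation only, since the real code must run before a stack exists (CCSIDR field layout per the ARMv7 ARM; fls() is the kernel helper from linux/bitops.h):

#include <linux/types.h>
#include <linux/bitops.h>

static void v7_invalidate_l1_setway(void)
{
	u32 ccsidr, sets, ways, set_shift, way_shift, set, way;

	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (0));		/* CSSELR: L1 data */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));	/* CCSIDR */

	sets      = ((ccsidr >> 13) & 0x7fff) + 1;
	ways      = ((ccsidr >> 3) & 0x3ff) + 1;
	set_shift = (ccsidr & 0x7) + 4;		/* log2(bytes per cache line) */
	way_shift = 32 - fls(ways - 1);		/* same result as the CLZ above */

	for (set = 0; set < sets; set++)
		for (way = 0; way < ways; way++)
			asm volatile("mcr p15, 0, %0, c7, c6, 2"	/* DCISW */
				     : : "r" ((way << way_shift) | (set << set_shift)));
	asm volatile("dsb\n\tisb");
}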
+/*
+ * __tegra_cpu_reset_handler_halt_failed:
+ *
+ * Alternate entry point for reset handler for cases where the
+ * WFI halt failed to take effect.
+ *
+ */
        .align L1_CACHE_SHIFT
 ENTRY(__tegra_cpu_reset_handler_start)
 
@@ -155,10 +358,21 @@ ENTRY(__tegra_cpu_reset_handler_start)
        .align L1_CACHE_SHIFT
 ENTRY(__tegra_cpu_reset_handler)
 
+/*
+ * DO NOT put any code before the !defined(CONFIG_ARM_SAVE_DEBUG_CONTEXT)
+ * block below. It must be the first thing in this subroutine.
+ */
+
+#if !defined(CONFIG_ARM_SAVE_DEBUG_CONTEXT) || DEBUG_CPU_RESET_HANDLER
+       /* If Debug Architecture v7.1 or later, unlock the OS lock. */
+       mrc     p15, 0, r0, c0, c1, 2           @ ID_DFR0
+       and     r0, r0, #0xF                    @ coprocessor debug model
+       cmp     r0, #5                          @ debug arch >= v7.1?
+       movge   r0, #0                          @ yes, unlock debug
+       mcrge   p14, 0, r0, c1, c0, 4           @ DBGOSLAR
+#endif
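A hedged C equivalent of the OS Lock release above (the function name is an assumption): ID_DFR0[3:0] reports the coprocessor debug model, a value of 5 or more means v7.1 debug or later, and writing any value other than the key to DBGOSLAR clears the OS Lock so the debug registers become accessible:

#include <linux/types.h>

static void tegra_clear_debug_os_lock(void)
{
	u32 dfr0;

	asm volatile("mrc p15, 0, %0, c0, c1, 2" : "=r" (dfr0));	/* ID_DFR0 */
	if ((dfr0 & 0xf) >= 5)						/* debug arch >= v7.1 */
		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));	/* DBGOSLAR */
}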
 #if DEBUG_CPU_RESET_HANDLER
        b       .
 #endif
-#ifndef CONFIG_TRUSTED_FOUNDATIONS
+#ifndef CONFIG_TEGRA_USE_SECURE_KERNEL
        cpsid   aif, 0x13                       @ SVC mode, interrupts disabled
        mrc     p15, 0, r0, c0, c0, 0           @ read main ID register
        and     r5, r0, #0x00f00000             @ variant
@@ -189,18 +403,11 @@ ENTRY(__tegra_cpu_reset_handler)
 #endif
 
 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
-       /* Are we on Tegra20? */
-       mov32   r6, TEGRA_APB_MISC_BASE
-       ldr     r0, [r6, #APB_MISC_GP_HIDREV]
-       and     r0, r0, #0xff00
-       cmp     r0, #(0x20 << 8)
-       bne     1f
-       /* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
+       /* If CPU1, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
        mov32   r6, TEGRA_PMC_BASE
        mov     r0, #0
        cmp     r10, #0
        strne   r0, [r6, #PMC_SCRATCH41]
-1:
 #endif
 
 #ifdef CONFIG_PM_SLEEP
@@ -227,13 +434,14 @@ __is_not_lp1:
        bx      lr
 
 __is_not_lp2:
+
 #ifdef CONFIG_SMP
-       /*
-        * Can only be secondary boot (initial or hotplug) but CPU 0
-        * cannot be here.
-        */
+#ifndef CONFIG_TEGRA_VIRTUAL_CPUID
+	/*
+	 * Can only be secondary boot (initial or hotplug) but CPU 0
+	 * cannot be here.
+	 */
        cmp     r10, #0
        bleq    __die                           @ CPU0 cannot be here
+#endif
        ldr     lr, [r12, #RESET_DATA(STARTUP_SECONDARY)]
        cmp     lr, #0
        bleq    __die                           @ no secondary startup handler
@@ -308,6 +516,11 @@ __tegra_cpu_reset_handler_data:
        .rept   TEGRA_RESET_DATA_SIZE
        .long   0
        .endr
+       .size   __tegra_cpu_reset_handler_data, \
+       . - __tegra_cpu_reset_handler_data
        .align L1_CACHE_SHIFT
-
 ENTRY(__tegra_cpu_reset_handler_end)
+
+       .globl  __tegra_cpu_reset_handler_data_offset
+       .equ    __tegra_cpu_reset_handler_data_offset, \
+       __tegra_cpu_reset_handler_data - __tegra_cpu_reset_handler_start
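The offset symbol exported above lets C code locate the handler's data words inside the copy relocated to TEGRA_RESET_HANDLER_BASE rather than through the link-time address of __tegra_cpu_reset_handler_data. A hedged sketch of that usage (the function name and the mapping of the reset-handler area are assumptions):

#include <linux/io.h>

/* absolute symbol defined by the .equ above: its "address" is the offset */
extern char __tegra_cpu_reset_handler_data_offset[];

static u32 __iomem *tegra_reset_handler_data(void __iomem *reset_handler_va)
{
	return reset_handler_va +
	       (unsigned long)__tegra_cpu_reset_handler_data_offset;
}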