#include <arch/arm.h>
#include <arm64/asm.h>
#include <psci.h>
+#include <arm64/monitor_macros.h>
/* called both for cold reset and boot_secondary */
FUNCTION(mon_init_cpu)
b.ne . // error, if not EL3: spin here (the CurrentEL test is elided from this hunk)
/* initialize SCR to secure state: NS=0, lower ELs run AArch64 */
- mov x3, #MON_INIT_EL3_SCR
+ mov x3, #(MON_SCR_RESV1 | MON_SCR_64BIT)
msr scr_el3, x3
isb
ldr x0, =ARM_SYSTEM_COUNTER_FREQ
msr cntfrq_el0, x0 // publish the system counter frequency
+ /* PMU: mask all counter overflow interrupts and allow EL0 access.
+  * NOTE(review): nothing visible here enables the cycle counter itself
+  * (no PMCNTENSET/PMCR.E write in this hunk) — confirm the intent of
+  * the original "enable the cycle count register" wording. */
+ mrs x0, pmcr_el0
+ ubfx x0, x0, #11, #5 // read PMCR.N field
+ mov x4, #1
+ lsl x0, x4, x0 // x0 = 1 << N
+ sub x0, x0, #1 // mask of event counters
+ orr x0, x0, #0x80000000 // bit 31 = cycle counter; disable overflow intrs
+ msr pmintenclr_el1, x0
+ msr pmuserenr_el0, x4 // enable user mode access
+
/* mark per-CPU dist GROUP0 intrs non-secure */
ldr x4, =ARM_GIC_DIST_BASE
mov w3, #(~0)
/* (the GICD register writes between here and ret are elided from this hunk) */
ret
/*
- * Return to address saved in __mon_cpu_return_addr, which
- * will be the BL during cold reset or address in NS world
- * during PSCI CPU transitions.
+ * Return to address saved in __mon_cpu_return_addr, in
+ * AARCH32 SVC (non-secure) mode.
*/
-FUNCTION(mon_init_return)
- /* load per-cpu return address */
- cpuidx x12
- adr x5, __mon_cpu_return_addr
- ldr x3, [x5, x12, lsl #3]
- msr elr_el3, x3
+FUNCTION(mon_return_aarch32_ns)
+ /* load this CPU's saved return address (one 8-byte slot per CPU) */
+ cpuidx x1 // x1 = CPU index
+ adr x2, __mon_cpu_return_addr
+ ldr x2, [x2, x1, lsl #3]
+
+ msr elr_el3, x2 // return PC
+ mov x2, #(MON_SCR_RESV1 | MON_SCR_32BIT | MON_SCR_NS_MODE)
+ msr scr_el3, x2 // non-secure, lower ELs AArch32
+ mov x2, #(MON_SPSR_EXC_MASKED | MODE_SVC)
+ msr spsr_el3, x2 // AArch32 SVC mode, exceptions masked
- mov x3, #MON_INIT_EL3_SCR
- orr x3, x3, #1 // return NS=1
- msr scr_el3, x3
- isb
+ eret // exception return is context-synchronizing; no isb needed
+
+/*
+ * Return to address saved in __mon_cpu_return_addr, in
+ * AARCH64 EL2 (non-secure) mode.
+ */
+FUNCTION(mon_return_aarch64_ns)
+ /* load this CPU's saved return address (one 8-byte slot per CPU) */
+ cpuidx x1 // x1 = CPU index
+ adr x2, __mon_cpu_return_addr
+ ldr x2, [x2, x1, lsl #3]
+
+ msr elr_el3, x2 // return PC
+ mov x2, #(MON_SCR_RESV1 | MON_SCR_64BIT | MON_SCR_NS_MODE)
+ msr scr_el3, x2 // non-secure, lower ELs AArch64
+ mov x2, #(MON_SPSR_EXC_MASKED | MODE_EL(2))
+ msr spsr_el3, x2 // AArch64 EL2, exceptions masked
- /* go back non-secure in EL2 */
- mov x3, #MON_INIT_EL2_SPSR_AARCH64
- msr spsr_el3, x3
eret
/*
cpuidx x0
bl platform_psci_cpu_has_reset
- b mon_init_return
+ b mon_return_aarch64_ns
+
+/* Return MIDR_EL1[15:4] in x0.
+ * NOTE(review): on ARMv8 this field is the primary part number
+ * (CPU type, e.g. which core model), not a per-core index —
+ * MPIDR_EL1 would give the affinity/core number. Confirm that the
+ * "CPU ID" name intends the part number here. */
+FUNCTION(mon_get_cpu_id)
+ mrs x0, midr_el1
+ ubfx x0, x0, #4, #12 // extract MIDR_EL1[15:4]
+ ret
.ltorg
.align 3
/* MONCPUS zero-initialized 8-byte slots — presumably the body of
 * __mon_cpu_return_addr (its label line is elided from this hunk);
 * the return paths above index it by CPU with lsl #3. Confirm label. */
.rept MONCPUS
.quad 0
.endr
+
+.ltorg
+.align 3
+/* Per-CPU return-mode storage: one zero-initialized .quad per CPU
+ * (MONCPUS entries, 8-byte aligned).
+ * NOTE(review): no reader or writer of this symbol is visible in
+ * this hunk — presumably set when recording the NS return state. */
+.global __mon_cpu_return_mode
+__mon_cpu_return_mode:
+ .rept MONCPUS
+ .quad 0
+ .endr