tlk: 5/22 update
Dennis Huang [Thu, 22 May 2014 23:51:17 +0000 (16:51 -0700)]
- fix race during BSS clear
- storage callback for T132
- add WITH_ARM_PSCI_SUPPORT
- fix reset vector programming during lp0
- support new T132 boot flow
- add basic RPMB support
- add dynamic task loading
- make program VPR a true fastcall

Change-Id: I5249fe2ca6a85d878800100a7cbc1f3d80b09766
Reviewed-on: http://git-master/r/413484
Reviewed-by: Dennis Huang <denhuang@nvidia.com>
Tested-by: Dennis Huang <denhuang@nvidia.com>

40 files changed:
arch/arm/arm/cache-ops.S
arch/arm/arm/faults.c
arch/arm/arm/mmu.c
arch/arm/arm/monitor_vectors.S
arch/arm/arm/task.c
arch/arm/arm/thread.c
arch/arm/compile.mk
arch/arm/include/arch/arm/monitor_vectors.h
arch/arm/rules.mk
include/kernel/task.h
include/kernel/task_load.h [new file with mode: 0644]
include/kernel/thread.h
include/lib/monitor/monitor_vector.h
include/lib/ote/ote_protocol.h
include/platform.h
kernel/rules.mk
kernel/syscall.c
kernel/task.c
kernel/task_load.c [new file with mode: 0644]
lib/monitor/arm64/include/arm64/asm.h
lib/monitor/arm64/include/arm64/mmu_ldesc.h
lib/monitor/arm64/include/psci.h
lib/monitor/arm64/monitor_cpu.S
lib/monitor/arm64/monitor_fastcall.S [new file with mode: 0644]
lib/monitor/arm64/monitor_lib.S
lib/monitor/arm64/monitor_mmu.S
lib/monitor/arm64/monitor_start.S
lib/monitor/arm64/monitor_vector.S
lib/monitor/rules.mk
platform/init.c
platform/tegra/common/interrupts.c
platform/tegra/common/memory.c
platform/tegra/common/platform.c
platform/tegra/common/pm.c
platform/tegra/common/tz.c
platform/tegra/include/platform/platform_p.h
platform/tegra/monitor/memory.c
platform/tegra/monitor/platform.c [new file with mode: 0644]
platform/tegra/monitor/psci.c
platform/tegra/monitor/rules.mk

index 3a25dff..43b812e 100644 (file)
@@ -355,9 +355,14 @@ FUNCTION(arch_clean_cache_range)
        cmp             r0, r2
        blo             0b
 
+       /* copied from ops.S */
+#if defined(ARM_CPU_CORTEX_A8) || defined(ARM_CPU_CORTEX_A9) || defined(ARM_CPU_CORTEX_A15)
+       dsb             sy
+#elif ARM_CPU_ARM1136
        mov             r0, #0
        mcr             p15, 0, r0, c7, c10, 4          // data sync barrier
 #endif
+#endif
        bx              lr
 
        /* void arch_flush_invalidate_cache_range(addr_t start, size_t len); */
@@ -371,9 +376,14 @@ FUNCTION(arch_clean_invalidate_cache_range)
        cmp             r0, r2
        blo             0b
 
+       /* copied from ops.S */
+#if defined(ARM_CPU_CORTEX_A8) || defined(ARM_CPU_CORTEX_A9) || defined(ARM_CPU_CORTEX_A15)
+       dsb             sy
+#elif ARM_CPU_ARM1136
        mov             r0, #0
        mcr             p15, 0, r0, c7, c10, 4          // data sync barrier
 #endif
+#endif
        bx              lr
 
        /* void arch_invalidate_cache_range(addr_t start, size_t len); */
@@ -387,9 +397,14 @@ FUNCTION(arch_invalidate_cache_range)
        cmp             r0, r2
        blo             0b
 
+       /* copied from ops.S */
+#if defined(ARM_CPU_CORTEX_A8) || defined(ARM_CPU_CORTEX_A9) || defined(ARM_CPU_CORTEX_A15)
+       dsb             sy
+#elif ARM_CPU_ARM1136
        mov             r0, #0
        mcr             p15, 0, r0, c7, c10, 4          // data sync barrier
 #endif
+#endif
        bx              lr
 
        /* void arch_sync_cache_range(addr_t start, size_t len); */
index 2088ba6..69b2d4e 100644 (file)
@@ -92,9 +92,8 @@ void arm_syscall_handler(struct arm_fault_frame *frame)
 {
        ASSERT(in_critical_section() == false);
 
-       /* should intrs be reenabled (svc exception disabled) */
-       if (platform_intr_ready())
-               arch_enable_ints();
+       /*  enable ints as we enter an exception with ints disabled */
+       arch_enable_ints();
 
        if (platform_syscall_handler((void *)frame->r))
                return;
index b46fbda..cf9b7a8 100644 (file)
@@ -90,7 +90,9 @@ status_t arm_mmu_set_attrs_from_mapping(nsaddr_t vaddr, uint32_t type,
 {
        uint64_t par;
 
+       enter_critical_section();
        par = platform_translate_nsaddr(vaddr, type);
+       exit_critical_section();
 
        attrs->faulted = !!(par & PAR_ATTR_FAULTED);
        if (attrs->faulted) {
@@ -125,6 +127,7 @@ status_t arm_mmu_set_attrs_from_mapping(nsaddr_t vaddr, uint32_t type,
                return ERR_NOT_SUPPORTED;
        }
 #endif
+
        return NO_ERROR;
 }
 
@@ -165,7 +168,9 @@ void arm_mmu_translate_range(nsaddr_t vaddr, paddr_t *pagelist, task_map_t *mptr
        if (mptr->map_attrs == NULL)
                return;
 
-       if (mptr->flags & TM_SEC_VA)
+       if (mptr->flags & TM_KERN_SEC_VA)
+               type = V2PCWPW;
+       else if (mptr->flags & TM_SEC_VA)
                type = V2PCWUR;
        else if (mptr->flags & TM_NS_MEM_PRIV)
                type = V2POWPR;
index 72db650..377e0aa 100644 (file)
        movt    \reg, #:upper16:\val
 .endm
 
+.macro RESTORE_NS_STATE
+       /* restore monitor frame from arg */
+       adr     r0, mon_stdcall_frame_addr
+       ldr     r0, [r0]
+       RESTORE_MON_FRAME_FROM_ARG r0
+.endm
+
 /*****************************************************************************/
 /* The monitor entry table stuff                                             */
 /*****************************************************************************/
@@ -78,7 +85,7 @@ handle_exception:
        /* check if is mmu is currently enabled */
        mrc     p15, 0, r1, c1, c0, 0
        tst     r1, #0x1
-       bne     cont_handle_exc         @ enabled, just continue
+       bne     cont_handle_exc         @ enabled, just continue
 
        /* it won't be on exit from LP1, so handle it now */
        adr     r1, mon_p2v_offset
@@ -96,30 +103,30 @@ handle_exception:
        mov     pc, r1                  @ convert pc to virt
 
 cont_handle_exc:
+       tst     r0, #(1 << 31)          @ Check if fastcall
+       bne     handle_fastcall
+
+       /* continue for handling standard calls */
        SAVE_NONSECURE_STATE r1, r2     @ save NS state
        RESTORE_SECURE_STATE r1, r2     @ restore S state
 
-       /* if returning from NS irq, skip CPU save (r1/r2 are still scratch) */
-       mov32   r1, 0x32000005
-       cmp     r0, r1
-       moveq   r0, #0                  @ no incoming SMC
        ldmia   sp!, {r1-r2}            @ restore scratch regs
-       beq     return_go_nonsecure
 
-       /* if returning from FS req, skip CPU save (r1/r2 are still scratch) */
-       stmfd   sp!, {r1-r2}            @ create scratch regs
+       /* handle restart SMC */
+       cmp     r0, #(60 << 24)         @ restart SMC?
+       beq     go_restart
+
+       /* if returning from FS req, skip CPU save (r1 is still scratch) */
+       stmfd   sp!, {r1}               @ create scratch regs
        mov32   r1, 0x32000009
-       cmp     r0, r1
-       moveq   r0, #0                  @ no incoming SMC
-       adreq   r0, fs_return_value
-       ldmia   sp!, {r1-r2}            @ restore scratch regs
-       streq   r1, [r0]                @ return value for the FS request
-       moveq   r0, #0
-       beq     return_go_nonsecure
+       cmp     r0, r1                  @ fs completion?
+       ldmia   sp!, {r1}               @ restore scratch regs
+       beq     go_restart
 
-       /* save tz_monitor_frame to sp_mon (frame in r0) */
-       SAVE_MON_FRAME_TO_STACK
-        mov     r0, sp
+       /* save tz_monitor_frame to r0 */
+       SAVE_MON_FRAME_TO_ARG mon_stdcall_frame_addr
+       adr     r0, mon_stdcall_frame_addr
+       ldr     r0, [r0]
 
 return_go_nonsecure:
        /* frame in r0 (register not part of restored CPU state) */
@@ -128,6 +135,40 @@ return_go_nonsecure:
        msr     cpsr_cfsx, r2           @ switch to it
        ldmia   sp!, { r4-r12, pc }     @ restore CPU state from stack (and return)
 
+go_restart:
+       /* store new r14/spsr to use after RESTART SMC handling */
+       adr     r0, mon_stdcall_frame_addr
+       ldr     r0, [r0]
+       str     r14, [r0, #0x68]
+       mrs     r14, spsr
+       str     r14, [r0, #0x70]
+       mov     r0, #0                  @ no incoming SMC
+       b       return_go_nonsecure
+
+/*
+* Handle fast calls - keep interrupts disabled, do not save/restore secure
+* world context, handle the SMC and return to non-secure world
+*/
+handle_fastcall:
+       /* restore scratch regs */
+       ldmia   sp!, {r1-r2}
+
+       /* save monitor frame */
+       SAVE_MON_FRAME_TO_ARG mon_fastcall_frame_addr
+
+       /* handle the SMC, r0 = smc_frame */
+       adr     r0, mon_fastcall_frame_addr
+       ldr     r0, [r0]
+       bl      tz_fastcall_handler
+
+       SWITCH_SCR_TO_NONSECURE r0
+
+       /* restore monitor frame */
+       adr     r0, mon_fastcall_frame_addr
+       ldr     r0, [r0]
+       RESTORE_MON_FRAME_FROM_ARG r0
+       movs    pc, r14
+
 /*
  * Go to nonsecure world
  *
@@ -146,20 +187,44 @@ go_nonsecure:
        RESTORE_NONSECURE_STATE r2, r3
        SWITCH_SCR_TO_NONSECURE r2
 
-       /* completions always restore from the stack */
-       mov32   r2, SMC_TOS_COMPLETION
-       cmp     r0, r2
-       beq     restore_stack
+       /* completions always restore from the stdcall stack */
+       mov32   r2, SMC_TOS_COMPLETION
+       cmp     r0, r2
+       beq     restore_stdcall_state
+
+       mov32   r2, SMC_TOS_PREEMPT_BY_IRQ
+       cmp     r0, r2
+       beq     preempt_by_irq
 
-       /* for INITIAL_NS_RETURN and PREEMPTs, restore from arg */
+       mov32   r2, SMC_TOS_PREEMPT_BY_FS
+       cmp     r0, r2
+       beq     preempt_by_fs
+
+       /* for INITIAL_NS_RETURN restore from arg */
        RESTORE_MON_FRAME_FROM_ARG r1
-       movs    pc, r14
+       movs    pc, r14
+
+restore_stdcall_state:
+       RESTORE_NS_STATE
+       movs    pc, r14
 
-restore_stack:
-       /* restore from monitor stack */
-       RESTORE_MON_FRAME_FROM_STACK
-       movs    pc, r14
+preempt_by_irq:
+       RESTORE_NS_STATE
+       mov     r0, #SMC_ERR_PREEMPT_BY_IRQ
+       movs    pc, r14
 
+preempt_by_fs:
+       RESTORE_NS_STATE
+       mov     r0, #SMC_ERR_PREEMPT_BY_FS
+       movs    pc, r14
+
+.globl mon_fastcall_frame_addr
+mon_fastcall_frame_addr:
+       .long   0
+
+.globl mon_stdcall_frame_addr
+mon_stdcall_frame_addr:
+       .long   0
 
 .align L1_CACHE_ALIGN
 
@@ -178,10 +243,6 @@ nonsecure_state:
 secure_exit_mode:
        .long   0
 
-.globl fs_return_value
-fs_return_value:
-       .long   0
-
 .globl mon_stack_top
 mon_stack_top:
        .long   0
index 8969bdb..a6cb5a3 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved
  *
  * Permission is hereby granted, free of charge, to any person obtaining
  * a copy of this software and associated documentation files
@@ -93,6 +93,11 @@ task_map_t *arch_task_map_memory(task_t *task, addr_t addr, u_int size, u_int fl
        task_map_t *mptr;
        u_int npages, align, offset;
 
+       if (flags & TM_KERN_SEC_VA) {
+               if (flags & (TM_NS_MEM | TM_NS_MEM_PRIV))
+                       return NULL;
+       }
+
        mptr = malloc(sizeof(task_map_t));
        if (mptr == NULL)
                return NULL;
index ba680fc..de5ab49 100644 (file)
@@ -129,7 +129,7 @@ void arch_context_switch(thread_t *oldthread, thread_t *newthread)
 
                /* exit critical section created in thread_create */
                dec_critical_section();
-               if ((critical_section_count == 0) && platform_intr_ready())
+               if (critical_section_count == 0)
                        frame->psr &= ~(1 << 7);        /* clear intr disable */
                else
                        frame->psr |= (1 << 7);         /* set intr disable */
index 6b49dc3..524235a 100644 (file)
@@ -1,31 +1,16 @@
 # can override this in local.mk
 ENABLE_THUMB ?= true
 
-ifeq ($(ARM_CPU),cortex-m3)
-MODULE_COMPILEFLAGS += -mcpu=$(ARM_CPU)
-ENABLE_THUMB := true
-endif
 ifeq ($(ARM_CPU),cortex-a15)
 MODULE_COMPILEFLAGS += -mcpu=$(ARM_CPU)
 endif
 ifeq ($(ARM_CPU),cortex-a9)
 MODULE_COMPILEFLAGS += -mcpu=$(ARM_CPU)
 endif
-ifeq ($(ARM_CPU),cortex-a8)
-MODULE_COMPILEFLAGS += -mcpu=$(ARM_CPU)
-endif
-ifeq ($(ARM_CPU),arm1136j-s)
-MODULE_COMPILEFLAGS += -mcpu=$(ARM_CPU)
-endif
-ifeq ($(ARM_CPU),arm1176jzf-s)
-MODULE_COMPILEFLAGS += -mcpu=$(ARM_CPU)
-endif
 ifeq ($(ARM_CPU),arm926ej-s)
 MODULE_COMPILEFLAGS += -mcpu=$(ARM_CPU)
 endif
-ifeq ($(ARM_CPU),arm7tdmi)
-MODULE_COMPILEFLAGS += -mcpu=$(ARM_CPU)
-endif
+
 
 THUMBCFLAGS :=
 THUMBINTERWORK :=
index ac6b328..c37f871 100644 (file)
 .endm
 
 .macro SAVE_SECURE_STATE, base, tmp
-       adr             \base, secure_state
+       ldr             \base, =secure_state
        SAVE_STATE      \base, \tmp
 .endm
 
 .macro RESTORE_SECURE_STATE, base, tmp
-       adr             \base, secure_state
+       ldr             \base, =secure_state
        RESTORE_STATE   \base, \tmp
 .endm
 
 .macro SAVE_NONSECURE_STATE, base, tmp
-       adr             \base, nonsecure_state
+       ldr             \base, =nonsecure_state
        SAVE_STATE      \base, \tmp
 .endm
 
 .macro RESTORE_NONSECURE_STATE, base, tmp
-       adr             \base, nonsecure_state
+       ldr             \base, =nonsecure_state
        RESTORE_STATE   \base, \tmp
 .endm
 
        isb
 .endm
 
-/* tz_monitor_frame is 15 entries of 8 bytes each */
-#define MON_FRAME_ENTRIES       15
-#define MON_FRAME_ENTRY_BITS    3
-#define MON_FRAME_ENTRY_SIZE    (1 << MON_FRAME_ENTRY_BITS)
-
-.macro SAVE_MON_FRAME_TO_STACK
-       sub     sp, sp, #(MON_FRAME_ENTRIES << MON_FRAME_ENTRY_BITS)
-       str     r0,  [sp, #0x00]
-       str     r1,  [sp, #0x08]
-       str     r2,  [sp, #0x10]
-       str     r3,  [sp, #0x18]
-       str     r4,  [sp, #0x20]
-       str     r5,  [sp, #0x28]
-       str     r6,  [sp, #0x30]
-       str     r7,  [sp, #0x38]
-       str     r8,  [sp, #0x40]
-       str     r9,  [sp, #0x48]
-       str     r10, [sp, #0x50]
-       str     r11, [sp, #0x58]
-       str     r12, [sp, #0x60]
-       str     r14, [sp, #0x68]
-       mrs     r14, spsr
-       str     r14, [sp, #0x70]
-.endm
-
-.macro RESTORE_MON_FRAME_FROM_STACK
-       mov     r14, sp
-       add     sp, sp, #(MON_FRAME_ENTRIES << MON_FRAME_ENTRY_BITS)
-       ldr     r1,  [r14, #0x70]
-       msr     spsr_cfsx, r1
-       ldr     r0,  [r14, #0x00]
-       ldr     r1,  [r14, #0x08]
-       ldr     r2,  [r14, #0x10]
-       ldr     r3,  [r14, #0x18]
-       ldr     r4,  [r14, #0x20]
-       ldr     r5,  [r14, #0x28]
-       ldr     r6,  [r14, #0x30]
-       ldr     r7,  [r14, #0x38]
-       ldr     r8,  [r14, #0x40]
-       ldr     r9,  [r14, #0x48]
-       ldr     r10, [r14, #0x50]
-       ldr     r11, [r14, #0x58]
-       ldr     r12, [r14, #0x60]
-       ldr     r14, [r14, #0x68]
+.macro SAVE_MON_FRAME_TO_ARG, arg
+       push    { r14 }
+       adr     r14, \arg
+       ldr     r14, [r14]
+       str     r0, [r14, #0x00]
+       str     r1, [r14, #0x08]
+       str     r2, [r14, #0x10]
+       str     r3, [r14, #0x18]
+       str     r4, [r14, #0x20]
+       str     r5, [r14, #0x28]
+       str     r6, [r14, #0x30]
+       str     r7, [r14, #0x38]
+       str     r8, [r14, #0x40]
+       str     r9, [r14, #0x48]
+       str     r10, [r14, #0x50]
+       str     r11, [r14, #0x58]
+       str     r12, [r14, #0x60]
+       pop     { r12 }
+       str     r12, [r14, #0x68]
+       mrs     r12, spsr
+       str     r12, [r14, #0x70]
 .endm
 
 .macro RESTORE_MON_FRAME_FROM_ARG, arg
index a807bcd..70f8f52 100644 (file)
@@ -10,17 +10,7 @@ DEFINES += \
 
 # do set some options based on the cpu core
 HANDLED_CORE := false
-ifeq ($(ARM_CPU),cortex-m3)
-DEFINES += \
-       ARM_WITH_CP15=1 \
-       ARM_ISA_ARMv7=1 \
-       ARM_ISA_ARMv7M=1 \
-       ARM_WITH_THUMB=1 \
-       ARM_WITH_THUMB2=1
-HANDLED_CORE := true
-ONLY_THUMB := true
-SUBARCH := arm-m
-endif
+
 ifeq ($(ARM_CPU),cortex-a15)
 DEFINES += \
        ARM_WITH_CP15=1         \
@@ -55,43 +45,7 @@ HANDLED_CORE := true
 #CFLAGS += -mfpu=neon -mfloat-abi=softfp
 MODULE_DEPS += $(LOCAL_DIR)/arm/neon
 endif
-ifeq ($(ARM_CPU),cortex-a8)
-DEFINES += \
-       ARM_WITH_CP15=1 \
-       ARM_WITH_MMU=1 \
-       ARM_ISA_ARMv7=1 \
-       ARM_ISA_ARMv7A=1 \
-       ARM_WITH_VFP=1 \
-       ARM_WITH_NEON=1 \
-       ARM_WITH_THUMB=1 \
-       ARM_WITH_THUMB2=1 \
-       ARM_WITH_CACHE=1 \
-       ARM_WITH_L2=1
-HANDLED_CORE := true
-#CFLAGS += -mfpu=neon -mfloat-abi=softfp
-MODULE_DEPS += $(LOCAL_DIR)/arm/neon
-endif
-ifeq ($(ARM_CPU),arm1136j-s)
-DEFINES += \
-       ARM_WITH_CP15=1 \
-       ARM_WITH_MMU=1 \
-       ARM_ISA_ARMv6=1 \
-       ARM_WITH_THUMB=1 \
-       ARM_WITH_CACHE=1 \
-       ARM_CPU_ARM1136=1
-HANDLED_CORE := true
-endif
-ifeq ($(ARM_CPU),arm1176jzf-s)
-DEFINES += \
-       ARM_WITH_CP15=1 \
-       ARM_WITH_MMU=1 \
-       ARM_ISA_ARMv6=1 \
-       ARM_WITH_VFP=1 \
-       ARM_WITH_THUMB=1 \
-       ARM_WITH_CACHE=1 \
-       ARM_CPU_ARM1136=1
-HANDLED_CORE := true
-endif
+
 ifeq ($(ARM_CPU),arm926ej-s)
 DEFINES += \
        ARM_WITH_CP15=1 \
@@ -103,13 +57,6 @@ DEFINES += \
        ARM_CPU_ARM926=1
 HANDLED_CORE := true
 endif
-ifeq ($(ARM_CPU),arm7tdmi)
-DEFINES += \
-       ARM_ISA_ARMv4=1 \
-       ARM_WITH_THUMB=1 \
-       ARM_CPU_ARM7=1
-HANDLED_CORE := true
-endif
 
 ifneq ($(HANDLED_CORE),true)
 $(warning $(LOCAL_DIR)/rules.mk doesnt have logic for arm core $(ARM_CPU))
@@ -171,21 +118,6 @@ endif
 DEFINES += \
        ARCH_DEFAULT_STACK_SIZE=4096
 endif
-ifeq ($(SUBARCH),arm-m)
-MODULE_SRCS += \
-       $(LOCAL_DIR)/arm-m/arch.c \
-       $(LOCAL_DIR)/arm-m/vectab.c \
-       $(LOCAL_DIR)/arm-m/start.c \
-       $(LOCAL_DIR)/arm-m/exceptions.c \
-       $(LOCAL_DIR)/arm-m/thread.c \
-       $(LOCAL_DIR)/arm-m/systick.c
-
-INCLUDES += \
-       -I$(LOCAL_DIR)/arm-m/CMSIS/Include
-
-DEFINES += \
-       ARCH_DEFAULT_STACK_SIZE=1024
-endif
 
 # If platform sets ARM_USE_MMU_RELOC the image will be built based on
 # VMEMBASE and will create page table entries in start.S to the physmem
index e02c5d7..0b6a879 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved
  *
  * Permission is hereby granted, free of charge, to any person obtaining
  * a copy of this software and associated documentation files
@@ -48,6 +48,7 @@ typedef enum {
        TM_NS_MEM_PRIV  = (1 << 22),
        TM_SEC_VA       = (1 << 23),
        TM_KERN_VA      = (1 << 24),
+       TM_KERN_SEC_VA  = (1 << 29),
        TM_IO           = (1 << 30),
 } tmflags_t;
 
@@ -138,6 +139,8 @@ typedef struct task
        task_state_t task_state;
        task_type_t task_type;
        u_int task_index;
+       char task_name[ OTE_TASK_NAME_MAX_LENGTH ];
+       unsigned char task_private_data[ OTE_TASK_PRIVATE_DATA_LENGTH ];
 } task_t;
 
 void task_init();
@@ -152,6 +155,16 @@ status_t task_get_physaddr(task_t *taskp, addr_t vaddr, paddr_t *paddr);
 bool task_valid_address(vaddr_t addr, u_int size);
 task_t *task_find_task_by_uuid(te_service_id_t *uuid);
 
+/*! Lookup task by its task slot index in task_list.
+ *
+ * \param index[in] task index (0..(task_count-1)).
+ *
+ * \return Return pointer to task or NULL if index is >= task_count.
+ *
+ * This may return a pointer to an inactive task slot.
+ */
+task_t *task_find_task_by_index(uint32_t index);
+
 /*! Log the uuid of TASKP at dprintf LEVEL with optional PREFIX string.
  *
  * \param LEVEL defines dprintf() level to conditionally print at.
@@ -165,5 +178,33 @@ status_t task_prepare(char *task_addr, u_int task_size, task_t *taskp,
 
 status_t task_init_one_task(task_t *task, u_int task_type);
 
-#endif
-#endif
+/*! Atomically commit task (and set its index) to the known task list
+ * unless there is an error. TASK_P value swapped to track the registered
+ * task header (old referenced task object is copied to task list and
+ * then cleared).
+ *
+ * UUID duplicates are atomically checked here.
+ *
+ * \param task_p[in/out] input task object pointer is atomically swapped to a
+ *                      registered task object pointer.
+ *
+ * \return NO_ERROR on success and on failure:
+ *        ERR_TOO_MANY_TASKS if no more tasks can be installed (out of memory)
+ *        ERR_ALREADY_EXISTS if a task with matching uuid already exists
+ *        ERR_INVALID_ARGS if parameters are invalid.
+ */
+status_t task_register (task_t **task_p);
+
+/*! Get a const reference to start of task list (task 0 object). */
+const task_t *task_get_list(void);
+
+/*! return the number of registered tasks */
+u_int task_get_count(void);
+
+/*! Return the max number of tasks that can be registered to the system.
+ * The actual number may be smaller depending on memory size vs task size.
+ */
+u_int task_get_max_count(void);
+
+#endif /* ASSEMBLY */
+#endif /* __KERNEL_TASK_H */
diff --git a/include/kernel/task_load.h b/include/kernel/task_load.h
new file mode 100644 (file)
index 0000000..e39d5a3
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __KERNEL_TASK_LOAD_H
+#define __KERNEL_TASK_LOAD_H
+
+#include <kernel/task.h>
+
+/*!
+ * Privileged "installer TA" starting the task may request
+ * some overrides for the task being started.
+ *
+ * Currently only supports overriding the max size
+ * of the stack or heap configured in the manifest and the
+ * task name.
+ *
+ * Other types of overrides / restrictions may be supported later.
+ *
+ * te_task_restrictions_t is compatible with this.
+ */
+typedef struct {
+       /* currently supported overrides; all-zero values ignored */
+       uint32_t min_stack_size;
+       uint32_t min_heap_size;
+       char     task_name[ OTE_TASK_NAME_MAX_LENGTH ];
+} task_restrictions_t;
+
+/*!
+ * Contains copy of information in the task manifest fixed fields.
+ * te_task_info_t is compatible with this
+ *
+ * Most manifest fields and some task_t fields are copied here.
+ */
+typedef struct {
+       te_service_id_t uuid;
+       u_int    manifest_exists;
+       u_int    multi_instance;
+       u_int    min_stack_size;
+       u_int    min_heap_size;
+       u_int    map_io_mem_cnt;
+       u_int    restrict_access;
+       u_int    install_priv;
+       u_int    immutable;
+       char     task_name[ OTE_TASK_NAME_MAX_LENGTH ];
+       unsigned char task_private_data[ OTE_TASK_PRIVATE_DATA_LENGTH ];
+} task_info_t;
+
+void task_load_config (vaddr_t vaddr_begin, vaddr_t *vaddr_end_p);
+
+void task_load_init (void);
+
+void task_set_name(task_t *task, const char *name);
+
+u_int task_allowed_to_load_tasks (task_t *taskp);
+
+/*!
+ * Called from syscall ioctl handler to perform task loading and
+ * related operations.
+ */
+struct te_task_request_args_s;
+
+int task_request_handler (struct te_task_request_args_s *args);
+
+#endif /* __KERNEL_TASK_LOAD_H */
index ec6e7fe..53e3215 100644 (file)
@@ -144,7 +144,7 @@ static inline __ALWAYS_INLINE void enter_critical_section(void)
 static inline __ALWAYS_INLINE void exit_critical_section(void)
 {
        critical_section_count--;
-       if ((critical_section_count == 0) && platform_intr_ready())
+       if (critical_section_count == 0)
                arch_enable_ints();
 }
 
index d571a4e..4fdf5f6 100644 (file)
 
 #if !defined(ASSEMBLY) && defined(WITH_MONITOR_BIN)
 /*
- * Monitor data structures and functions
- * (usable if linked into the monitor binary)
+ * Exported monitor data structures and functions which can be
+ * referenced by routines also linked into the monitor binary.
  */
 extern uintptr_t __mon_cpu_return_addr;
 extern uintptr_t __mon_cpu_reset_vector;
 extern uintptr_t __mon_phys_base;
 extern uintptr_t __mon_phys_size;
 
+/* holds arguments/return value during fastcalls */
+struct fastcall_frame {
+       uint64_t r[8];  /* r0-r7 */
+};
+
 paddr_t mon_virt_to_phys(void *vaddr);
 void *mon_phys_to_virt(uint64_t paddr);
+
 int mon_mmu_map_mmio(uintptr_t vaddr, uint64_t paddr, uint32_t length);
+void mon_mmu_map_uncached(uintptr_t vaddr, uint64_t paddr, uint32_t length);
+void mon_mmu_unmap(uintptr_t vaddr, uint32_t length);
+
 void mon_atomic_or(volatile uint32_t *ptr, uint32_t bits);
+
+uint32_t mon_get_cpu_id(void);
 #endif // !ASSEMBLY && WITH_MONITOR_BIN
 
 
@@ -71,6 +82,10 @@ void mon_atomic_or(volatile uint32_t *ptr, uint32_t bits);
 #define SMC_IS_LEGACY  \
        (SMC_FASTCALL | (SMC_MUST_BE_ZERO_MASK << SMC_MUST_BE_ZERO_SHIFT))
 
+/* Silicon Partner issued SMCs */
+#define SMC_SIP_CALL                   (SMC_OWNER_SIP_SERVICE << SMC_OWNER_SHIFT)
+#define SMC_SIP_AARCH_SWITCH           (SMC_FASTCALL | SMC_SIP_CALL | 0x4)
+
 /* Trusted OS issued SMC (i.e. generated from the TLK kernel) */
 #define SMC_TOS_CALL                   (0x32 << SMC_OWNER_SHIFT)
 #define SMC_TOS_FROM_SECURE            (1 << 15)
@@ -90,12 +105,20 @@ void mon_atomic_or(volatile uint32_t *ptr, uint32_t bits);
 /* low byte used as jump table idx */
 #define        SMC_TOS_FUNC_ID_MASK            0xFF
 
-/* TOS issued SMCs */
+/* TOS issued SMCs (update MAX_FUNC_IDX when adding new calls) */
 #define        SMC_TOS_COMPLETION              (SMC_TOS_SECURE | 0x1)
 #define        SMC_TOS_PREEMPT_BY_IRQ          (SMC_TOS_SECURE | SMC_TOS_PREEMPT | 0x2)
 #define        SMC_TOS_PREEMPT_BY_FS           (SMC_TOS_SECURE | SMC_TOS_PREEMPT | 0x3)
 #define        SMC_TOS_INITIAL_NS_RETURN       (SMC_TOS_SECURE | 0x4)
 #define        SMC_TOS_ADDR_TRANSLATE          (SMC_TOS_SECURE | 0x5)
 #define        SMC_TOS_INIT_SHARED_ADDR        (SMC_TOS_SECURE | 0x6)
+#define        SMC_TOS_MAX_FUNC_IDX            0x6
+
+/* restart pre-empted SMC handling */
+#define SMC_TOS_RESTART                        (60 << 24)
+
+/* informs the NS world that we were pre-empted by an irq */
+#define SMC_ERR_PREEMPT_BY_IRQ         0xFFFFFFFD
+#define SMC_ERR_PREEMPT_BY_FS          0xFFFFFFFE
 
 #endif
index 30ba0e3..c24e150 100644 (file)
@@ -37,6 +37,7 @@
 #include <service/ote_storage.h>
 #include <service/ote_manifest.h>
 #include <ext_nv/ote_ext_nv.h>
+#include <service/ote_task_load.h>
 
 /*
  * Get property info
@@ -104,6 +105,19 @@ typedef struct {
        uint32_t        size;
 } te_ss_get_size_params_t;
 
+#define RPMB_FRAME_SIZE        512
+
+typedef struct {
+       uint8_t         req_frame[RPMB_FRAME_SIZE];
+       uint8_t         req_resp_frame[RPMB_FRAME_SIZE];
+       uint8_t         resp_frame[RPMB_FRAME_SIZE];
+} te_ss_rpmb_write_params_t;
+
+typedef struct {
+       uint8_t         req_frame[RPMB_FRAME_SIZE];
+       uint8_t         resp_frame[RPMB_FRAME_SIZE];
+} te_ss_rpmb_read_params_t;
+
 typedef union {
        te_ss_create_params_t           f_create;
        te_ss_delete_params_t           f_delete;
@@ -114,12 +128,15 @@ typedef union {
        te_ss_seek_params_t             f_seek;
        te_ss_trunc_params_t            f_trunc;
        te_ss_get_size_params_t         f_getsize;
+       te_ss_rpmb_write_params_t       f_rpmb_write;
+       te_ss_rpmb_read_params_t        f_rpmb_read;
 } te_ss_req_params_t;
 
 /*
  * Parameter block exchanged on each file system operation request.
  */
 typedef struct {
+       uint32_t                req_size;
        uint32_t                type;
        int32_t                 result;
        uint32_t                params_size;
index 4293f10..2e32e1f 100644 (file)
@@ -50,9 +50,6 @@ void platform_init_outer(void);
 /* handle syscall */
 bool platform_syscall_handler(void *arg);
 
-/* check platform is ready for interrupts */
-bool platform_intr_ready(void);
-
 /* Get a random number */
 uint32_t platform_get_rand32(void);
 #endif
index 86527f3..2273ce7 100644 (file)
@@ -14,6 +14,7 @@ MODULE_SRCS := \
        $(LOCAL_DIR)/main.c \
        $(LOCAL_DIR)/mutex.c \
        $(LOCAL_DIR)/task.c \
+       $(LOCAL_DIR)/task_load.c \
        $(LOCAL_DIR)/thread.c \
        $(LOCAL_DIR)/timer.c \
        $(LOCAL_DIR)/boot.c \
index eb2731c..5e915d3 100644 (file)
@@ -33,6 +33,7 @@
 #include <lib/ote/ote_protocol.h>
 #include <ote_intf.h>
 #include <platform/platform_p.h>
+#include <kernel/task_load.h>
 
 struct timespec {
        long tv_sec;    /* seconds */
@@ -43,8 +44,6 @@ struct timespec {
 #define        MAP_PRIVATE             0x02    /* Changes are private.  */
 #define        MAP_ANONYMOUS           0x20    /* Don't use a file.  */
 
-int tee_handle_ta_message(uint32_t fd, void *msg, uint32_t msgsize, bool isread);
-
 static int platform_ta_to_ta_request_handler(te_ta_to_ta_request_args_t *args)
 {
        te_error_t result;
@@ -279,6 +278,16 @@ static int platform_ioctl_handler(u_int cmd, void *cmdbuf)
                        *args = platform_get_rand32();
                        break;
                }
+               case OTE_IOCTL_TASK_REQUEST:
+               {
+                       te_task_request_args_t *args = cmdbuf;
+
+                       if (!task_valid_address((vaddr_t)args, sizeof(*args))) {
+                               return -EFAULT;
+                       }
+
+                       return task_request_handler (args);
+               }
                default:
                {
                        dprintf(SPEW, "%s: invalid ioctl: cmd=0x%x\n",
index 3e79655..5c93553 100644 (file)
@@ -36,6 +36,7 @@
 #include <kernel/elf.h>
 #include <platform.h>
 #include <platform/platform_p.h>
+#include <kernel/task_load.h>
 
 /* page aligned area for storing task headers */
 #define TASK_LIST_CARVEOUT_PAGES 1
@@ -53,18 +54,23 @@ static u_int task_image_size;
 extern u_int __tasks_start;
 extern u_int __tasks_end;
 
-extern int _heap_end;
+extern int _end;       /* end of binary &_end (heap starts after this) */
+extern int _heap_end;  /* heap ends here, adjusted by carve-outs below */
 
 /* memory carved off from the top (before heap_init) */
 #define carveout_taskmem       _heap_end
 
-static void task_load_config_options(u_int task_image_addr, task_t *taskp, Elf32_Shdr *shdr)
+static status_t task_load_config_options(u_int task_image_addr, task_t *taskp, Elf32_Shdr *shdr)
 {
+       status_t err = NO_ERROR;
        OTE_MANIFEST  *manifest;
        u_int *config_blob, config_blob_size;
        u_int i;
 
-       ASSERT(shdr->sh_size >= offsetof(OTE_MANIFEST, config_options));
+       if (shdr->sh_size < offsetof(OTE_MANIFEST, config_options)) {
+               err = ERR_NOT_VALID;
+               goto exit;
+       }
 
        /* init default config options before parsing manifest */
        taskp->props.min_heap_size = 5 * PAGE_SIZE;
@@ -72,6 +78,20 @@ static void task_load_config_options(u_int task_image_addr, task_t *taskp, Elf32
 
        manifest = (OTE_MANIFEST *)(task_image_addr + shdr->sh_offset);
 
+       /*
+        * The informative name field may be zero-filled; only non-zero names are used.
+        * Task loading may also override this field's value.
+        */
+       memcpy(&taskp->task_name[0], &manifest->name[0], sizeof(taskp->task_name));
+
+       /*
+        * Copy TA specific config data (optional field, define semantics per task).
+        * E.g. could hold SHA1 digest of something you wish to load to the task
+        * at runtime.
+        */
+       memcpy(&taskp->task_private_data[0], &manifest->private_data[0],
+              sizeof(taskp->task_private_data));
+
        memcpy(&taskp->props.uuid, &manifest->uuid, sizeof(te_service_id_t));
 
        task_print_uuid(SPEW, "task load uuid = ", taskp);
@@ -79,67 +99,98 @@ static void task_load_config_options(u_int task_image_addr, task_t *taskp, Elf32
        config_blob = (u_int *)((char *)manifest + offsetof(OTE_MANIFEST, config_options));
        config_blob_size = (shdr->sh_size - offsetof(OTE_MANIFEST, config_options));
 
-       taskp->props.config_entry_cnt = config_blob_size / sizeof (u_int);
+       taskp->props.config_entry_cnt = config_blob_size / sizeof(u_int);
 
        /* if no config options we're done */
-       if (taskp->props.config_entry_cnt == 0) {
-               return;
-       }
+       if (taskp->props.config_entry_cnt != 0) {
 
-       /* save off configuration blob start so it can be accessed later */
-       taskp->props.config_blob = config_blob;
+               /* save off configuration blob start so it can be accessed later */
+               taskp->props.config_blob = config_blob;
 
-       /*
-        * Step thru configuration blob.
-        *
-        * Save off some configuration data while we are here but
-        * defer processing of other data until it is needed later.
-        */
-       for (i = 0; i < taskp->props.config_entry_cnt; i++) {
-               switch (config_blob[i]) {
-               case OTE_CONFIG_KEY_MIN_STACK_SIZE:
-                       /* MIN_STACK_SIZE takes 1 data value */
-                       ASSERT((taskp->props.config_entry_cnt - i) > 1);
-                       taskp->props.min_stack_size =
-                               ROUNDUP(config_blob[++i], 4096);
-                       ASSERT(taskp->props.min_stack_size > 0);
-                       break;
-               case OTE_CONFIG_KEY_MIN_HEAP_SIZE:
-                       /* MIN_HEAP_SIZE takes 1 data value */
-                       ASSERT((taskp->props.config_entry_cnt - i) > 1);
-                       taskp->props.min_heap_size =
-                               ROUNDUP(config_blob[++i], 4096);
-                       ASSERT(taskp->props.min_heap_size > 0);
-                       break;
-               case OTE_CONFIG_KEY_MAP_MEM:
-                       /* MAP_MEM takes 3 data values */
-                       ASSERT((taskp->props.config_entry_cnt - i) > 3);
-                       taskp->props.map_io_mem_cnt++;
-                       i += 3;
-                       break;
-               case OTE_CONFIG_KEY_RESTRICT_ACCESS:
-                       /* Set clients who are restricted access.  */
-                       taskp->props.restrict_access = config_blob[++i];
-                       break;
-               case OTE_CONFIG_KEY_INSTALL:
-                       /* tasks which are allowed to install other tasks. */
-                       ASSERT((taskp->props.config_entry_cnt - i) > 1);
-                       taskp->props.install_priv = config_blob[++i];
-                       break;
-               case OTE_CONFIG_KEY_IMMUTABLE:
-                       /* prevents any manifest data overrides by installer. */
-                       ASSERT((taskp->props.config_entry_cnt - i) > 1);
-                       taskp->props.immutable = config_blob[++i];
-                       break;
-               default:
-                       dprintf(CRITICAL,
-                               "%s: unknown OTE_CONFIG_KEY_VALUE: %d\n",
-                               __func__, config_blob[i]);
-                       ASSERT(0);
-                       i++;
-                       break;
+               /*
+                * Step through the configuration blob.
+                *
+                * Save off some configuration data while we are here but
+                * defer processing of other data until it is needed later.
+                */
+               for (i = 0; i < taskp->props.config_entry_cnt; i++) {
+                       switch (config_blob[i]) {
+                       case OTE_CONFIG_KEY_MIN_STACK_SIZE:
+                               /* MIN_STACK_SIZE takes 1 data value */
+                               if ((taskp->props.config_entry_cnt - i) <= 1) {
+                                       err = ERR_NOT_VALID;
+                                       goto exit;
+                               }
+                               taskp->props.min_stack_size =
+                                       ROUNDUP(config_blob[++i], 4096);
+                               if (taskp->props.min_stack_size <= 0) {
+                                       err = ERR_NOT_VALID;
+                                       goto exit;
+                               }
+                               break;
+                       case OTE_CONFIG_KEY_MIN_HEAP_SIZE:
+                               /* MIN_HEAP_SIZE takes 1 data value */
+                               if ((taskp->props.config_entry_cnt - i) <= 1) {
+                                       err = ERR_NOT_VALID;
+                                       goto exit;
+                               }
+                               taskp->props.min_heap_size =
+                                       ROUNDUP(config_blob[++i], 4096);
+                               if (taskp->props.min_heap_size <= 0) {
+                                       err = ERR_NOT_VALID;
+                                       goto exit;
+                               }
+                               break;
+                       case OTE_CONFIG_KEY_MAP_MEM:
+                               /* MAP_MEM takes 3 data values */
+                               if ((taskp->props.config_entry_cnt - i) <= 3) {
+                                       err = ERR_NOT_VALID;
+                                       goto exit;
+                               }
+                               taskp->props.map_io_mem_cnt++;
+                               i += 3;
+                               break;
+                       case OTE_CONFIG_KEY_RESTRICT_ACCESS:
+                               /* Set clients who are restricted access.  */
+                               if ((taskp->props.config_entry_cnt - i) <= 1) {
+                                       err = ERR_NOT_VALID;
+                                       goto exit;
+                               }
+                               taskp->props.restrict_access = config_blob[++i];
+                               break;
+                       case OTE_CONFIG_KEY_INSTALL:
+                               /* tasks which are allowed to install other tasks. */
+                               if ((taskp->props.config_entry_cnt - i) <= 1) {
+                                       err = ERR_NOT_VALID;
+                                       goto exit;
+                               }
+                               taskp->props.install_priv = config_blob[++i];
+                               break;
+                       case OTE_CONFIG_KEY_IMMUTABLE:
+                               /* prevents any manifest data overrides by installer. */
+                               if ((taskp->props.config_entry_cnt - i) <= 1) {
+                                       err = ERR_NOT_VALID;
+                                       goto exit;
+                               }
+                               taskp->props.immutable = config_blob[++i];
+                               break;
+                       default:
+                               dprintf(CRITICAL,
+                                       "%s: unknown OTE_CONFIG_KEY_VALUE: %d\n",
+                                       __func__, config_blob[i]);
+                               err = ERR_NOT_VALID;
+                               goto exit;
+                       }
                }
        }
+
+       if (0) {
+       exit:
+               if (err == NO_ERROR) {
+                       err = ERR_NOT_VALID;
+               }
+       }
+       return err;
 }
 
 static void task_setup_mmio(task_t *taskp)
@@ -208,6 +259,8 @@ void task_add_mapping(task_t *taskp, task_map_t *new_mptr)
 {
        task_map_t *mptr;
 
+       ASSERT(taskp);
+       ASSERT(new_mptr);
        ASSERT(new_mptr->vaddr && new_mptr->size);
        list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) {
                if (mptr->vaddr > new_mptr->vaddr) {
@@ -291,7 +344,7 @@ static status_t task_init_stack(task_t *taskp)
        mptr->size  = taskp->props.min_stack_size;
        mptr->u_phys.contig = (addr_t) memalign(PAGE_SIZE, mptr->size);
        if (mptr->u_phys.contig == NULL) {
-               free (mptr);
+               free(mptr);
                return ERR_NO_MEMORY;
        }
 
@@ -413,6 +466,7 @@ static status_t task_alloc_address_map(task_t *taskp)
                 * We're expecting to be able to execute the task in-place,
                 * meaning its PT_LOAD segments, should be page-aligned.
                 */
+               /* XXX TODO: convert this assert to error return later */
                ASSERT(!(prg_hdr->p_vaddr & PAGE_MASK) &&
                       !(prg_hdr->p_offset & PAGE_MASK));
 
@@ -565,7 +619,7 @@ status_t task_prepare(char *task_addr, u_int task_size, task_t *taskp,
        if (bss_pad_shdr_p)
                *bss_pad_shdr_p = NULL;
 
-       shdr   = (Elf32_Shdr *) ((u_int)ehdr + ehdr->e_shoff);
+       shdr   = (Elf32_Shdr *)((u_int)ehdr + ehdr->e_shoff);
        shstbl = (char *)((u_int)ehdr + shdr[ehdr->e_shstrndx].sh_offset);
 
        bss_shdr = bss_pad_shdr = manifest_shdr = NULL;
@@ -637,8 +691,12 @@ status_t task_prepare(char *task_addr, u_int task_size, task_t *taskp,
        if (manifest_shdr == NULL) {
                taskp->props.manifest_exists = 0;
        } else {
-               task_load_config_options((u_int)task_addr, taskp, manifest_shdr);
                taskp->props.manifest_exists = 1;
+               err = task_load_config_options((u_int)task_addr, taskp, manifest_shdr);
+               if (err != NO_ERROR) {
+                       dprintf(CRITICAL, "Invalid task manifest: 0x%x\n", err);
+                       goto exit;
+               }
        }
 
        taskp->elf_hdr = ehdr;
@@ -665,6 +723,9 @@ static void task_mem_init()
                /* list of tasks (static and loaded) */
                carveout_taskmem -= (TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE);
                task_list = (task_t *)carveout_taskmem;
+
+               task_load_config ((vaddr_t)&_end,
+                                 (vaddr_t *)&carveout_taskmem);
        }
 
        ASSERT(!(carveout_taskmem & PAGE_MASK));
@@ -704,8 +765,8 @@ static void task_bootloader()
 
                /* statically loaded tasks must run or the system halts */
                if (err != NO_ERROR) {
-                       dprintf (CRITICAL, "%s: task %d preparation failed (%d)\n",
-                                __func__, task_count, err);
+                       dprintf(CRITICAL, "%s: task %d preparation failed (%d)\n",
+                               __func__, task_count, err);
                        halt();
                }
 
@@ -786,7 +847,13 @@ status_t task_init_one_task(task_t *task, u_int task_type)
                task_set_valloc_start(task);
        }
 
-       snprintf(name, sizeof(name) - 1, "task_%u_T0", task->task_index);
+       /* force zero terminated task context derived thread names */
+       if (task->task_name[0] != '\000') {
+               snprintf(name, sizeof(name) - 1, "%s_%u_T0", task->task_name,
+                        task->task_index);
+       } else {
+               snprintf(name, sizeof(name) - 1, "task_%u_T0", task->task_index);
+       }
        name[sizeof(name) - 1] = '\000';
 
        task->task_type = task_type;
@@ -812,8 +879,12 @@ status_t task_init_one_task(task_t *task, u_int task_type)
 
        /* start it */
        if (task->entry) {
-               dprintf(INFO, "starting task#%u\n",
-                       task->task_index);
+               name[0] = '\000';
+               if (task->task_name[0] != '\000') {
+                       snprintf(name, sizeof(name) -1, " (%s)", task->task_name);
+                       name[sizeof(name) - 1] = '\000';
+               }
+               dprintf(INFO, "starting task#%u%s\n", task->task_index, name);
                thread_resume(thread);
        }
 
@@ -835,31 +906,44 @@ void task_init()
        u_int i;
 
        for (i = 0, task = task_list; i < task_count; i++, task++) {
-               err = task_init_one_task (task, TASK_TYPE_STATIC);
+               err = task_init_one_task(task, TASK_TYPE_STATIC);
                if (err != NO_ERROR) {
                        dprintf(CRITICAL, "%s: static task start failed %d -- halting\n",
                                __func__, err);
-                       halt ();
+                       halt();
                }
        }
+
+       task_load_init();
 }
 
 task_t *task_find_task_by_uuid(te_service_id_t *uuid)
 {
-       task_t *task;
-       u_int i;
+       task_t *task = NULL;
+       u_int i = 0;
 
        /* find task for this uuid */
-       for (i = 0, task = task_list; i < task_count; i++, task++) {
-               if (!memcmp(&task->props.uuid, uuid, sizeof(te_service_id_t))) {
-                       break;
+       if (uuid) {
+               for (i = 0, task = task_list; i < task_count; i++, task++) {
+                       if (task->task_state == TASK_STATE_ACTIVE ||
+                           task->task_state == TASK_STATE_INIT) {
+                               if (!memcmp(&task->props.uuid, uuid, sizeof(te_service_id_t))) {
+                                       break;
+                               }
+                       }
                }
+               if (i == task_count)
+                       return NULL;
        }
+       return task;
+}
 
-       if (i == task_count)
+task_t *task_find_task_by_index(uint32_t index)
+{
+       if (index >= task_count)
                return NULL;
 
-       return task;
+       return &task_list[index];
 }
 
 void
@@ -883,3 +967,73 @@ task_print_uuid(uint32_t level, const char *prefix, const task_t *taskp)
                         uuid->clock_seq_and_node[7]);
        }
 }
+
+/*
+ * This is only used by task loading code but placed here because it modifies
+ * task_count and the task_list when a new task is loaded.
+ */
+status_t task_register(task_t **task_p)
+{
+       status_t err  = NO_ERROR;
+       task_t *dtask = NULL;
+
+       if (!task_p || !*task_p) {
+               err = ERR_INVALID_ARGS;
+               goto exit;
+       }
+
+       enter_critical_section();
+
+       do {
+               if ((task_count + 1) >= MAX_TASK_COUNT) {
+                       err = ERR_TOO_MANY_TASKS;
+                       break;
+               }
+
+               /*
+                * Make sure UUID doesn't already exist.  Note that
+                * this search won't include the task we are processing
+                * here because task_count hasn't been incremented yet.
+                */
+               if (task_find_task_by_uuid(&(*task_p)->props.uuid) != NULL) {
+                       err = ERR_ALREADY_EXISTS;
+                       break;
+               }
+
+               /* Committed here */
+               dtask = &task_list[task_count];
+
+               /* task list entry reserved for this task */
+               (*task_p)->task_index = task_count++;
+       } while (0);
+
+       exit_critical_section();
+
+       if (dtask) {
+               memcpy(dtask, *task_p, sizeof(task_t));
+
+               /* clear the input task arg (not to be used anymore) */
+               memset(*task_p, 0, sizeof(task_t));
+
+               /* *task_p is now an entry from the task list */
+               *task_p = dtask;
+       }
+
+exit:
+       return err;
+}
+
+const task_t *task_get_list()
+{
+       return (const task_t *)task_list;
+}
+
+u_int task_get_count()
+{
+       return task_count;
+}
+
+u_int task_get_max_count()
+{
+       return MAX_TASK_COUNT;
+}
diff --git a/kernel/task_load.c b/kernel/task_load.c
new file mode 100644 (file)
index 0000000..9cbc608
--- /dev/null
@@ -0,0 +1,1177 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <debug.h>
+#include <sys/types.h>
+#include <compiler.h>
+#include <assert.h>
+#include <string.h>
+#include <malloc.h>
+#include <err.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <arch.h>
+#include <arch/arm.h>
+#include <arch/arm/mmu.h>
+#include <kernel/task.h>
+#include <kernel/thread.h>
+#include <kernel/elf.h>
+#include <platform.h>
+#include <platform/platform_p.h>
+#include <kernel/task_load.h>
+#include <lib/heap.h>
+
+/* for handling requests from syscall.c */
+#include <lib/ote/ote_protocol.h>
+
+#ifndef TASK_HEAP_PERCENTAGE
+
+/*
+ * Max 60% of heap can be used for task loading.
+ * Define as 0 to disable task loading.
+ */
+#define TASK_HEAP_PERCENTAGE 60
+
+#endif /* TASK_HEAP_PERCENTAGE */
+
+#ifndef TASK_HEAP_PAGES_RESERVED
+/*
+ * Task loading will leave this many pages for TLK heap use
+ * no matter what the percentage above is or no matter what
+ * the heap size is.
+ *
+ * If the heap is too small, task loading will be disabled.
+ */
+#define TASK_HEAP_PAGES_RESERVED 50
+#endif /* TASK_HEAP_PAGES_RESERVED */
+
+#ifndef TASK_HEAP_PAGES_MIN
+/*
+ * No point in supporting task loading if there are less pages
+ * than this to load tasks to.
+ *
+ * If the heap is too small, task loading will be disabled.
+ */
+#define TASK_HEAP_PAGES_MIN    20
+#endif /* TASK_HEAP_PAGES_MIN */
+
+/* pending task states (before task is fully loaded) */
+typedef enum {
+       TASK_PSTATE_UNDEFINED,
+       TASK_PSTATE_INVALID,
+       TASK_PSTATE_MEMBUF,
+       TASK_PSTATE_TA_MAPPED,
+       TASK_PSTATE_TA_UNMAPPED,
+       TASK_PSTATE_PREPARED,
+       TASK_PSTATE_READY
+} tp_state_t;
+
+/*
+ * Loaded tasks pending activation.
+ * Activated tasks are removed from this list.
+ */
+typedef struct {
+       uint32_t        tp_handle;
+       tp_state_t      tp_state;
+       task_t          tp_task;
+       vaddr_t         tp_task_tlk_addr;       /* task virt address in TLK */
+       uint32_t        tp_task_size;
+       vaddr_t         tp_task_ta_addr;        /* task virt address in TA */
+       struct list_node tp_node;
+} task_pending_t;
+
+static struct list_node task_pending_list;
+
+/*
+ * Count task pages allocated and max number of pages allowed
+ * for task loading.
+ *
+ * TASK_HEAP_PERCENTAGE defines how much of the total TLK heap can
+ * be used for this.
+ */
+static u_int task_pages_heap_used;
+static u_int task_pages_heap_max;
+static u_int task_heap_percentage = TASK_HEAP_PERCENTAGE;
+
+static vaddr_t original_heap_start;
+static vaddr_t original_heap_end;
+
+void task_load_config(vaddr_t vaddr_begin, vaddr_t *vaddr_end_p)
+{
+       u_int heap_pages = 0;
+
+       /* if no memory available, just return */
+       if (!vaddr_end_p || *vaddr_end_p == 0)
+               goto exit;
+
+       /* save the original limit values for info (heap begin..end) */
+       original_heap_start = vaddr_begin;
+       original_heap_end   = *vaddr_end_p;
+
+       if ((original_heap_start == 0) ||
+           (original_heap_start >= original_heap_end) ||
+           (task_heap_percentage == 0))
+               goto exit;
+
+       heap_pages = (original_heap_end - original_heap_start) / PAGE_SIZE;
+
+       if (heap_pages <= TASK_HEAP_PAGES_RESERVED) {
+               dprintf(INFO, "heap too small for task loading (%d pages)\n",
+                       heap_pages);
+               goto exit;
+       }
+
+       /*
+        * Max number of heap pages allowed for task loading
+        * This is an upper limit, less heap may be available at runtime.
+        */
+       task_pages_heap_max =
+               (heap_pages / 100) * task_heap_percentage;
+
+       if (heap_pages - task_pages_heap_max < TASK_HEAP_PAGES_RESERVED)
+               task_pages_heap_max = heap_pages - TASK_HEAP_PAGES_RESERVED;
+
+       if (task_pages_heap_max < TASK_HEAP_PAGES_MIN) {
+               dprintf(INFO, "Not enough pages for task loading (%d pages)\n",
+                       task_pages_heap_max);
+
+               task_pages_heap_max = 0;
+               goto exit;
+       }
+
+       dprintf(INFO, "Max heap %d pages; task load limit %d pages\n",
+               heap_pages, task_pages_heap_max);
+
+       if (0) {
+       exit:
+               dprintf(INFO, "task loading not supported\n");
+       }
+}
+
+void task_load_init()
+{
+       list_initialize(&task_pending_list);
+}
+
+u_int task_allowed_to_load_tasks(task_t *taskp)
+{
+       return (taskp && (taskp->props.install_priv & OTE_INSTALL_AUTHORIZED));
+}
+
+static void task_add_new_pending(task_pending_t *tp, vaddr_t addr, uint32_t size,
+                                tp_state_t pstate)
+{
+       uint32_t handle = 0;
+       int found = 0;
+
+       if (!tp || !size || !addr)
+               return;
+
+       tp->tp_task_tlk_addr = addr;    /* task address in TLK */
+       tp->tp_task_ta_addr = 0;        /* TA address not yet mapped */
+       tp->tp_task_size = size;        /* task byte size */
+       tp->tp_state = TASK_PSTATE_INVALID;
+
+       do {
+               task_pending_t *tp_scan = NULL;
+
+               found = 0;
+
+               handle = platform_get_rand32(); /* need to push a unique non-zero handle */
+               if (!handle)
+                       handle++;               /* zero means handle unused */
+
+               enter_critical_section();
+
+               list_for_every_entry(&task_pending_list, tp_scan,
+                                    task_pending_t, tp_node) {
+                       if (tp_scan->tp_handle == handle) {
+                               found++;
+                               break;
+                       }
+               }
+
+               /* new handle found; reserve it */
+               if (!found) {
+                       tp->tp_handle = handle;
+                       tp->tp_state  = pstate;
+                       list_add_tail(&task_pending_list, &tp->tp_node);
+               }
+               exit_critical_section();
+
+       } while (found);
+}
+
+/*
+ * Atomically search for a pending task by its random handle.
+ * A found entry is removed from the list, so the caller must push it back if required.
+ */
+static task_pending_t *task_find_pending(uint32_t handle)
+{
+       task_pending_t *tp_scan = NULL;
+       int found = 0;
+
+       enter_critical_section();
+
+       list_for_every_entry(&task_pending_list, tp_scan, task_pending_t, tp_node) {
+               if (tp_scan->tp_handle == handle) {
+                       found++;
+
+                       list_delete(&tp_scan->tp_node);
+                       break;
+               }
+       }
+
+       exit_critical_section();
+
+       if (!found)
+               tp_scan = NULL;
+
+       return tp_scan;
+}
+
+static void task_push_pending(task_pending_t *tp, tp_state_t pstate)
+{
+       if (tp) {
+               tp->tp_state = pstate;
+
+               enter_critical_section();
+               list_add_tail(&task_pending_list, &tp->tp_node);
+               exit_critical_section();
+       }
+}
+
+/*
+ * Map the reserved memory virtual address into the target TA
+ * task address space.
+ */
+static status_t task_map_memory_to_ta(task_t *taskp, uint32_t handle,
+                                     vaddr_t *ta_addr_p)
+{
+       status_t err = NO_ERROR;
+       int flags = TM_UW | TM_UR | TM_KERN_SEC_VA;
+       task_map_t *mptr = NULL;
+       task_pending_t *tp  = NULL;
+
+       if (!taskp || !ta_addr_p)
+               return ERR_INVALID_ARGS;
+
+       tp = task_find_pending(handle);
+       if (!tp) {
+               dprintf(SPEW, "%s: task handle unknown 0x%x\n",
+                       __func__, handle);
+               err = ERR_NOT_FOUND;
+               goto exit;
+       }
+
+       if (tp->tp_state != TASK_PSTATE_MEMBUF) {
+               dprintf(CRITICAL, "%s: task handle 0x%x in wrong state: %d\n",
+                       __func__, handle, tp->tp_state);
+               err = ERR_TASK_GENERIC;
+               goto exit;
+       }
+
+       mptr = arch_task_map_memory(taskp, tp->tp_task_tlk_addr,
+                                   tp->tp_task_size, flags);
+       if (mptr == NULL) {
+               dprintf(CRITICAL, "TLK vaddr app load memory map failed for 0x%lx (%d bytes))\n",
+                       (unsigned long)tp->tp_task_tlk_addr, tp->tp_task_size);
+               err = ERR_NO_MEMORY;
+               goto exit;
+       }
+
+       tp->tp_task_ta_addr = mptr->vaddr;
+       *ta_addr_p = mptr->vaddr;
+
+       task_push_pending(tp, TASK_PSTATE_TA_MAPPED);
+
+       if (0) {
+       exit:
+               /* released by caller */
+               if (tp)
+                       task_push_pending(tp, TASK_PSTATE_INVALID);
+
+               if (err == NO_ERROR)
+                       err = ERR_GENERIC;
+       }
+
+       return err;
+}
+
+/* @brief Task loading step#1/3, request TLK shared memory mapping to TA space
+ *
+ * Allocate page aligned memory from the TLK heap for app loading and
+ * map it to TA memory. TA then copies the TASK into this memory area
+ * and calls tlk_handle_app_prepare.
+ */
+status_t task_request_app_load_memory(u_int byte_size, uint32_t *handle_p)
+{
+       status_t err = NO_ERROR;
+       u_int    npages = 0;
+       vaddr_t  load_addr = NULL;
+       task_pending_t *tp = NULL;
+
+       if (byte_size <= 0 || !handle_p) {
+               err = ERR_INVALID_ARGS;
+               goto exit;
+       }
+
+       *handle_p = 0;
+       npages = ROUNDUP(byte_size, PAGE_SIZE) / PAGE_SIZE;
+
+       dprintf(SPEW, "TASK MEM ALLOCATION REQUEST => %d bytes (rounded to %d pages)\n",
+               byte_size, npages);
+
+       load_addr = (vaddr_t)heap_alloc((npages * PAGE_SIZE), PAGE_SIZE);
+       if (!load_addr) {
+               err = ERR_NO_MEMORY;
+               goto exit;
+       }
+       memset((void *)load_addr, 0, npages * PAGE_SIZE);
+
+       tp = malloc(sizeof (task_pending_t));
+       if (!tp) {
+               err = ERR_NO_MEMORY;
+               goto exit;
+       }
+       memset(tp, 0, sizeof (task_pending_t));
+
+       /* check for an upper limit for heap allocation for task loading */
+       enter_critical_section();
+
+       if (task_pages_heap_used + npages > task_pages_heap_max) {
+               err = ERR_TASK_GENERIC;
+       } else {
+               task_pages_heap_used += npages;
+       }
+
+       exit_critical_section();
+
+       /* if not allowed to reserve more pages */
+       if (err != NO_ERROR) {
+               dprintf(INFO, "TASK MEM => too many pages (%d) used for tasks\n",
+                       task_pages_heap_used);
+               goto exit;
+       }
+
+       /* setup a task load pending object with a unique handle */
+       task_add_new_pending(tp, load_addr, byte_size, TASK_PSTATE_MEMBUF);
+
+       /* Must not fail after this point... */
+       *handle_p = tp->tp_handle;
+
+       dprintf(SPEW, "TASK MEM allocated (handle 0x%x) => app start=0x%x (%d pages) [tasks pages used %d]\n",
+               *handle_p, (uint32_t)load_addr, npages, task_pages_heap_used);
+
+       if (0) {
+       exit:
+               if (tp)
+                       free(tp);
+
+               if (load_addr)
+                       heap_free((void *)load_addr);
+
+               if (err == NO_ERROR)
+                       err = ERR_GENERIC;
+       }
+
+       return err;
+}
+
+static status_t task_dealloc_app_load_memory_tp(task_pending_t *tp)
+{
+       status_t err = NO_ERROR;
+       u_int npages = 0;
+       u_int byte_size = 0;
+       vaddr_t vaddr = 0;
+       u_int used_pages = task_pages_heap_used;
+
+       if (!tp) {
+               err = ERR_TASK_GENERIC;
+               goto exit;
+       }
+
+       vaddr     = tp->tp_task_tlk_addr;
+       byte_size = tp->tp_task_size;
+
+       if (!vaddr || byte_size <= 0) {
+               err = ERR_TASK_GENERIC;
+               goto exit;
+       }
+
+       npages = ROUNDUP(byte_size, PAGE_SIZE) / PAGE_SIZE;
+
+       ASSERT (task_pages_heap_used >= npages);
+
+       heap_free ((void *)vaddr);
+
+       enter_critical_section();
+
+       if (task_pages_heap_used >= npages)
+               task_pages_heap_used -= npages;
+
+       exit_critical_section();
+
+       if (used_pages < npages)
+               dprintf(INFO, "TASK MEM (handle 0x%x) dealloc INCONSISTENCY => task addr 0x%x (%d pages) [tasks pages used %d]\n",
+                       tp->tp_handle, (uint32_t)vaddr, npages, used_pages);
+       else
+               dprintf(INFO, "TASK MEM (handle 0x%x) dealloc => task addr 0x%x (%d pages) [tasks pages used %d]\n",
+                       tp->tp_handle, (uint32_t)vaddr, npages, used_pages);
+
+       if (0) {
+       exit:
+               if (err == NO_ERROR)
+                       err = ERR_GENERIC;
+       }
+       return err;
+}
+
+static status_t task_dealloc_app_load_memory(uint32_t handle)
+{
+       status_t err = NO_ERROR;
+       task_pending_t *tp = NULL;
+
+       tp = task_find_pending(handle);
+       if (!tp) {
+               dprintf(SPEW, "%s: task handle unknown 0x%x\n",
+                       __func__, handle);
+               err = ERR_NOT_FOUND;
+               goto exit;
+       }
+
+       err = task_dealloc_app_load_memory_tp(tp);
+
+       free(tp);
+       tp = NULL;
+
+       if (0) {
+       exit:
+               if (err == NO_ERROR)
+                       err = ERR_GENERIC;
+       }
+       return err;
+}
+
+/* @brief Override manifest properties unless manifest is immutable.
+ *
+ * Installer task starting a loaded task may restrict the manifest options
+ * dynamically or prevent a task from being run (after verifying the
+ * manifest options).
+ *
+ * Modifications requested to manifest flagged as immutable are ignored.
+ */
+static status_t task_property_override(task_t *taskp, task_restrictions_t *tr)
+{
+	uint32_t rounded;
+
+	if (!tr)
+		return ERR_TASK_GENERIC;
+
+	/* an immutable manifest silently ignores every override request */
+	if (taskp->props.immutable & OTE_MANIFEST_IMMUTABLE)
+		return NO_ERROR;
+
+	if (tr->min_stack_size > 0) {
+		rounded = ROUNDUP(tr->min_stack_size, PAGE_SIZE);
+		taskp->props.min_stack_size = rounded;
+		dprintf(INFO, "Loaded task stack size set to %d bytes\n",
+			taskp->props.min_stack_size);
+	}
+
+	if (tr->min_heap_size > 0) {
+		rounded = ROUNDUP(tr->min_heap_size, PAGE_SIZE);
+		taskp->props.min_heap_size = rounded;
+		dprintf(INFO, "Loaded task heap size set to %d bytes\n",
+			taskp->props.min_heap_size);
+	}
+
+	/* override the task name if set */
+	if (tr->task_name[0])
+		task_set_name(taskp, tr->task_name);
+
+	return NO_ERROR;
+}
+
+/* @brief unmap task from TA memory at step#2/3 of task loading before
+ * the binary is parsed.
+ *
+ * @param taskp   installer task whose address space holds the mapping
+ * @param handle  handle identifying the pending task load
+ *
+ * Removes the shared install buffer from the installer TA's address
+ * space so the TA can no longer touch the image while TLK parses it.
+ * On success the pending entry advances to TASK_PSTATE_TA_UNMAPPED;
+ * on any error the entry (if found) is pushed to TASK_PSTATE_INVALID
+ * and the caller is expected to release it.
+ */
+static status_t task_unmap_app_mem_from_ta(task_t *taskp, uint32_t handle)
+{
+	status_t err = NO_ERROR;
+	task_pending_t *tp = NULL;
+	task_map_t *mptr = NULL;
+
+	if (!taskp) {
+		err = ERR_INVALID_ARGS;
+		goto exit;
+	}
+
+	tp = task_find_pending(handle);
+
+	if (!tp) {
+		dprintf(SPEW, "%s: task handle unknown 0x%x\n",
+			__func__, handle);
+		err = ERR_NOT_FOUND;
+		goto exit;
+	}
+
+	/* only a buffer still mapped into the TA can be unmapped */
+	if (tp->tp_state != TASK_PSTATE_TA_MAPPED) {
+		dprintf(CRITICAL, "%s: task handle 0x%x in wrong state: %d\n",
+			__func__, handle, tp->tp_state);
+		err = ERR_TASK_GENERIC;
+		goto exit;
+	}
+
+	/*
+	 * Unmap the application install buffer from the installer
+	 * TA memory virtual address before doing anything with the TA
+	 * copied data (for security reasons).
+	 */
+	mptr = task_find_mapping(taskp, tp->tp_task_ta_addr, tp->tp_task_size);
+	if (!mptr) {
+		dprintf(CRITICAL, "Can't find mapped memory from task address space\n");
+		err = ERR_NOT_FOUND;
+		goto exit;
+	}
+
+	arch_task_unmap_memory(taskp, mptr);
+	/* fixed log format: "0x%08lx" (was "0%08lx", dropping the 'x') */
+	dprintf (SPEW,
+		 "Unmapped task map memory <0x%08lx, %d bytes> from the calling task\n",
+		 tp->tp_task_ta_addr, tp->tp_task_size);
+
+	task_push_pending(tp, TASK_PSTATE_TA_UNMAPPED);
+
+	if (0) {
+	exit:
+		/* released by caller */
+		if (tp)
+			task_push_pending(tp, TASK_PSTATE_INVALID);
+
+		if (err == NO_ERROR)
+			err = ERR_GENERIC;
+	}
+	return err;
+}
+
+/* @brief Copy task properties to task info.
+ *
+ * Zeroes @ti, then (when both pointers are valid) fills it from the
+ * manifest-derived properties, name and private data of @taskp.
+ */
+static void task_get_config(task_info_t *ti, task_t *taskp)
+{
+	if (!ti)
+		return;
+
+	memset(ti, 0, sizeof(task_info_t));
+
+	if (!taskp)
+		return;
+
+	memcpy(&ti->uuid, &taskp->props.uuid, sizeof(taskp->props.uuid));
+
+	/* mirror the manifest-derived properties field by field */
+	ti->manifest_exists = taskp->props.manifest_exists;
+	ti->multi_instance  = taskp->props.multi_instance;
+	ti->immutable       = taskp->props.immutable;
+	ti->min_stack_size  = taskp->props.min_stack_size;
+	ti->min_heap_size   = taskp->props.min_heap_size;
+	ti->map_io_mem_cnt  = taskp->props.map_io_mem_cnt;
+	ti->restrict_access = taskp->props.restrict_access;
+	ti->install_priv    = taskp->props.install_priv;
+
+	memcpy(ti->task_name, taskp->task_name, sizeof(taskp->task_name));
+
+	/* make sure the copied name is always NUL terminated */
+	ti->task_name[sizeof (ti->task_name) - 1] = '\000';
+
+	memcpy(ti->task_private_data, taskp->task_private_data,
+	       sizeof(taskp->task_private_data));
+}
+
+/* @brief Parse loaded task binary in TLK memory at step#2/3 of task loading.
+ *
+ * @param handle  handle identifying the pending task load
+ * @param ti      out: selected manifest/config data copied back to caller
+ *
+ * The pending entry must be in TASK_PSTATE_TA_UNMAPPED, i.e. the install
+ * buffer must no longer be visible to the installer TA. On success the
+ * entry advances to TASK_PSTATE_PREPARED; on any error it is pushed to
+ * TASK_PSTATE_INVALID and the caller releases it.
+ */
+static status_t task_parse_app(uint32_t handle, task_info_t *ti)
+{
+	status_t err = NO_ERROR;
+	task_pending_t *tp = NULL;
+
+	tp = task_find_pending(handle);
+	if (!tp) {
+		dprintf(SPEW, "%s: task handle unknown 0x%x\n",
+			__func__, handle);
+		err = ERR_NOT_FOUND;
+		goto exit;
+	}
+
+	/* parsing is only legal after the TA mapping has been removed */
+	if (tp->tp_state != TASK_PSTATE_TA_UNMAPPED) {
+		dprintf(CRITICAL, "%s: task handle 0x%x in wrong state: %d\n",
+			__func__, handle, tp->tp_state);
+		err = ERR_TASK_GENERIC;
+		goto exit;
+	}
+
+	/* prepare task based on static app data in buffer */
+	err = task_prepare((char *)tp->tp_task_tlk_addr, tp->tp_task_size,
+			   &tp->tp_task, NULL);
+	if (err != NO_ERROR) {
+		dprintf(CRITICAL, "%s: loaded task preparation failed (%d)\n",
+			__func__, err);
+		goto exit;
+	}
+
+	/* not fatal here; task_run_app() rejects manifest-less images later */
+	if (!tp->tp_task.props.manifest_exists) {
+		dprintf(INFO, "%s: loaded task image has no manifest\n", __func__);
+	}
+
+	/* copy selected manifest config options to be passed back to caller */
+	task_get_config(ti, &tp->tp_task);
+
+	task_push_pending(tp, TASK_PSTATE_PREPARED);
+
+	/* error path: invalidate the pending entry; never returns NO_ERROR */
+	if (0) {
+	exit:
+		/* released by caller */
+		if (tp)
+			task_push_pending(tp, TASK_PSTATE_INVALID);
+
+		if (err == NO_ERROR)
+			err = ERR_GENERIC;
+	}
+	return err;
+}
+
+/* @brief Start loaded application with manifest restrictions at step#3/3.
+ *
+ * If manifest is immutable restriction values are ignored.
+ *
+ * @param handle       handle identifying the pending task load
+ * @param reject_task  non-zero to abandon the load and free its resources
+ * @param tr           installer restrictions; NULL is treated as a reject
+ *
+ * On success the task is registered and started and the pending entry is
+ * consumed. On failure before registration the load memory is released;
+ * after registration a failure leaks resources (see XXX note below).
+ */
+static status_t task_run_app(uint32_t handle, uint32_t reject_task, task_restrictions_t *tr)
+{
+	status_t err = NO_ERROR;
+	task_pending_t *tp = NULL;
+	task_t *taskp = NULL;
+	int task_registered = 0;
+
+	tp = task_find_pending(handle);
+	if (!tp) {
+		dprintf(SPEW, "%s: task handle unknown 0x%x\n",
+			__func__, handle);
+		err = ERR_NOT_FOUND;
+		goto exit;
+	}
+
+	/* a task can be rejected at any state; this triggers resource cleanup */
+	if (!tr || reject_task) {
+		dprintf(INFO, "%s: task handle 0x%x rejected in state: %d\n",
+			__func__, handle, tp->tp_state);
+
+		task_dealloc_app_load_memory_tp(tp);
+	} else {
+		if (tp->tp_state != TASK_PSTATE_PREPARED) {
+			dprintf(CRITICAL, "%s: task handle 0x%x in wrong state: %d\n",
+				__func__, handle, tp->tp_state);
+			err = ERR_TASK_GENERIC;
+			goto exit;
+		}
+
+		taskp = &tp->tp_task;
+
+		/*
+		 * Any possible task restrictions are enforced here
+		 *
+		 * If loading an image without manifest is required =>
+		 * add support for setting UUID and other manifest
+		 * configured options by the property override calls.
+		 *
+		 * Currently this is NOT SUPPORTED but it is simple to
+		 * add to the task_property_override().
+		 *
+		 * Doing so would enable the installer to control all
+		 * aspects of app install (task loading), including
+		 * the application identity and all options (e.g.
+		 * the task config data modification).
+		 *
+		 * NOTE: Currently all loaded tasks must contain a
+		 * built-in manifest.
+		 */
+		err = task_property_override(taskp, tr);
+		if (err != NO_ERROR) {
+			dprintf(CRITICAL, "%s: task install 0x%x blocked by override error %d\n",
+				__func__, handle, err);
+			goto exit;
+		}
+
+		/*
+		 * At this point the task must either have manifest loaded from signed image
+		 * or contain one created by the property override above.
+		 */
+		if (!taskp->props.manifest_exists) {
+			dprintf(CRITICAL, "%s: Invalid task image\n", __func__);
+			err = ERR_NOT_VALID;
+			goto exit;
+		}
+
+		/*
+		 * Atomically commit task (and assign the index) to the known task list
+		 * unless there is an error. TASKP value swapped to track the registered
+		 * task header (old object is cleared in the call).
+		 */
+		err = task_register(&taskp);
+
+		if (err != NO_ERROR) {
+			dprintf(CRITICAL, "%s: loaded task %d registration failed (%d)\n",
+				__func__, task_get_count(), err);
+			goto exit;
+		}
+
+		/* from here on the error path must not free the load memory */
+		task_registered++;
+
+		{
+			const task_t *task_list = task_get_list();
+
+			/* taskp now points to an entry in the carve out task list */
+			ASSERT(task_list);
+			ASSERT(taskp == &task_list[taskp->task_index]);
+		}
+
+		/*
+		 * Init task and start it (flagged as post-loaded)
+		 *
+		 * XXX NOTE:
+		 * Failing here leaks memory as task terminating/releasing
+		 * resources at this point is not supported (yet).
+		 *
+		 * Resources may be allocated for stack, heap, address map,
+		 * thread, etc...
+		 */
+		err = task_init_one_task(taskp, TASK_TYPE_LOADED);
+		if (err != NO_ERROR) {
+			dprintf(CRITICAL, "%s: loaded task %d init failed (%d)\n",
+				__func__, task_get_count(), err);
+			goto exit;
+		}
+
+		/*
+		 * Already removed from pending task list and now junked
+		 * because task is now active
+		 */
+	}
+
+	free(tp);
+	tp = NULL;
+
+	/* error path: drop the pending entry (and its memory if unregistered) */
+	if (0) {
+	exit:
+		if (tp) {
+			if (!task_registered)
+				task_dealloc_app_load_memory_tp(tp);
+
+			free(tp);
+		}
+
+		if (err == NO_ERROR)
+			err = ERR_GENERIC;
+	}
+	return err;
+}
+
+/**
+ * @brief Change name of the task
+ *
+ * A NULL task is ignored; a NULL or empty name clears the stored name.
+ */
+void task_set_name(task_t *task, const char *name)
+{
+	if (!task)
+		return;
+
+	if (name && name[0])
+		strlcpy(task->task_name, name, sizeof(task->task_name));
+	else
+		task->task_name[0] = '\000';
+}
+
+#ifdef DEBUG
+
+/* reference to end of the binary and end of heap */
+extern int _end;
+extern int _heap_end;
+
+/* debug: print info of heap sizes and task load space to console */
+void task_system_info(void)
+{
+#define HEAP_LEN ((u_int)_heap_end - (u_int)&_end)
+
+	dprintf(INFO, "HEAP start (orig): 0x%x\n", (u_int)original_heap_start);
+	dprintf(INFO, "HEAP end   (orig): 0x%x\n", (u_int)original_heap_end);
+
+	dprintf(INFO, "HEAP start (end of binary == &_end) %p\n", &_end);
+	dprintf(INFO, "HEAP end (end_of_memory == _heap_end) %p\n",
+		(void *)_heap_end);
+	dprintf(INFO, "HEAP size (_heap_end-&_end)=%u (0x%x) bytes\n",
+		HEAP_LEN, HEAP_LEN);
+
+	dprintf(INFO, "TASK_INFO: Max task count: %d\n",
+		task_get_max_count());
+
+	dprintf(INFO, "TASK_INFO: number of tasks: %d\n",
+		task_get_count());
+
+	/* sizeof yields size_t; cast so the %u specifier matches */
+	dprintf(INFO, "TASK_INFO: sizeof(task_t): %u\n",
+		(u_int)sizeof(task_t));
+
+	dprintf(INFO, "HEAP pages used for tasks: %d\n", task_pages_heap_used);
+
+	dprintf(INFO, "HEAP pages for tasks: left %d (out of %d)\n",
+		task_pages_heap_max - task_pages_heap_used,
+		task_pages_heap_max);
+
+#undef HEAP_LEN
+}
+#endif /* DEBUG */
+
+/* handlers for task_request_handler() */
+
+/* @brief Handle OTE_TASK_OP_MEMORY_REQUEST: step#1/3 of task loading.
+ *
+ * Allocates page-rounded memory to hold the task image being installed
+ * and maps it into the calling (installer) TA so the image can be
+ * copied in.
+ *
+ * @return 0 on success with app_addr/app_handle filled in, negative
+ *         errno-style value on failure.
+ */
+static int tlk_handle_app_memory_request(te_app_load_memory_request_args_t *args)
+{
+	vaddr_t     ta_vaddr    = 0;	/* vaddr_t is an address, not a pointer */
+	uint32_t    handle      = 0;
+	task_t     *taskp       = current_thread->arch.task;
+
+	if (!args)
+		return -EINVAL;
+
+	dprintf(SPEW, "%s: mem request app size %d bytes\n",
+		__func__, args->app_size);
+
+	task_print_uuid(INFO, "Map task memory request by UUID ", taskp);
+
+	/*
+	 * Simple manifest based permission allowing a task to install
+	 * other tasks.
+	 */
+	if (!task_allowed_to_load_tasks(taskp)) {
+		task_print_uuid(CRITICAL, "Client tried to perform "
+				"an operation for which they are "
+				"not permitted ", taskp);
+		return -EACCES;
+	}
+
+	{
+		status_t err = NO_ERROR;
+
+		err = task_request_app_load_memory(args->app_size, &handle);
+		if (err != NO_ERROR)  {
+			dprintf(CRITICAL, "Failed to allocate app memory (%d bytes, err %d)\n",
+				args->app_size, err);
+			return ((err == ERR_NO_MEMORY) ? -ENOMEM : -EINVAL);
+		}
+
+		dprintf(SPEW, "Handle for app loading 0x%x, %d bytes\n",
+			handle, args->app_size);
+
+		/* map the freshly allocated buffer into the installer TA */
+		err = task_map_memory_to_ta(taskp, handle, &ta_vaddr);
+		if (err != NO_ERROR || !ta_vaddr) {
+			/* roll back the allocation; nothing was handed out */
+			status_t err2 = task_dealloc_app_load_memory(handle);
+			if (err2 != NO_ERROR)  {
+				dprintf(CRITICAL, "Failed to dealloc app handle 0x%x, %d bytes, err %d\n",
+					handle, args->app_size, err2);
+			}
+			return -ENOMEM;
+		}
+	}
+
+	/* pass shared buffer TA vaddr back to the TA */
+	args->app_addr   = ta_vaddr;
+	/* handle to refer to the pending task load */
+	args->app_handle = handle;
+
+	/*
+	 * app_size field is unmodified; still contains the requested
+	 * mem size but the actual shared buffer size is rounded
+	 * to next page boundary.
+	 */
+	return 0;
+}
+
+/* Translate internal task_info_t into the te_task_info_t wire format. */
+static void map_task_info_to_te(te_task_info_t *to, task_info_t *from)
+{
+	if (!to)
+		return;
+
+	memset(to, 0, sizeof(te_task_info_t));
+
+	if (!from)
+		return;
+
+	memcpy(&to->uuid, &from->uuid, sizeof(from->uuid));
+
+	/* mirror each shared property field by field */
+	to->manifest_exists = from->manifest_exists;
+	to->multi_instance  = from->multi_instance;
+	to->min_stack_size  = from->min_stack_size;
+	to->min_heap_size   = from->min_heap_size;
+	to->map_io_mem_cnt  = from->map_io_mem_cnt;
+	to->restrict_access = from->restrict_access;
+	to->install_priv    = from->install_priv;
+	to->immutable       = from->immutable;
+
+	memcpy(to->task_name, from->task_name, OTE_TASK_NAME_MAX_LENGTH);
+	memcpy(to->task_private_data,
+	       from->task_private_data, OTE_TASK_PRIVATE_DATA_LENGTH);
+}
+
+/* @brief Handle OTE_TASK_OP_PREPARE: step#2/3 of task loading.
+ *
+ * Unmaps the install buffer from the installer TA, then parses the task
+ * image now owned exclusively by TLK. Parsed manifest data is returned
+ * to the caller via args->te_task_info.
+ */
+static int tlk_handle_app_prepare(te_app_prepare_args_t *args)
+{
+	status_t err = NO_ERROR;
+	task_t *taskp = current_thread->arch.task;
+	task_info_t ti;
+
+	if (!args)
+		return -EINVAL;
+
+	dprintf(INFO, "Parsing app handle 0x%x\n", args->app_handle);
+
+	if (!task_allowed_to_load_tasks(taskp)) {
+		task_print_uuid(CRITICAL, "Client tried to perform "
+				"an operation for which they are "
+				"not permitted ", taskp);
+		return -EACCES;
+	}
+
+	/*
+	 * Remove the shared memory mapping from installer
+	 * memory map. After this only TLK will see the application.
+	 */
+	err = task_unmap_app_mem_from_ta(taskp, args->app_handle);
+	if (err != NO_ERROR) {
+		dprintf(INFO, "Could not unmap TA shared memory (%d)\n", err);
+		task_dealloc_app_load_memory(args->app_handle);
+		return -EINVAL;
+	}
+
+	/*
+	 * Parse application
+	 */
+	memset(&ti, 0, sizeof (ti));
+	err = task_parse_app(args->app_handle, &ti);
+	if (err != NO_ERROR) {
+		/* fixed log typo: was "Could parse loaded application" */
+		dprintf(INFO, "Could not parse loaded application (%d)\n", err);
+		task_dealloc_app_load_memory(args->app_handle);
+		return -EINVAL;
+	}
+	map_task_info_to_te(&args->te_task_info, &ti);
+
+	return 0;
+}
+
+/* Translate wire-format te_task_restrictions_t into the internal form. */
+static void map_task_app_restrictions(task_restrictions_t *to,
+				      te_task_restrictions_t *from)
+{
+	if (!to)
+		return;
+
+	memset(to, 0, sizeof(task_restrictions_t));
+
+	if (!from)
+		return;
+
+	/*
+	 * Copy selected non-security related fields
+	 * that the installer wants to enforce to the task
+	 * when it starts. Zero field values indicate "not-used"
+	 * and are ignored.
+	 */
+	to->min_stack_size = from->min_stack_size;
+	to->min_heap_size = from->min_heap_size;
+
+	memcpy(to->task_name, from->task_name, OTE_TASK_NAME_MAX_LENGTH);
+}
+
+/* @brief Handle OTE_TASK_OP_START: start (or reject) a prepared task. */
+static int tlk_handle_app_start(te_app_start_args_t *args)
+{
+	task_t *self = current_thread->arch.task;
+	task_restrictions_t restrictions;
+	const char *what;
+	status_t err;
+
+	if (!args)
+		return -EINVAL;
+
+	if (!task_allowed_to_load_tasks(self)) {
+		task_print_uuid(CRITICAL, "Client tried to perform "
+				"an operation for which they are "
+				"not permitted ", self);
+		return -EACCES;
+	}
+
+	map_task_app_restrictions(&restrictions, &args->app_restrictions);
+
+	what = args->app_reject ? "reject" : "start";
+	dprintf(INFO, "%s app with handle 0x%x\n", what, args->app_handle);
+
+	err = task_run_app(args->app_handle, args->app_reject, &restrictions);
+	if (err == NO_ERROR)
+		return 0;
+
+	/* Do not log when rejecting a task that no longer exists */
+	if (!args->app_reject || (err != ERR_NOT_FOUND))
+		dprintf(CRITICAL, "Could not %s loaded application (%d)\n",
+			what, err);
+
+	return -EINVAL;
+}
+
+/*
+ * A bit strict: only authorized apps can list which tasks are installed
+ * in the system. However, this is a privileged operation also in GP specs.
+ *
+ * Copy the UUID and TASK_NAME of the task indicated by args->app_index
+ * to the arg buf. If the task has no defined name the app_name field
+ * is left zeroed.
+ */
+static int tlk_list_apps(te_app_list_args_t *args)
+{
+	task_t *taskp   = current_thread->arch.task;
+	task_t *task    = NULL;
+
+	if (!args)
+		return -EINVAL;
+
+	/* listing installed tasks requires installer privileges */
+	if (!task_allowed_to_load_tasks(taskp)) {
+		task_print_uuid(CRITICAL, "Client tried to perform "
+				"an operation for which they are "
+				"not permitted ", taskp);
+		return -EACCES;
+	}
+
+	/* clear the output fields before the lookup */
+	memset(&args->app_uuid, 0, sizeof(te_service_id_t));
+	memset(&args->app_name[0], 0, sizeof (args->app_name));
+
+	/*
+	 * Copy the UUID of the specified task to the
+	 * IOCTL argument.
+	 */
+	task = task_find_task_by_index(args->app_index);
+	if (!task)
+		return -ENOENT;
+
+	memcpy(&args->app_uuid, &task->props.uuid,
+	       sizeof(te_service_id_t));
+
+	/* NOTE(review): assumes app_name is at least as large as task_name
+	 * -- confirm against te_app_list_args_t */
+	if (task->task_name[0])
+		memcpy(&args->app_name[0], &task->task_name[0],
+		       sizeof(task->task_name));
+
+	return 0;
+}
+
+/* @brief Handle OTE_TASK_OP_GET_TASK_INFO: fetch a task's config data.
+ *
+ * Any task may query itself (SELF request type); lookups by index or
+ * UUID require installer privileges.
+ */
+static int tlk_task_get_info(te_get_task_info_t *args)
+{
+	task_t *taskp   = current_thread->arch.task;
+	task_t *task    = NULL;
+	task_info_t ti;
+
+	if (!args)
+		return -EINVAL;
+
+	/* Any task is authorized to fetch it's own manifest information (SELF).
+	 * Installers are authorized to fetch it from any task.
+	 */
+	if (!task_allowed_to_load_tasks(taskp) &&
+	    (args->gti_request_type != OTE_GET_TASK_INFO_REQUEST_SELF)) {
+		task_print_uuid(CRITICAL, "Client tried to perform "
+				"an operation for which they are "
+				"not permitted ", taskp);
+		return -EACCES;
+	}
+
+	/* resolve the target task according to the request type */
+	switch (args->gti_request_type) {
+	case OTE_GET_TASK_INFO_REQUEST_INDEX:
+		task = task_find_task_by_index(args->gtiu_index);
+		break;
+	case OTE_GET_TASK_INFO_REQUEST_UUID:
+		task = task_find_task_by_uuid(&args->gtiu_uuid);
+		break;
+	case OTE_GET_TASK_INFO_REQUEST_SELF:
+		task = taskp;
+		break;
+	default:
+		task = NULL;
+		break;
+	}
+
+	if (!task)
+		return -ENOENT;
+
+	/* copy the properties out via the intermediate task_info_t form */
+	memset(&ti, 0, sizeof (ti));
+	task_get_config(&ti, task);
+	map_task_info_to_te(&args->gti_info, &ti);
+
+	return 0;
+}
+
+/*
+ * Called by syscall OTE_IOCTL_TASK_REQUEST ioctl handler
+ */
+int task_request_handler(te_task_request_args_t *args)
+{
+       int retval = 0;
+
+       if (!args) {
+               retval = -EINVAL;
+               goto exit;
+       }
+
+       switch (args->ia_opcode) {
+       case OTE_TASK_OP_MEMORY_REQUEST:
+               retval = tlk_handle_app_memory_request(&args->ia_load_memory_request);
+               break;
+
+       case OTE_TASK_OP_PREPARE:
+               retval = tlk_handle_app_prepare(&args->ia_prepare);
+               break;
+
+       case OTE_TASK_OP_START:
+               retval = tlk_handle_app_start(&args->ia_start);
+               break;
+
+       case OTE_TASK_OP_LIST:
+               retval = tlk_list_apps(&args->ia_list);
+               break;
+
+       case OTE_TASK_OP_GET_TASK_INFO:
+               retval = tlk_task_get_info(&args->ia_get_task_info);
+               break;
+
+#ifdef DEBUG
+       case OTE_TASK_OP_SYSTEM_INFO:
+               task_system_info();
+               break;
+#endif
+
+       default:
+               retval = -EINVAL;
+               break;
+       }
+
+exit:
+       return retval;
+}
index 6b07967..e71bf31 100644 (file)
 
 #define MODE_EL(x)     ((x) << 2)
 
-/* interrupts masked, start in AARCH32 SVC mode */
-#define MON_INIT_EL1_SPSR_AARCH32      ((0x7 << 6) | MODE_SVC)
+/* flag indicating in which mode CPU is returned */
+#define MON_CPU_RETURN_64      0x0
+#define MON_CPU_RETURN_32      0x1
 
-/* interrupts masked, return to BL in AARCH64 EL2 mode */
-#define MON_INIT_EL2_SPSR_AARCH64      ((0x7 << 6) | MODE_EL(2))
+/* SPSR_EL3 register fields/settings */
+#define MON_SPSR_EXC_MASKED    (0x7 << 6)
 
-/* monitor SCR is AARCH64, NS=0 and RES1 fields */
-#define MON_INIT_EL3_SCR       ((0x1 << 10) | 0x3 << 4)
+/* SCR_EL3 register fields/settings */
+#define MON_SCR_NS_MODE                (0x1 << 0)
+#define MON_SCR_RESV1          (0x3 << 4)
+#define MON_SCR_32BIT          (0x0 << 10)
+#define MON_SCR_64BIT          (0x1 << 10)
 
 /* ARM GIC cpu/dist offsets */
 #define ARM_GIC_GICC_CTLR              0x0
index 6f1c8f7..0c03ff1 100644 (file)
 
 #include <arch/defines.h>
 
-/*
- * Initialize banked copy of MAIR0/MAIR1 registers with the memory
- * attributes the kernel will be using:
- *
- * idx0 = strongly-ordered
- * idx1 = outer: writeback/no alloc, inner: writeback/alloc
- *
- * and let the remaining indexes be allocated when a lookup during
- * a page mapping fails to find an existing entry.
- */
-#define MMU_MEMORY_ATTR_INDIR          0x0000EF00
-
-/* indices into attr indirect regs */
-#define MMU_MEMORY_STRONGLY_ORDERED                    0
-#define MMU_MEMORY_WB_OUTER_NO_ALLOC_INNER_ALLOC       1
-
 #define MMU_MEMORY_SET_ATTR_IDX(val)   (((val) & 0x7) << 2)
 
 #define MMU_MEMORY_WRITE_BACK_NO_ALLOCATE      0xE
index b62b944..3bb219b 100644 (file)
 #define __PSCI_H
 
 #define PSCI_FUNC_ID_VERSION           0x84000000
-#define PSCI_FUNC_ID_CPU_SUSPEND       0x84000001
-#define PSCI_FUNC_ID_CPU_ON            0x84000002
-#define PSCI_FUNC_ID_CPU_OFF           0x84000003
-#define PSCI_FUNC_ID_AFFINITY_INFO     0x84000004
-#define PSCI_FUNC_ID_MIGRATE           0x84000005
+#define PSCI_FUNC_ID_CPU_SUSPEND_LEGACY        0x84000001
+#define PSCI_FUNC_ID_CPU_SUSPEND       0xC4000001
+#define PSCI_FUNC_ID_CPU_OFF           0xC4000002
+#define PSCI_FUNC_ID_CPU_ON            0xC4000003
+#define PSCI_FUNC_ID_AFFINITY_INFO     0xC4000004
+#define PSCI_FUNC_ID_MIGRATE           0xC4000005
 
 #define PSCI_RETURN_SUCCESS            (0)
 #define PSCI_RETURN_NOT_SUPPORTED      (-1)
 #define PSCI_RETURN_NOT_PRESENT                (-7)
 #define PSCI_RETURN_DISABLED           (-8)
 
-#ifndef ASSEMBLY
-/* holds arguments / return value during calls */
-struct psci_frame {
-        uint64_t r[8]; /* r0-r7 */
-};
-#endif
-
 #endif
index c7ac404..ff85bca 100644 (file)
@@ -25,6 +25,7 @@
 #include <arch/arm.h>
 #include <arm64/asm.h>
 #include <psci.h>
+#include <arm64/monitor_macros.h>
 
 /* called both for cold reset and boot_secondary */
 FUNCTION(mon_init_cpu)
@@ -33,7 +34,7 @@ FUNCTION(mon_init_cpu)
        b.ne    .               // error, if not EL3
 
        /* initialize SCR to secure state */
-       mov     x3, #MON_INIT_EL3_SCR
+       mov     x3, #(MON_SCR_RESV1 | MON_SCR_64BIT)
        msr     scr_el3, x3
        isb
 
@@ -82,25 +83,39 @@ FUNCTION(mon_init_cpu)
        ret
 
 /*
- * Return to address saved in __mon_cpu_return_addr, which
- * will be the BL during cold reset or address in NS world
- * during PSCI CPU transistions.
+ * Return to address saved in __mon_cpu_return_addr, in
+ * AARCH32 SVC (non-secure) mode.
  */
-FUNCTION(mon_init_return)
-       /* load per-cpu return address */
-       cpuidx  x12
-       adr     x5, __mon_cpu_return_addr
-       ldr     x3, [x5, x12, lsl #3]
-       msr     elr_el3, x3
+FUNCTION(mon_return_aarch32_ns)
+       /* load return address */
+       cpuidx  x1
+       adr     x2, __mon_cpu_return_addr
+       ldr     x2, [x2, x1, lsl #3]
+
+       msr     elr_el3, x2
+       mov     x2, #(MON_SCR_RESV1 | MON_SCR_32BIT | MON_SCR_NS_MODE)
+       msr     scr_el3, x2
+       mov     x2, #(MON_SPSR_EXC_MASKED | MODE_SVC)
+       msr     spsr_el3, x2
 
-       mov     x3, #MON_INIT_EL3_SCR
-       orr     x3, x3, #1              // return NS=1
-       msr     scr_el3, x3
-       isb
+       eret
+
+/*
+ * Return to address saved in __mon_cpu_return_addr, in
+ * AARCH64 EL2 (non-secure) mode.
+ */
+FUNCTION(mon_return_aarch64_ns)
+       /* load return address */
+       cpuidx  x1
+       adr     x2, __mon_cpu_return_addr
+       ldr     x2, [x2, x1, lsl #3]
+
+       msr     elr_el3, x2
+       mov     x2, #(MON_SCR_RESV1 | MON_SCR_64BIT | MON_SCR_NS_MODE)
+       msr     scr_el3, x2
+       mov     x2, #(MON_SPSR_EXC_MASKED | MODE_EL(2))
+       msr     spsr_el3, x2
 
-       /* go back non-secure in EL2 */
-       mov     x3, #MON_INIT_EL2_SPSR_AARCH64
-       msr     spsr_el3, x3
        eret
 
 /*
@@ -118,7 +133,13 @@ FUNCTION(boot_secondary)
 
        cpuidx  x0
        bl      platform_psci_cpu_has_reset
-       b       mon_init_return
+       b       mon_return_aarch64_ns
+
+/* get the CPU ID */
+FUNCTION(mon_get_cpu_id)
+       mrs     x0, midr_el1
+       ubfx    x0, x0, #4, #12
+       ret
 
 .ltorg
 .align 3
@@ -133,3 +154,11 @@ __mon_cpu_return_addr:
        .rept MONCPUS
        .quad 0
        .endr
+
+.ltorg
+.align 3
+.global __mon_cpu_return_mode
+__mon_cpu_return_mode:
+       .rept MONCPUS
+       .quad 0
+       .endr
diff --git a/lib/monitor/arm64/monitor_fastcall.S b/lib/monitor/arm64/monitor_fastcall.S
new file mode 100644 (file)
index 0000000..f76221e
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/arm.h>
+#include <arm64/asm.h>
+#include <lib/monitor/monitor_vector.h>
+
+/*
+ * SMC fastcall entry, running at EL3 on the monitor stack.
+ *
+ * On entry x0-x7 hold the SMC arguments (x0 = SMC function ID).
+ * NOTE(review): the epilogue pops x9/x10, which appear to have been
+ * pushed by the vector entry stub before branching here -- confirm
+ * against monitor_vector.S.
+ */
+FUNCTION(monitor_fastcall)
+	str	lr, [sp, #-16]!		// save NS world's LR
+
+	/* load fastcall frame */
+	stp	x6, x7, [sp, #-16]!
+	stp	x4, x5, [sp, #-16]!
+	stp	x2, x3, [sp, #-16]!
+	stp	x0, x1, [sp, #-16]!
+
+	/*
+	 * Find index in fastcall_table. If index >= TRUSTED_BASE,
+	 * route through TRUSTED_SERVICE (covers OS/APP fastcalls).
+	 */
+	lsr	x10, x0, #SMC_OWNER_SHIFT
+	and	x10, x10, #SMC_OWNER_MASK
+	cmp	x10, #SMC_OWNER_TRUSTED_BASE
+	mov	x6, #SMC_OWNER_TRUSTED_SERVICE
+	csel	x10, x6, x10, ge
+
+	/* args for fastcall handler (frame, cpu) */
+	mov	x0, sp
+	cpuidx	x1
+
+	/* call fastcall handler */
+	adr	x9, fastcall_table
+	ldr	x10, [x9, x10, lsl #3]
+	blr	x10
+
+	/* restore fastcall frame (returning results) */
+	ldp	x0, x1, [sp], #16
+	ldp	x2, x3, [sp], #16
+	ldp	x4, x5, [sp], #16
+	ldp	x6, x7, [sp], #16
+
+	ldr	lr, [sp], #16		// restore NS world's LR
+	ldp	x9, x10, [sp], #16	// restore scratch
+	eret
+
+/* handler for SMC owner classes with no EL3 service: spin forever */
+unimp_fastcall:
+	b	.
+
+/* fastcall SMCs issued from non-secure, indexed by SMC owner class */
+fastcall_table:
+	.quad	unimp_fastcall		/* ARM Arch service */
+	.quad	unimp_fastcall		/* CPU service */
+	.quad	platform_sip_handler	/* SIP service */
+	.quad	unimp_fastcall		/* OEM service */
+	.quad	platform_psci_handler	/* ARM Standard service (currently, only PSCI) */
+	.quad	unimp_fastcall		/* Trusted (OS/TA) service */
index 69bffaf..ccc96ba 100644 (file)
@@ -31,3 +31,37 @@ FUNCTION(mon_atomic_or)
        stxr    w3, w2, [x0]
        cbnz    w3, 1b
        ret
+
+/* void *memset(void *s, int c, size_t n); */
+FUNCTION(memset)
+       cbz     x0, done
+
+       /* replicate the low byte of x1 into all 8 bytes of x1 */
+       orr     x1, x1, x1, lsl #8
+       orr     x1, x1, x1, lsl #16
+       orr     x1, x1, x1, lsl #32
+
+       mov     x7, #0xF
+       mov     x8, x0
+
+       /* first write 16 byte chunks */
+       bics    xzr, x2, x7
+       b.eq    less_than_16            // memset is < 16 bytes
+
+do_16_bytes:
+       stp     x1, x1, [x8], #16
+       sub     x2, x2, #16
+
+       bics    xzr, x2, x7
+       b.ne    do_16_bytes
+
+less_than_16:
+       cbz     x2, done                // ended on a 16 byte boundary
+
+do_single_byte:
+       /* write trailing bytes */
+       strb    w1, [x8], #1
+       sub     x2, x2, 1
+       cbnz    x2, do_single_byte
+done:
+       ret
index 46b702d..50b166a 100644 (file)
 /* indices into attr indirect regs */
 #define MMU_MEMORY_STRONGLY_ORDERED                     0
 #define MMU_MEMORY_WB_OUTER_NO_ALLOC_INNER_ALLOC        1
+#define MMU_MEMORY_UC_OUTER_UC_INNER                    2
 
 /* mmio (index 0) */
 #define MMU_PTE_L2_BLOCK_MMIO_FLAGS    \
        (MMU_MEMORY_SET_ATTR_IDX(0) | MMU_MEMORY_ACCESS_FLAG |  \
         MMU_MEMORY_AP_P_RW_U_NA | 0x1)
 
-/* mem (index 1) */
-#define MMU_PTE_L2_BLOCK_MEM_FLAGS     \
+/* mem wb (index 1) */
+#define MMU_PTE_L2_BLOCK_MEM_WB_FLAGS  \
        (MMU_MEMORY_SET_ATTR_IDX(1) | MMU_MEMORY_ACCESS_FLAG |  \
+        MMU_MEMORY_SH_INNER_SHAREABLE | \
+        MMU_MEMORY_AP_P_RW_U_NA | 0x1)
+
+/* mem uc (index 2) */
+#define MMU_PTE_L2_BLOCK_MEM_UC_FLAGS  \
+       (MMU_MEMORY_SET_ATTR_IDX(2) | MMU_MEMORY_ACCESS_FLAG |  \
         MMU_MEMORY_AP_P_RW_U_NA | 0x1)
 
 /* value for MAIR register:
  *     idx0 = strongly-ordered,
  *     idx1 = outer: writeback/no alloc, inner: writeback/alloc
+ *     idx2 = outer: non-cacheable, inner: non-cacheable
  */
-#define MMU_MEMORY_ATTR_INDIR  0x0000EF00
+#define MMU_MEMORY_ATTR_INDIR  0x0044EF00
 
 #define MMU_TCR_FLAGS_EL3       \
        (MMU_MEMORY_TCR_PS_40BIT | \
        dsb     sy
 .endm
 
+/* corrupts \vaddr, \length, \pgt and \tmp */
+.macro mmu_unmap_virt, vaddr, length, pgt, tmp
+       lsr     \vaddr, \vaddr, #MMU_L2_BLOCK_SHIFT
+       add     \pgt, \pgt, \vaddr, lsl #MMU_ENTRY_SHIFT
+       mov     \tmp, #(1 << MMU_L2_BLOCK_SHIFT)
+1:
+       /* write entry and update */
+       str     xzr, [\pgt], #(1 << MMU_ENTRY_SHIFT)
+       sub     \length, \length, \tmp
+       cbnz    \length, 1b
+       dsb     sy
+.endm
+
+.macro mmu_roundup, addr, align
+       add     \addr, \addr, \align
+       bic     \addr, \addr, \align
+.endm
+
+.macro mmu_rounddown, addr, align
+       bic     \addr, \addr, \align
+.endm
+
+.macro mmu_map_align, vaddr, paddr, length, eaddr, align
+       mov     \align, #(MMU_L2_BLOCK_SIZE - 1)
+       add     \eaddr, \length, \vaddr
+
+       /* return aligned vaddr, paddr and length */
+       mmu_roundup \eaddr, \align
+       mmu_rounddown \vaddr, \align
+       mmu_rounddown \paddr, \align
+       sub     \length, \eaddr, \vaddr
+.endm
+
+.macro mmu_unmap_align, vaddr, length, eaddr, align
+       mov     \align, #(MMU_L2_BLOCK_SIZE - 1)
+       add     \eaddr, \length, \vaddr
+
+       /* return aligned vaddr and length */
+       mmu_roundup \eaddr, \align
+       mmu_rounddown \vaddr, \align
+       sub     \length, \eaddr, \vaddr
+.endm
+
 /* int mon_mmu_map_mmio(vaddr_t vaddr, paddr_t paddr, uint64_t length) */
 FUNCTION(mon_mmu_map_mmio)
-       mov     x9, #(MMU_L2_BLOCK_SIZE - 1)
-       mov     x10, x0         // save x0
-
-       /* check vaddr/paddr/length are BLOCK size aligned */
-       tst     x0, x9
-       cneg    x0, xzr, ne
-       cbnz    x0, done
-       tst     x1, x9
-       cneg    x0, xzr, ne
-       cbnz    x0, done
-       tst     x2, x9
-       cneg    x0, xzr, ne
-       cbnz    x0, done
-
-       mov     x0, x10         // restore x0
+       /* align vaddr/paddr/length to BLOCK size params */
+       mmu_map_align x0, x1, x2, x9, x10
 
        /* phys address of mon_second_level */
        adr     x3, mon_second_level    // virt addr
@@ -126,12 +164,52 @@ FUNCTION(mon_mmu_map_mmio)
        mmu_map_virt_phys x0, x1, x2, x3, x4, x5, x6
        dsb     sy
        isb
-       tlbi    vmalle1is
+       tlbi    alle3
        dsb     sy
        isb
 
        mov     x0, xzr
-done:
+       ret
+
+/* int mon_mmu_map_uncached(vaddr_t vaddr, paddr_t paddr, uint64_t length) */
+FUNCTION(mon_mmu_map_uncached)
+       /* align vaddr/paddr/length to BLOCK size params */
+       mmu_map_align x0, x1, x2, x9, x10
+
+       /* phys address of mon_second_level */
+       adr     x3, mon_second_level    // virt addr
+       ldr     x4, __mon_phys_offset
+       sub     x3, x3, x4              // phys addr
+       ldr     x4, =MMU_PTE_L2_BLOCK_MEM_UC_FLAGS
+
+       /* create mapping (x5, x6 are scratch) */
+       mmu_map_virt_phys x0, x1, x2, x3, x4, x5, x6
+       dsb     sy
+       isb
+       tlbi    alle3
+       dsb     sy
+       isb
+
+       mov     x0, xzr
+       ret
+
+/* int mon_mmu_unmap(vaddr_t vaddr, uint64_t length) */
+FUNCTION(mon_mmu_unmap)
+       /* align vaddr/length to BLOCK size params */
+       mmu_unmap_align x0, x1, x9, x10
+
+       /* phys address of mon_second_level */
+       adr     x3, mon_second_level    // virt addr
+       ldr     x4, __mon_phys_offset
+       sub     x3, x3, x4              // phys addr
+
+       /* unmap vaddr for length (x4 is scratch) */
+       mmu_unmap_virt x0, x1, x3, x4
+       dsb     sy
+       isb
+       tlbi    alle3
+       dsb     sy
+       isb
        ret
 
 /* uint64_t mon_virt_phys_el3(uint64_t vaddr) */
@@ -157,7 +235,7 @@ spin_wait:
        adr     x4, mon_first_level     // phys addr
        msr     ttbr0_el3, x4
 
-       tlbi    vmalle1is
+       tlbi    alle3
        dsb     sy
        isb
 
@@ -192,13 +270,13 @@ FUNCTION(mon_setup_pagetable)
 
        /* map MONBASE -> carveout in mon_second_level */
        ldr     x4, =MONBASE            // virt
-       ldr     x5, =MMU_PTE_L2_BLOCK_MEM_FLAGS
+       ldr     x5, =MMU_PTE_L2_BLOCK_MEM_WB_FLAGS
        mov     x10, x0                 // phys (carveout base)
        mov     x12, x2                 // size
        mov     x13, x3                 // phys pgt
        mmu_map_virt_phys x4, x10, x12, x13, x5, x6, x7
 
-       /* indentity map carveout in mon_second_level */
+       /* identity map carveout in mon_second_level */
        mov     x4, x0                  // virt
        mov     x10, x0                 // phys (carveout base)
        mov     x12, x2                 // size
index bd99267..4f02d20 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm.h>
 #include <arch/arm.h>
 #include <arm64/asm.h>
+#include <arm64/monitor_macros.h>
 
 /* vector entry points */
 .macro  vec_entry, label
@@ -84,8 +85,8 @@ FUNCTION(_vector_el3)
 
 /* void mon_clear_bss(void) */
 FUNCTION(mon_clear_bss)
-       ldr     x0, =__bss_start
-       ldr     x1, =__bss_end
+       adr     x0, __bss_start
+       adr     x1, __bss_end
        subs    x1, x1, x0              /* size in bytes */
        b.eq    2f
 1:
@@ -101,6 +102,14 @@ FUNCTION(mon_clear_bss)
  * return).
  */
 FUNCTION(mon_start_tlk_el1)
+        /*
+        * Save EL1 entry state before secureos init (which modifies
+        * the same EL1 arch state), so on return to the BL, it can
+        * be restored.
+        */
+       adr     x3, el1_non_secure_context
+       mon_save_el1_regs x3
+
        /* entry into EL1 is at _end of monitor binary */
        ldr     x6, __mon_phys_offset
        adr     x4, _end
@@ -120,7 +129,7 @@ FUNCTION(mon_start_tlk_el1)
 
        mon_scr_secure_32 x3
 
-       ldr     x3, =MON_INIT_EL1_SPSR_AARCH32
+       mov     x3, #(MON_SPSR_EXC_MASKED | MODE_SVC)
        msr     spsr_el3, x3
        msr     elr_el3, x5
        eret
@@ -167,6 +176,9 @@ _reset:
        adr     x3, __mon_bootarg_addr
        str     x1, [x3]
 
+       /* clear early while serialized */
+       bl      mon_clear_bss
+
        bl      mon_init_cpu
 
        /* setup MMU pagetables, (args: base, off, size) */
@@ -182,8 +194,6 @@ _reset:
        msr     vbar_el3, x0
        isb
 
-       bl      mon_clear_bss
-
        cpuidx  x0
        bl      platform_psci_init
        b       mon_start_tlk_el1
index 9da39ef..3df92c7 100644 (file)
@@ -20,7 +20,6 @@
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
-#include <config.h>
 #include <asm.h>
 #include <arm64/asm.h>
 #include <arm64/monitor_macros.h>
 #define        EC_SMC_AARCH32          0x13
 #define        EC_SMC_AARCH64          0x17
 
+.macro mon_handle_aarch_switch, scr, scr2
+       ldr     \scr, =SMC_SIP_AARCH_SWITCH
+       cmp     \scr, x0
+       b.ne    1f      // continue, not SMC_SIP_AARCH_SWITCH
+
+       /* save address/mode and use during return */
+       cpuidx  \scr
+       adr     \scr2, __mon_cpu_return_addr
+       str     x1, [\scr2, \scr, lsl #3]
+       adr     \scr2, __mon_cpu_return_mode
+       str     x2, [\scr2, \scr, lsl #3]
+
+       ldp     \scr, \scr2, [sp], #16  // restore scratch
+       mov     x0, xzr                 // return success
+
+       cbz     x2, mon_return_aarch64_ns
+       b       mon_return_aarch32_ns
+1:
+.endm
+
 /*
  * Occurs from either non-secure/secure EL1.
  *
@@ -55,13 +74,27 @@ FUNCTION(handle_exc_aarch32)
        cmp     x9, #EC_SMC_AARCH32
        b.ne    .               // fail
 
-       /* incoming SMC should be from secure world */
+       /* check for 32bit Trusted OS (TOS) calls */
+       and     x9, x0, #(SMC_OWNER_MASK << SMC_OWNER_SHIFT)
+       ldr     x10, =SMC_TOS_CALL
+       cmp     x9, x10
+       b.eq    handle_trusted_os_call
+
+       /* service NS SIP call to switch aarch */
+       mon_handle_aarch_switch x9, x10
+       b       .               // unrecognized aarch32 SMC
+
+handle_trusted_os_call:
+       /* SMC expected from secureos in EL1(S) */
        mrs     x9, scr_el3
-       tst     x9, #1
-       b.ne    .               // fail
+       tst     x9, #MON_SCR_NS_MODE
+       b.ne    .               // not in secure mode
+
+       and     x0, x0, #SMC_TOS_FUNC_ID_MASK
+       cmp     x0, #SMC_TOS_MAX_FUNC_IDX
+       b.gt    .               // too large an index
 
        /* call function at tos_table[idx] */
-       and     w0, w0, #SMC_TOS_FUNC_ID_MASK
        adr     x9, tos_table
        ldr     x10, [x9, x0, lsl #3]
        br      x10
@@ -94,8 +127,12 @@ tos_initial_ns_return:
        adr     x3, el1_secure_context
        mon_save_el1_regs x3
 
+       /* restore NS EL1 state (from monitor entry point) */
+       adr     x3, el1_non_secure_context
+       mon_restore_el1_regs x3
+
        bl      platform_psci_coldboot_epilog
-       b       mon_init_return
+       b       mon_return_aarch64_ns
 
 tos_init_shared_addr:
        ldp     x9, x10, [sp], #16      // restore scratch
@@ -213,41 +250,17 @@ secure_el1_smc64:
        /*
         * SMCs from 64bit non-secure world.
         *
-        * This is the path for PSCI calls and Trusted OS SMCs interfacing
-        * with secure TAs. For PSCI, they would be serviced entirely within
-        * the monitor (none are currently being used). Trusted OS SMCs
-        * need to transition to the secureos in EL1 for servicing.
+        * This is the path for both monitor fastcalls (i.e. those serviced
+        * entirely within the monitor) and Trusted OS SMCs interfacing with
+        * secure TAs (which call the secureos in EL1 for handling).
         */
 non_secure_el1_smc64:
-       /*
-        * Currently, only handle ARM STD (pcsi) fastcalls, all
-        * others take the standard call code path.
-        */
-       tst     x0, #SMC_FASTCALL
-       b.eq    non_secure_stdcall
-       tst     x0, #(SMC_OWNER_ARM_STD << SMC_OWNER_SHIFT)
-       b.eq    non_secure_stdcall
+       /* service NS SIP call to switch aarch */
+       mon_handle_aarch_switch x9, x10
 
-       str     lr, [sp, #-8]!          // save lr
-
-       /*
-        * Find index in fastcall_table. If index >= TRUSTED_BASE,
-        * route through TRUSTED_SERVICE (covers OS/APP fastcalls).
-        */
-       lsr     x10, x0, #SMC_OWNER_SHIFT
-       and     x10, x10, #SMC_OWNER_MASK
-       cmp     x10, #SMC_OWNER_TRUSTED_BASE
-       mov     x9, #SMC_OWNER_TRUSTED_SERVICE
-       csel    x10, x9, x10, ge
-
-       /* call fastcall handler */
-       adr     x9, fastcall_table
-       ldr     x10, [x9, x10, lsl #3]
-       blr     x10
-
-       ldr     lr, [sp], #8            // restore lr
-       ldp     x9, x10, [sp], #16      // restore scratch
-       eret
+       /* handle fastcall SMCs */
+       tst     x0, #SMC_FASTCALL
+       b.ne    monitor_fastcall
 
 non_secure_stdcall:
        /* save incoming SMC args from registers */
@@ -269,8 +282,6 @@ non_secure_stdcall:
         */
        ldr     w3, =0x32000004         // SMC_TOS_NS_IRQ_PENDING_VECTOR
        cmp     w0, w3
-       ldr     w3, =0x32000008         // SMC_TOS_SS_REGISTER_HANDLER
-       ccmp    w0, w3, #0x4, ne
        b.ne    call_secureos
 
        mov     x0, #0
@@ -289,29 +300,18 @@ call_secureos:
        mon_scr_secure_32 x3
        eret
 
-unimp_fastcall:
-       b       .
-
 .align 3
 el1_secure_context:
        .rept   NUM_CTX_REGS
        .quad   0
        .endr
 
+.global el1_non_secure_context
 el1_non_secure_context:
        .rept   NUM_CTX_REGS
        .quad   0
        .endr
 
-/* fastcall SMCs issued from non-secure */
-fastcall_table:
-       .quad   unimp_fastcall          /* ARM Arch service */
-       .quad   unimp_fastcall          /* CPU service */
-       .quad   unimp_fastcall          /* SIP service */
-       .quad   unimp_fastcall          /* OEM service */
-       .quad   arm_std_fastcall        /* ARM Standard service */
-       .quad   unimp_fastcall          /* Trusted (OS/TA) service */
-
 /* SMCs issued from the Trusted OS */
 tos_table:
        .quad   -1
index d2f917e..2e2aa45 100644 (file)
@@ -29,7 +29,7 @@ MODULE_SRCS += \
        $(LOCAL_DIR)/arm64/monitor_start.S \
        $(LOCAL_DIR)/arm64/monitor_vector.S \
        $(LOCAL_DIR)/arm64/monitor_cpu.S \
-       $(LOCAL_DIR)/arm64/monitor_psci.S \
+       $(LOCAL_DIR)/arm64/monitor_fastcall.S \
        $(LOCAL_DIR)/arm64/monitor_lib.S \
        $(LOCAL_DIR)/arm64/monitor_mmu.S
 
index 2c76f39..aad8fd0 100644 (file)
@@ -54,8 +54,3 @@ __WEAK bool platform_syscall_handler(void *arg)
 {
        return false;
 }
-
-__WEAK bool platform_intr_ready(void)
-{
-       return true;
-}
index ef13a8c..6bd1d6b 100644 (file)
@@ -59,11 +59,10 @@ struct int_handler_struct {
        void *arg;
 };
 
-/* location in NS to have IRQ serviced */
-unsigned long _jump_to_ns_irq_addr = 0;
-
+#if DEBUG
 /* storage containing state at time of last interrupt */
 static struct arm_iframe arm_iframe_last;
+#endif
 
 static struct int_handler_struct int_handler_table[NR_IRQS];
 static uint32_t tegra_gic_cpu_base = TEGRA_ARM_INT_CPU_BASE;
@@ -78,28 +77,6 @@ static uint32_t ictlr_reg_base[] = {
 
 #define vectorToController(vector) (((vector) - 32) / 32)
 
-static bool platform_ns_handler_set;
-
-bool platform_intr_ready(void)
-{
-       /* return if normal world handler is setup */
-       return platform_ns_handler_set;
-}
-
-inline void platform_set_intr_ready_state(bool ready, struct tz_monitor_frame *frame)
-{
-       /* update state of normal world handler */
-       platform_ns_handler_set = ready;
-#if NOT_YET
-       /* enable intrs, if not in a critical section (and enabled in NS world) */
-       if (ready) {
-               if (!in_critical_section() && !(frame->spsr & 0x80)) {
-                       arch_enable_ints();
-               }
-       }
-#endif
-}
-
 status_t mask_interrupt(unsigned int vector)
 {
        uint32_t base = ictlr_reg_base[vectorToController(vector)];
@@ -136,11 +113,16 @@ status_t unmask_interrupt(unsigned int vector)
 enum handler_return platform_irq(struct arm_iframe *iframe)
 {
        enum handler_return ret = INT_NO_RESCHEDULE;
-       struct tz_monitor_frame tzframe;
        uint32_t vector;
+       struct tz_monitor_frame *smc_frame = NULL, frame;
 
-       /* save interrupt state */
+#if DEBUG
+       /*
+        * debug aid to know where within the secure world the last interrupt
+        * occurred
+        */
        memcpy((void *)&arm_iframe_last, iframe, sizeof(arm_iframe_last));
+#endif
 
        /* read GIC interrupt ack register */
        vector = *REG32(tegra_gic_cpu_base + GIC_CPU_ICCIAR) & 0x3FF;
@@ -153,15 +135,21 @@ enum handler_return platform_irq(struct arm_iframe *iframe)
                ASSERT(vector == 1022);
        }
 
-       /* create a frame to use to return to the NS world */
-       memset(&tzframe, 0, sizeof(tzframe));
+#if defined(WITH_MONITOR_BIN)
+       memset(&frame, 0, sizeof(struct tz_monitor_frame));
+       frame.r[0] = SMC_ERR_PREEMPT_BY_IRQ;
 
-       ASSERT(_jump_to_ns_irq_addr);
-       tzframe.pc = _jump_to_ns_irq_addr;
-       tzframe.spsr = MODE_SVC;
+       smc_frame = tz_switch_to_ns(SMC_TOS_COMPLETION, &frame);
+       while (smc_frame->r[0] != SMC_TOS_RESTART) {
+               frame.r[0] = SMC_ERR_PREEMPT_BY_IRQ;
+               smc_frame = tz_switch_to_ns(SMC_TOS_COMPLETION, &frame);
+       }
+#else
+       smc_frame = tz_switch_to_ns(SMC_TOS_PREEMPT_BY_IRQ, NULL);
+       while (smc_frame)
+               smc_frame = tz_switch_to_ns(SMC_TOS_PREEMPT_BY_IRQ, NULL);
+#endif
 
-       /* service in the NS world */
-       platform_handle_tz_smc(SMC_TOS_PREEMPT_BY_IRQ, &tzframe);
        return ret;
 }
 
index 5ac8616..a0910b6 100644 (file)
 #define CLK_OUT_ENB_TZRAM_SHIFT                30
 #define CLK_OUT_ENB_TZRAM_ENABLE       1
 
-#define SZ_1MB 0x00100000
-
-#define MC_VIDEO_PROTECT_BOM           0x648
-#define MC_VIDEO_PROTECT_SIZE_MB       0x64c
-
-status_t platform_program_vpr(uint32_t vpr_base, uint32_t vpr_size)
-{
-       static uint32_t save_vpr_base=0, save_vpr_size=0;
-       uint32_t save_vpr_end, vpr_end;
-
-       if (!save_vpr_base && !save_vpr_size) {
-               save_vpr_base = *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_BOM);
-               save_vpr_size = *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_SIZE_MB);
-
-               /* apply nvtboot WAR as initial carveout may include HIVEC_BASE */
-               if ((uint64_t)save_vpr_base + (save_vpr_size * SZ_1MB) > NV_ARM_CORE_HIVEC_BASE) {
-                       save_vpr_end = ROUNDDOWN(NV_ARM_CORE_HIVEC_BASE, SZ_1MB);
-                       save_vpr_size = (save_vpr_end - save_vpr_base) / SZ_1MB;
-               }
-       }
-
-       /* check if vpr_size and vpr_base are MB align */
-       if ((vpr_size & (SZ_1MB-1)) || (vpr_base & (SZ_1MB-1))  ) {
-               dprintf(CRITICAL, "%s: vpr base/size not MB align\n", __func__);
-               return ERR_GENERIC;
-       }
-
-       vpr_end = vpr_base + vpr_size;
-       vpr_size = vpr_size / SZ_1MB;
-       save_vpr_end = save_vpr_base + (save_vpr_size * SZ_1MB);
-
-       if (vpr_base > save_vpr_end || save_vpr_base > vpr_end) {
-               memset((void*)save_vpr_base, 0, (save_vpr_size * SZ_1MB)); //non-overlap
-       } else {
-               if (save_vpr_base < vpr_base) {
-                       memset((void*)save_vpr_base, 0, vpr_base-save_vpr_base); //shrink/shift at base
-               }
-               if (save_vpr_end > vpr_end) {
-                       memset((void*)vpr_end, 0, save_vpr_end-vpr_end); //shrink at end
-               }
-       }
-
-       *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_BOM) = vpr_base;
-       *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_SIZE_MB) = vpr_size;
-       save_vpr_base = vpr_base;
-       save_vpr_size = vpr_size;
-
-       return NO_ERROR;
-}
-
 void platform_setup_keys(void)
 {
        boot_params *boot_params_ptr;
index f8b0f1d..f73cb23 100644 (file)
@@ -58,7 +58,6 @@ extern uint32_t __jumpback_addr;
 
 uint32_t debug_uart_id = DEFAULT_DEBUG_PORT;
 
-static uint32_t _jump_to_ns_ss_op;
 static te_ss_op_t *ss_op_shmem;
 static te_ss_op_t *ns_ss_op_shmem;
 
@@ -78,7 +77,7 @@ void platform_early_init(void)
 
 void platform_idle(void)
 {
-       struct tz_monitor_frame frame;
+       struct tz_monitor_frame frame, *smc_frame;
 #if ARM_CPU_CORTEX_A9
        uint32_t val;
 #endif
@@ -88,9 +87,9 @@ void platform_idle(void)
        platform_secure_dram_aperture();
 #endif
 
+#if !defined(WITH_MONITOR_BIN)
        memset(&frame, 0, sizeof(frame));
 
-#if !defined(WITH_MONITOR_BIN)
        ASSERT(__jumpback_addr);
        frame.pc = __jumpback_addr;
        frame.spsr = __save_boot_cpsr;  /* interrupts disabled, in svc */
@@ -110,7 +109,11 @@ void platform_idle(void)
 
        dputs(CRITICAL, "TLK initialization complete. Jumping to non-secure world\n");
 
-       platform_handle_tz_smc(SMC_TOS_INITIAL_NS_RETURN, &frame);
+       smc_frame = tz_switch_to_ns(SMC_TOS_INITIAL_NS_RETURN, &frame);
+       while (smc_frame) {
+               tz_stdcall_handler(smc_frame);
+               smc_frame = tz_switch_to_ns(SMC_TOS_COMPLETION, smc_frame);
+       }
 }
 
 void platform_init(void)
@@ -196,26 +199,12 @@ uint32_t platform_get_time_us(void)
        return *(volatile uint32_t *)(TEGRA_TMRUS_BASE);
 }
 
-void platform_handle_tz_smc(uint32_t smc_type, struct tz_monitor_frame *ns_frame)
-{
-       struct tz_monitor_frame *smc_frame;
-
-       smc_frame = tz_switch_to_ns(smc_type, ns_frame);
-       while (smc_frame) {
-               tz_handle_monitor(smc_frame);
-               smc_frame = tz_switch_to_ns(SMC_TOS_COMPLETION, ns_frame);
-       }
-}
-
 status_t platform_ss_register_handler(struct tz_monitor_frame *frame)
 {
-       /* r[1] -> address to ns fs callback function */
-       _jump_to_ns_ss_op = frame->r[1];
-
-       /* r[2] -> address of shared fs operation buffer */
-       ns_ss_op_shmem = (te_ss_op_t *)(uintptr_t)frame->r[2];
+       /* r[1] -> address of shared fs operation buffer */
+       ns_ss_op_shmem = (te_ss_op_t *)(uintptr_t)frame->r[1];
 
-       ss_op_shmem = (te_ss_op_t *)tz_map_shared_mem((addr_t)ns_ss_op_shmem,
+       ss_op_shmem = (te_ss_op_t *)tz_map_shared_mem((nsaddr_t)frame->r[1],
                                                sizeof(*ss_op_shmem));
        if (!ss_op_shmem)
                return ERR_GENERIC;
@@ -252,9 +241,23 @@ status_t set_log_phy_addr(nsaddr_t _ns_cb_struct_addr)
        return NO_ERROR;
 }
 
+static int validate_rpmb_frame_arg(te_storage_param_t *arg)
+{
+       /* validate frame length */
+       if (arg->mem.len != RPMB_FRAME_SIZE)
+               return -EINVAL;
+
+       /* validate frame memory range */
+       if (!task_valid_address((vaddr_t)arg->mem.base, arg->mem.len))
+               return -EFAULT;
+
+       return 0;
+}
+
 int platform_ss_request_handler(te_storage_request_t *req)
 {
-       struct tz_monitor_frame frame;
+       struct tz_monitor_frame frame, *smc_frame;
+       int result;
 
        ss_op_shmem->type = req->type;
        ss_op_shmem->result = 0;
@@ -263,47 +266,44 @@ int platform_ss_request_handler(te_storage_request_t *req)
 
        switch (ss_op_shmem->type) {
        case OTE_FILE_REQ_TYPE_CREATE:
-               /* arg #0: dirname buffer */
+               /* input args: dirname, filename, flags */
                strncpy(ss_op_shmem->params.f_create.dname,
                        req->args[0].mem.base,
                        sizeof(ss_op_shmem->params.f_create.dname));
-               /* arg #1: filename buffer */
                strncpy(ss_op_shmem->params.f_create.fname,
                        req->args[1].mem.base,
                        sizeof(ss_op_shmem->params.f_create.fname));
-               /* arg #2: flags */
                ss_op_shmem->params.f_create.flags = req->args[2].val.a;
 
-               ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_create);
+               ss_op_shmem->params_size =
+                       sizeof(ss_op_shmem->params.f_create);
                break;
        case OTE_FILE_REQ_TYPE_DELETE:
-               /* arg #0: dirname buffer */
+               /* input args: dirname, filename */
                strncpy((char *)ss_op_shmem->params.f_delete.dname,
                        req->args[0].mem.base,
                        sizeof(ss_op_shmem->params.f_delete.dname));
-               /* arg #1: filename buffer */
                strncpy((char *)ss_op_shmem->params.f_delete.fname,
                        req->args[1].mem.base,
                        sizeof(ss_op_shmem->params.f_delete.fname));
 
-               ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_delete);
+               ss_op_shmem->params_size =
+                       sizeof(ss_op_shmem->params.f_delete);
                break;
        case OTE_FILE_REQ_TYPE_OPEN:
-               /* arg #0: dirname buffer */
+               /* input args: dirname, filename, flags */
                strncpy((char *)ss_op_shmem->params.f_open.dname,
                        req->args[0].mem.base,
                        sizeof(ss_op_shmem->params.f_open.dname));
-               /* arg #1: filename buffer */
                strncpy((char *)ss_op_shmem->params.f_open.fname,
                        req->args[1].mem.base,
                        sizeof(ss_op_shmem->params.f_open.fname));
-               /* arg #2: open flags */
                ss_op_shmem->params.f_open.flags = req->args[2].val.a;
 
                ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_open);
                break;
        case OTE_FILE_REQ_TYPE_CLOSE:
-               /* arg #0: file handle */
+               /* input args: file handle */
                ss_op_shmem->params.f_close.handle = req->args[0].val.a;
 
                ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_close);
@@ -315,9 +315,14 @@ int platform_ss_request_handler(te_storage_request_t *req)
                        return -EFAULT;
                }
 
-               /* arg #0: file handle */
+               /* validate read buffer size */
+               if (req->args[1].mem.len >
+                       sizeof(ss_op_shmem->params.f_read.data)) {
+                       return -EINVAL;
+               }
+
+               /* input args: file_handle, read buffer */
                ss_op_shmem->params.f_close.handle = req->args[0].val.a;
-               /* arg #1: read buffer attributes */
                ss_op_shmem->params.f_read.data_size = req->args[1].mem.len;
 
                ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_read);
@@ -329,12 +334,15 @@ int platform_ss_request_handler(te_storage_request_t *req)
                        return -EFAULT;
                }
 
-               /* arg #0: file handle */
+               /* validate write buffer size */
+               if (req->args[1].mem.len >
+                       sizeof(ss_op_shmem->params.f_write.data)) {
+                       return -EINVAL;
+               }
+
+               /* input args: file_handle, write buffer */
                ss_op_shmem->params.f_write.handle = req->args[0].val.a;
-               /* arg #1: write buffer attributes */
-               ss_op_shmem->params.f_write.data_size =
-                       MIN(req->args[1].mem.len,
-                               sizeof(ss_op_shmem->params.f_write.data));
+               ss_op_shmem->params.f_write.data_size = req->args[1].mem.len;
                memcpy((void*)ss_op_shmem->params.f_write.data,
                        req->args[1].mem.base,
                        ss_op_shmem->params.f_write.data_size);
@@ -342,44 +350,107 @@ int platform_ss_request_handler(te_storage_request_t *req)
                ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_write);
                break;
        case OTE_FILE_REQ_TYPE_GET_SIZE:
-               /* arg #0: file handle */
+               /* input arg: file_handle */
                ss_op_shmem->params.f_getsize.handle = req->args[0].val.a;
 
-               ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_getsize);
+               ss_op_shmem->params_size =
+                       sizeof(ss_op_shmem->params.f_getsize);
                break;
        case OTE_FILE_REQ_TYPE_SEEK:
-               /* arg #0: file handle */
+               /* input args: file_handle, offset, whence */
                ss_op_shmem->params.f_seek.handle = req->args[0].val.a;
-               /* arg #1: offset */
                ss_op_shmem->params.f_seek.offset = req->args[1].val.a;
-               /* arg #2: whence value */
                ss_op_shmem->params.f_seek.whence = req->args[2].val.a;
 
                ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_seek);
                break;
        case OTE_FILE_REQ_TYPE_TRUNC:
-               /* arg #0: file handle */
+               /* input args: file_handle, length */
                ss_op_shmem->params.f_trunc.handle = req->args[0].val.a;
-               /* arg #0: truncate length */
                ss_op_shmem->params.f_trunc.length = req->args[1].val.a;
 
                ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_trunc);
                break;
+       case OTE_FILE_REQ_TYPE_RPMB_WRITE:
+               /* validate request frame */
+               result = validate_rpmb_frame_arg(&req->args[0]);
+               if (result) {
+                       dprintf(CRITICAL, "%s: write req frame invalid\n",
+                               __func__);
+                       return result;
+               }
+
+               /* validate request-response frame */
+               result = validate_rpmb_frame_arg(&req->args[1]);
+               if (result) {
+                       dprintf(CRITICAL, "%s: write req-resp frame invalid\n",
+                               __func__);
+                       return result;
+               }
+
+               /* validate response frame */
+               result = validate_rpmb_frame_arg(&req->args[2]);
+               if (result) {
+                       dprintf(CRITICAL, "%s: write resp frame invalid\n",
+                               __func__);
+                       return result;
+               }
+
+               /* input args: request frame, request-response frame */
+               memcpy((void*)ss_op_shmem->params.f_rpmb_write.req_frame,
+                       req->args[0].mem.base, req->args[0].mem.len);
+               memcpy((void*)ss_op_shmem->params.f_rpmb_write.req_resp_frame,
+                       req->args[1].mem.base, req->args[1].mem.len);
+
+               ss_op_shmem->params_size =
+                       sizeof(ss_op_shmem->params.f_rpmb_write);
+               break;
+       case OTE_FILE_REQ_TYPE_RPMB_READ:
+               /* validate request frame */
+               result = validate_rpmb_frame_arg(&req->args[0]);
+               if (result) {
+                       dprintf(CRITICAL, "%s: read req frame invalid\n",
+                               __func__);
+                       return result;
+               }
+
+               /* validate response frame */
+               result = validate_rpmb_frame_arg(&req->args[1]);
+               if (result) {
+                       dprintf(CRITICAL, "%s: read resp frame invalid\n",
+                               __func__);
+                       return result;
+               }
+
+               /* input arg: request frame */
+               memcpy((void*)ss_op_shmem->params.f_rpmb_read.req_frame,
+                       req->args[0].mem.base, req->args[0].mem.len);
+
+               ss_op_shmem->params_size =
+                       sizeof(ss_op_shmem->params.f_rpmb_read);
+               break;
        default:
                return -EINVAL;
        }
 
-       /* actual file read/write */
-       memset(&frame, 0, sizeof(struct tz_monitor_frame));
-
-       frame.pc = _jump_to_ns_ss_op;
-       frame.spsr = MODE_SVC;
-
        /* adjust size down to only include required parameters */
-       frame.r[0] = (sizeof(te_ss_op_t) - sizeof(te_ss_req_params_t)) +
-               ss_op_shmem->params_size;
+       ss_op_shmem->req_size = (sizeof(te_ss_op_t) - sizeof(uint32_t) -
+               sizeof(te_ss_req_params_t)) + ss_op_shmem->params_size;
+
+#if defined(WITH_MONITOR_BIN)
+       memset(&frame, 0, sizeof(struct tz_monitor_frame));
+       frame.r[0] = SMC_ERR_PREEMPT_BY_FS;
 
-       platform_handle_tz_smc(SMC_TOS_PREEMPT_BY_FS, &frame);
+       smc_frame = tz_switch_to_ns(SMC_TOS_COMPLETION, &frame);
+       while (smc_frame->r[0] != SMC_TOS_RESTART) {
+               frame.r[0] = SMC_ERR_PREEMPT_BY_IRQ;
+               smc_frame = tz_switch_to_ns(SMC_TOS_COMPLETION, &frame);
+       }
+#else
+       smc_frame = tz_switch_to_ns(SMC_TOS_PREEMPT_BY_FS, NULL);
+       while (smc_frame)
+               smc_frame = tz_switch_to_ns(SMC_TOS_PREEMPT_BY_IRQ, NULL);
+#endif
 
        req->result = ss_op_shmem->result;
        if (req->result != 0) {
@@ -391,21 +462,32 @@ int platform_ss_request_handler(te_storage_request_t *req)
        /* move any expected return data into request structure */
        switch (ss_op_shmem->type) {
        case OTE_FILE_REQ_TYPE_OPEN:
-               /* arg #3: return file handle */
+               /* output arg: file_handle */
                req->args[3].val.a = ss_op_shmem->params.f_open.handle;
                break;
        case OTE_FILE_REQ_TYPE_READ:
-               /* arg #1: return amount of data read */
+               /* output args: amount of data read, data */
                req->args[1].mem.len = ss_op_shmem->params.f_read.data_size;
-               /* arg #1: return data read */
                memcpy(req->args[1].mem.base,
                        (void *)ss_op_shmem->params.f_read.data,
                        req->args[1].mem.len);
                break;
        case OTE_FILE_REQ_TYPE_GET_SIZE:
-               /* arg #1: return file size */
+               /* output arg: file size */
                req->args[1].val.a = ss_op_shmem->params.f_getsize.size;
                break;
+       case OTE_FILE_REQ_TYPE_RPMB_WRITE:
+               /* output arg: response frame */
+               memcpy(req->args[2].mem.base,
+                       (void *)ss_op_shmem->params.f_rpmb_write.resp_frame,
+                       req->args[2].mem.len);
+               break;
+       case OTE_FILE_REQ_TYPE_RPMB_READ:
+               /* output arg: response frame */
+               memcpy(req->args[1].mem.base,
+                       (void *)ss_op_shmem->params.f_rpmb_read.resp_frame,
+                       req->args[1].mem.len);
+               break;
        default:
                break;
        }
index c9bdf2f..c164b92 100644 (file)
@@ -79,23 +79,6 @@ unsigned int cpu_power_down_mode = 0;
 /* tracks if we need to load resume handlers into tzram */
 static bool load_tzram_lp1_resume_handler = true;
 
-static void pm_save_monitor_stack(void)
-{
-       /*
-        * Save the (adjusted) value of the monitor stack after the
-        * next frame is popped.
-        */
-       __asm__ volatile (
-               "mrs    r1, cpsr        \n" // save current mode
-               "cps    #0x16           \n" // change to monitor mode
-               "mov    %0, sp          \n" // save current mon sp
-               "add    %0, %0, %1      \n" // account for current frame
-               "msr    cpsr_c, r1      \n" // restore previous mode
-               : "=r" (mon_stack_top)
-               : "I" (sizeof(struct tz_monitor_frame))
-               : "r1" );
-}
-
 static void pm_set_monitor_stack(void)
 {
        void *stack_top_mon;
@@ -262,7 +245,6 @@ static void pm_handle_lp0_suspend_smc(struct tz_monitor_frame *frame)
 
        /* save off current state */
        cpu_save_context();
-       pm_save_monitor_stack();
 
        /* need to reload LP1 handler into tzram before next LP1 suspend */
        load_tzram_lp1_resume_handler = true;
@@ -288,7 +270,6 @@ static void pm_handle_lp1_suspend_smc(struct tz_monitor_frame *frame)
 
        /* save off current state */
        cpu_save_context();
-       pm_save_monitor_stack();
 
        /* save off state needed by LP1 resume handler */
        TZRAM_STORE(TZRAM_CPU_AVOID_CLKM_SWITCH,
@@ -316,7 +297,6 @@ static void pm_handle_lp2_suspend_smc(struct tz_monitor_frame *frame)
 
        /* save off current state */
        cpu_save_context();
-       pm_save_monitor_stack();
 
        /* flush dcache last */
        flush_dcache_all();
index 4115d3d..d3aad2a 100644 (file)
@@ -50,6 +50,8 @@ extern unsigned int ote_logger_enabled;
 
 #if !defined(WITH_MONITOR_BIN)
 extern struct tz_monitor_frame *go_nonsecure(uint32_t smc_type, struct tz_monitor_frame *);
+extern unsigned long mon_fastcall_frame_addr;
+extern unsigned long mon_stdcall_frame_addr;
 #endif
 
 /* location in NS of req/param structs */
@@ -144,10 +146,16 @@ vaddr_t tz_map_shared_mem(nsaddr_t ns_addr, uint32_t size)
 void tz_init(void)
 {
 #if !defined(WITH_MONITOR_BIN)
+       /* allocate space for fast/std call monitor frames */
+       mon_fastcall_frame_addr = (unsigned long)calloc(1, sizeof(uint64_t) * 15);
+       ASSERT(mon_fastcall_frame_addr);
+
+       mon_stdcall_frame_addr = (unsigned long)calloc(1, sizeof(uint64_t) * 15);
+       ASSERT(mon_stdcall_frame_addr);
+
        pm_init();
 #endif
        te_intf_init();
-       platform_set_intr_ready_state(false, NULL);
 }
 
 struct tz_monitor_frame *tz_switch_to_ns(uint32_t smc_type, struct tz_monitor_frame *frame)
@@ -177,8 +185,6 @@ struct tz_monitor_frame *tz_switch_to_ns(uint32_t smc_type, struct tz_monitor_fr
                ns_vfp_hw_context->valid = false;
        }
 
-       exit_critical_section();
-
        arm_set_vfp_fpexc(ns_vfp_hw_context->fpexc);
 
 #if defined(WITH_MONITOR_BIN)
@@ -189,6 +195,8 @@ struct tz_monitor_frame *tz_switch_to_ns(uint32_t smc_type, struct tz_monitor_fr
        incoming_smc = go_nonsecure(smc_type, frame);
 #endif
 
+       exit_critical_section();
+
        /* on entry, save NS fpexc and disable to detect vfp usage */
        ns_vfp_hw_context->fpexc = arm_get_vfp_fpexc();
        arm_set_vfp_fpexc(0x0);
@@ -227,10 +235,6 @@ void tz_handle_system_smc(struct tz_monitor_frame *frame)
        nsaddr_t _ns_cb_struct_addr = NULL;
 
        switch (frame->r[0]) {
-               case SMC_TOS_NS_IRQ_PENDING_VECTOR:
-                       _jump_to_ns_irq_addr = frame->r[1];
-                       platform_set_intr_ready_state(true, frame);
-                       break;
 
                case SMC_TOS_SS_REGISTER_HANDLER:
                        error = platform_ss_register_handler(frame);
@@ -262,11 +266,11 @@ void tz_handle_system_smc(struct tz_monitor_frame *frame)
                        dprintf(CRITICAL, "%s", early_logbuf);
 
                        break;
-
+#if !defined(WITH_MONITOR_BIN)
                case SMC_TOS_PROGRAM_VPR:
-               case SMC_SIP_PROGRAM_VPR:
                        error = platform_program_vpr(frame->r[1], frame->r[2]);
                        break;
+#endif
        }
 
        frame->r[0] = error;
@@ -459,43 +463,51 @@ static void tz_handle_trusted_app_smc(struct tz_monitor_frame *frame)
        frame->r[0] = result;
 }
 
-void tz_handle_monitor(struct tz_monitor_frame *frame)
+void tz_stdcall_handler(struct tz_monitor_frame *frame)
 {
-       dprintf(SPEW,
-               "%s: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
-               __func__, frame->r[0], frame->r[1], frame->r[2], frame->r[3],
-               frame->r[4], frame->r[5], frame->r[6], frame->r[7]);
+       dprintf(SPEW, "%s: 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", __func__,
+               frame->r[0], frame->r[1], frame->r[2], frame->r[3]);
 
-       switch (frame->r[0])
-       {
+       switch (frame->r[0]) {
+       case SMC_TOS_SS_REGISTER_HANDLER:
+       case SMC_TOS_NS_REG_REQPARAM_BUF:
+       case SMC_TOS_INIT_LOGGER:
+       case SMC_TOS_PROGRAM_VPR:
+               tz_handle_system_smc(frame);
+               break;
+
+       case SMC_TA_OPEN_SESSION:
+       case SMC_TA_CLOSE_SESSION:
+       case SMC_TA_LAUNCH_OPERATION:
+               tz_handle_trusted_app_smc(frame);
+               break;
+
+       default:
+               dprintf(CRITICAL, "%s: unhandled function 0x%x\n",
+                       __func__, (uint32_t)frame->r[0]);
+               frame->r[0] = TZ_UNSUPPORTED_PARAM;
+               break;
+       }
+}
+
+void tz_fastcall_handler(struct tz_monitor_frame *frame)
+{
+       switch (frame->r[0]) {
 #if !defined(WITH_MONITOR_BIN)
-               case SMC_SIP_L2_MANAGEMENT:
-               case SMC_SIP_CPU_RESET_VECTOR:
-               case SMC_SIP_CPU_RESET_VECTOR_LEGACY:
-               case SMC_SIP_DEVICE_SUSPEND:
-                       pm_handle_platform_smc(frame);
-                       break;
+       case SMC_SIP_L2_MANAGEMENT:
+       case SMC_SIP_CPU_RESET_VECTOR:
+       case SMC_SIP_CPU_RESET_VECTOR_LEGACY:
+       case SMC_SIP_DEVICE_SUSPEND:
+               pm_handle_platform_smc(frame);
+               break;
+
+       case SMC_SIP_PROGRAM_VPR:
+               frame->r[0] = platform_program_vpr(frame->r[1], frame->r[2]);
+               break;
 #endif
 
-               case SMC_TOS_NS_IRQ_PENDING_VECTOR:
-               case SMC_TOS_SS_REGISTER_HANDLER:
-               case SMC_TOS_NS_REG_REQPARAM_BUF:
-               case SMC_TOS_INIT_LOGGER:
-               case SMC_TOS_PROGRAM_VPR:
-               case SMC_SIP_PROGRAM_VPR:
-                       tz_handle_system_smc(frame);
-                       break;
-
-               case SMC_TA_OPEN_SESSION:
-               case SMC_TA_CLOSE_SESSION:
-               case SMC_TA_LAUNCH_OPERATION:
-                       tz_handle_trusted_app_smc(frame);
-                       break;
-
-               default:
-                       dprintf(CRITICAL, "%s: unhandled function 0x%x\n",
-                               __func__, (uint32_t)frame->r[0]);
-                       frame->r[0] = TZ_UNSUPPORTED_PARAM;
-                       break;
+       default:
+               frame->r[0] = TZ_UNSUPPORTED_PARAM;
+               break;
        }
 }
index 6dc48df..b8b2048 100644 (file)
@@ -42,7 +42,8 @@ typedef enum {
        SMC_TOS_NS_RETURN_FROM_IRQ      = 0x32000005,
        SMC_TOS_FS_OP_DONE              = 0x32000006,
        SMC_TOS_INIT_LOGGER             = 0x32000007,
-       SMC_TOS_SS_REGISTER_HANDLER     = 0x32000008,
+       SMC_TOS_SS_REQ_COMPLETE         = 0x32000009,
+       SMC_TOS_SS_REGISTER_HANDLER     = 0x32000010,
 
        /* Trusted Application Calls */
        SMC_TA_OPEN_SESSION             = 0x30000001,
@@ -72,7 +73,6 @@ void platform_init_cpu(void);
 void platform_config_interrupts(void);
 void platform_disable_debug_intf(void);
 void platform_enable_debug_intf(void);
-void platform_handle_tz_smc(uint32_t smc_type, struct tz_monitor_frame *ns_frame);
 void platform_set_intr_ready_state(bool, struct tz_monitor_frame *frame);
 status_t platform_ss_register_handler(struct tz_monitor_frame *frame);
 int platform_ss_request_handler(te_storage_request_t *req);
@@ -83,7 +83,8 @@ void platform_secure_dram_aperture(void);
 
 void tz_init(void);
 struct tz_monitor_frame *tz_switch_to_ns(uint32_t smc_type, struct tz_monitor_frame *frame);
-void tz_handle_monitor(struct tz_monitor_frame *frame);
+void tz_stdcall_handler(struct tz_monitor_frame *frame);
+void tz_fastcall_handler(struct tz_monitor_frame *frame);
 void tz_handle_smc_l2(unsigned int smc);
 void tz_handle_smc_deep_sleep(void);
 vaddr_t tz_map_shared_mem(nsaddr_t ns_addr, uint32_t size);
@@ -95,6 +96,10 @@ struct tz_monitor_frame *monitor_send_receive(uint32_t smc_type, struct tz_monit
 void pm_init(void);
 void pm_handle_platform_smc(struct tz_monitor_frame *frame);
 
+#if defined(WITH_MONITOR_BIN)
+void platform_monitor_init_cpu(void);
+#endif
+
 #if ARM_WITH_SCU
 void cpu_enable_scu(void);
 void cpu_enable_scu_access(void);
index 414b04b..6d1c0e7 100644 (file)
@@ -22,7 +22,6 @@
  */
 
 #include <stdlib.h>
-#include <lib/heap.h>
 #include <err.h>
 #include <debug.h>
 #include <platform.h>
 #include <reg.h>
 #include <string.h>
 
+#include <lib/monitor/monitor_vector.h>
 #include <platform/platform_p.h>
 
+#define SZ_1MB 0x00100000
+
 #define MC_SMMU_CONFIG_0                       0x10
 #define MC_SMMU_CONFIG_0_SMMU_ENABLE_DISABLE   0
 #define MC_SMMU_CONFIG_0_SMMU_ENABLE_ENABLE    1
@@ -54,6 +56,7 @@
 #define MC_SMMU_PTC_FLUSH_ALL                  0
 
 #define MC_SMMU_ASID_SECURITY_0                        0x38
+#define ASID_SECURITY          (0)
 
 #define MC_SECURITY_CFG0_0                     0x70
 #define MC_SECURITY_CFG1_0                     0x74
 #define MC_SMMU_TRANSLATION_ENABLE_0_0         0x228
 #define MC_SMMU_TRANSLATION_ENABLE_1_0         0x22c
 #define MC_SMMU_TRANSLATION_ENABLE_2_0         0x230
-
 #define TRANSLATION_ENABLE     (~0)
-#define ASID_SECURITY          (0)
 
-static uint32_t        platform_sec_base;
-static uint32_t        platform_sec_size;
+#define MC_VIDEO_PROTECT_BOM                   0x648
+#define MC_VIDEO_PROTECT_SIZE_MB               0x64c
+
+static uint32_t   platform_sec_base;
+static uint32_t   platform_sec_size;
+static uintptr_t  save_vpr_base;
+static uintptr_t  save_vpr_size;
 
 static void init_smmu_hw()
 {
@@ -98,6 +104,59 @@ void platform_secure_dram_aperture()
        *REG32(TEGRA_MC_BASE + MC_SECURITY_CFG1_0) = (platform_sec_size >> 20);
 }
 
+status_t platform_program_vpr(uint32_t vpr_base, uint32_t vpr_size)
+{
+       uintptr_t save_vpr_end, vpr_end;
+
+       if (!save_vpr_base && !save_vpr_size) {
+               save_vpr_base = *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_BOM);
+               save_vpr_size = *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_SIZE_MB);
+
+               /* apply nvtboot WAR as initial carveout may include HIVEC_BASE */
+               if ((uint64_t)save_vpr_base + (save_vpr_size * SZ_1MB) > NV_ARM_CORE_HIVEC_BASE) {
+                       save_vpr_end = ROUNDDOWN(NV_ARM_CORE_HIVEC_BASE, SZ_1MB);
+                       save_vpr_size = (save_vpr_end - save_vpr_base) / SZ_1MB;
+               }
+       }
+
+       /* check if vpr_size and vpr_base are MB align */
+       if ((vpr_size & (SZ_1MB-1)) || (vpr_base & (SZ_1MB-1))  ) {
+               dprintf(CRITICAL, "%s: vpr base/size not MB align\n", __func__);
+               return ERR_GENERIC;
+       }
+
+#if defined(WITH_MONITOR_BIN)
+       /* create identity mapping to old VPR range */
+       mon_mmu_map_uncached(save_vpr_base, save_vpr_base, (save_vpr_size * SZ_1MB));
+#endif
+
+       vpr_end = vpr_base + vpr_size;
+       vpr_size = vpr_size / SZ_1MB;
+       save_vpr_end = save_vpr_base + (save_vpr_size * SZ_1MB);
+
+       if (vpr_base > save_vpr_end || save_vpr_base > vpr_end) {
+               memset((void*)save_vpr_base, 0, (save_vpr_size * SZ_1MB)); //non-overlap
+       } else {
+               if (save_vpr_base < vpr_base) {
+                       memset((void*)save_vpr_base, 0, vpr_base-save_vpr_base); //shrink/shift at base
+               }
+               if (save_vpr_end > vpr_end) {
+                       memset((void*)vpr_end, 0, save_vpr_end-vpr_end); //shrink at end
+               }
+       }
+
+#if defined(WITH_MONITOR_BIN)
+       mon_mmu_unmap(save_vpr_base, (save_vpr_size * SZ_1MB));
+#endif
+
+       *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_BOM) = vpr_base;
+       *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_SIZE_MB) = vpr_size;
+       save_vpr_base = vpr_base;
+       save_vpr_size = vpr_size;
+
+       return NO_ERROR;
+}
+
 void platform_init_memory(uint32_t sec_base, uint32_t sec_size)
 {
        platform_sec_base = sec_base;
@@ -110,4 +169,10 @@ void platform_restore_memory()
 {
        init_smmu_hw();
        platform_secure_dram_aperture();
+
+       /* Program VPR during LP0 exit, if we have the most current value */
+       if (save_vpr_base && save_vpr_size) {
+               *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_BOM) = save_vpr_base;
+               *REG32(TEGRA_MC_BASE + MC_VIDEO_PROTECT_SIZE_MB) = save_vpr_size;
+       }
 }
diff --git a/platform/tegra/monitor/platform.c b/platform/tegra/monitor/platform.c
new file mode 100644 (file)
index 0000000..589485c
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <err.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform/platform_p.h>
+#include <lib/monitor/monitor_vector.h>
+
+#define CORTEX_A57     0xD07
+#define CORTEX_A53     0xD03
+
+#define L2ACTLR                (1 << 6)
+#define L2ECTLR                (1 << 5)
+#define L2CTLR         (1 << 4)
+#define CPUECTLR       (1 << 1)
+#define CPUACTLR       (1 << 0)
+#define ACTLR_MODE     (L2ACTLR | L2ECTLR | L2CTLR | CPUECTLR | CPUACTLR)
+
+void platform_sip_handler(struct fastcall_frame *frame, uint32_t cpu)
+{
+       status_t retval;
+
+       /* currently, only handling SMC_SIP_PROGRAM_VPR */
+       if (frame->r[0] != SMC_SIP_PROGRAM_VPR) {
+               frame->r[0] = ERR_NOT_SUPPORTED;
+               return;
+       }
+
+       retval = platform_program_vpr(frame->r[1], frame->r[2]);
+       frame->r[0] = retval;
+}
+
+/* Implementation specific CPU init */
+void platform_monitor_init_cpu(void)
+{
+       uint32_t val = ACTLR_MODE;
+       int cpu = mon_get_cpu_id();
+
+       /* enable L2 and CPU ECTLR RW access from non-secure world */
+       if (cpu == CORTEX_A57 || cpu == CORTEX_A53) {
+               __asm__ volatile (
+                       "msr    actlr_el3, %0   \n"
+                       "msr    actlr_el2, %0   \n"
+                       :: "r" (val)
+               );
+       }
+}
index c0e820e..dd944d2 100644 (file)
@@ -86,9 +86,9 @@ void platform_psci_init(uint32_t cpu)
        uint32_t reg;
 
        /* identity map MMIO ranges for register access */
-       (void) mon_mmu_map_mmio(MMIO_RANGE_0_ADDR, MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE);
-       (void) mon_mmu_map_mmio(MMIO_RANGE_1_ADDR, MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE);
-       (void) mon_mmu_map_mmio(MMIO_RANGE_2_ADDR, MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE);
+       mon_mmu_map_mmio(MMIO_RANGE_0_ADDR, MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE);
+       mon_mmu_map_mmio(MMIO_RANGE_1_ADDR, MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE);
+       mon_mmu_map_mmio(MMIO_RANGE_2_ADDR, MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE);
 
        platform_init_debug_port(DEFAULT_DEBUG_PORT);
 
@@ -105,6 +105,7 @@ void platform_psci_init(uint32_t cpu)
 
        mon_atomic_or(&cpus_started, 1 << cpu);
 
+       platform_monitor_init_cpu();
        platform_init_memory(__mon_phys_base, __mon_phys_size);
        platform_config_interrupts();
 }
@@ -117,9 +118,11 @@ void platform_psci_coldboot_epilog()
        /* mark entire TLK carveout as secure in the MC */
        platform_secure_dram_aperture();
 
+#if !defined(WITH_ARM_PSCI_SUPPORT)
        /* make sure all CPUs have reset before reprogramming vector */
        while (cpus_started != cpus_expected)
                ;
+#endif
 
        psci_program_reset_vectors();
 }
@@ -152,19 +155,31 @@ void platform_psci_cpu_has_reset(uint32_t cpu)
        if (reg == (__mon_phys_size >> 20))
                return;
 
+#if !defined(WITH_ARM_PSCI_SUPPORT)
+       /* make sure all CPUs have reset before reprogramming vector */
+       while (cpus_started != cpus_expected)
+               ;
+#endif
+
        psci_program_reset_vectors();
+       platform_monitor_init_cpu();
        platform_restore_memory();
        platform_config_interrupts();
 }
 
-int platform_psci_handler(struct psci_frame *frame, uint32_t cpu)
+void platform_psci_handler(struct fastcall_frame *frame, uint32_t cpu)
 {
        /* currently, only handling CPU_SUSPEND */
-       if (frame->r[0] != PSCI_FUNC_ID_CPU_SUSPEND)
-               return PSCI_RETURN_NOT_SUPPORTED;
+       if (frame->r[0] != PSCI_FUNC_ID_CPU_SUSPEND_LEGACY &&
+           frame->r[0] != PSCI_FUNC_ID_CPU_SUSPEND) {
+               frame->r[0] = PSCI_RETURN_NOT_SUPPORTED;
+               return;
+       }
 
        /* save NS entry point */
        ((uint64_t *)&__mon_cpu_return_addr)[cpu] = frame->r[2];
 
-       return PSCI_RETURN_SUCCESS;
+       cpus_started = 0;
+
+       frame->r[0] = PSCI_RETURN_SUCCESS;
 }
index 2a92f44..f3ff8f9 100644 (file)
@@ -13,6 +13,7 @@ MONITOR_MODULE := true
 MODULE_SRCS += \
        $(LOCAL_DIR)/memory.c \
        $(LOCAL_DIR)/interrupts.c \
+       $(LOCAL_DIR)/platform.c \
        $(LOCAL_DIR)/psci.c \
        $(LOCAL_DIR)/debug.c