| /* |
| * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining |
| * a copy of this software and associated documentation files |
| * (the "Software"), to deal in the Software without restriction, |
| * including without limitation the rights to use, copy, modify, merge, |
| * publish, distribute, sublicense, and/or sell copies of the Software, |
| * and to permit persons to whom the Software is furnished to do so, |
| * subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be |
| * included in all copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. |
| * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY |
| * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
| * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
| * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| */ |
| #include <debug.h> |
| #include <sys/types.h> |
| #include <compiler.h> |
| #include <assert.h> |
| #include <string.h> |
| #include <malloc.h> |
| #include <err.h> |
| #include <stdlib.h> |
| #include <arch.h> |
| #include <arch/arm.h> |
| #include <arch/arm/mmu.h> |
| #include <kernel/task.h> |
| #include <kernel/thread.h> |
| #include <kernel/elf.h> |
| #include <platform.h> |
| #include <platform/platform_p.h> |
| #include <kernel/task_load.h> |
| #include <lib/ote/ote_protocol.h> |
| |
| /*! page-aligned area for storing static task headers before the heap is initialized */ |
| #define TASK_LIST_CARVEOUT_PAGES 1 |
| |
| /*! max number of tasks embedded in the TLK task image */ |
| #define MAX_STATIC_TASK_COUNT ((TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE) / sizeof(task_t)) |
| |
| /* task list and used index */ |
| static u_int task_next_index; /* next task index [ 0..N ] */ |
| static task_t *task_blist; /* boot time fixed size task list */ |
| |
| /* task_blist is converted to task_list after heap is initialized */ |
| static struct list_node task_list; |
| |
| static char *task_image_start; |
| static char *task_image_end; |
| static u_int task_image_size; |
| |
| extern u_int __tasks_start; |
| extern u_int __tasks_end; |
| |
| extern int _end; /* end of binary &_end (heap starts after this) */ |
| extern int _heap_end; /* heap ends here, adjusted by carve-outs below */ |
| |
| /* memory carved off from the top (before heap_init) */ |
| #define carveout_taskmem _heap_end |
| |
| void task_print_uuid(uint32_t level, const te_service_id_t *uuid) |
| { |
| if (uuid) { |
| dprintf(level, "%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x", |
| uuid->time_low, |
| uuid->time_mid, |
| uuid->time_hi_and_version, |
| uuid->clock_seq_and_node[0], /* clock_seq_hi_and_reserved */ |
| uuid->clock_seq_and_node[1], /* clock_seq_low */ |
| uuid->clock_seq_and_node[2], |
| uuid->clock_seq_and_node[3], |
| uuid->clock_seq_and_node[4], |
| uuid->clock_seq_and_node[5], |
| uuid->clock_seq_and_node[6], |
| uuid->clock_seq_and_node[7]); |
| } |
| } |
| |
| static status_t task_load_config_options(u_int task_image_addr, task_t *taskp, Elf32_Shdr *shdr) |
| { |
| status_t err = NO_ERROR; |
| OTE_MANIFEST *manifest; |
| u_int *config_blob, config_blob_size; |
| u_int i; |
| te_service_id_t null_uuid = NULL_UUID; |
| |
| if (shdr->sh_size < offsetof(OTE_MANIFEST, config_options)) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| |
| /* init default config options before parsing manifest */ |
| taskp->props.min_heap_size = 5 * PAGE_SIZE; |
| taskp->props.min_stack_size = DEFAULT_STACK_SIZE; |
| |
| manifest = (OTE_MANIFEST *)(task_image_addr + shdr->sh_offset); |
| |
| /* |
| * The informative name field may be zero filled; only non-zero names are used. |
| * Task loading may also override this field's value. |
| */ |
| memcpy(&taskp->task_name[0], &manifest->name[0], sizeof(taskp->task_name)); |
| taskp->task_name[sizeof(taskp->task_name) - 1] = '\000'; |
| |
| /* |
| * Copy TA-specific config data (optional field; semantics are defined per task). |
| * E.g. it could hold the SHA1 digest of something to be loaded into the |
| * task at runtime. |
| */ |
| memcpy(&taskp->task_private_data[0], &manifest->private_data[0], |
| sizeof(taskp->task_private_data)); |
| |
| /* reject all tasks with NULL UUID */ |
| if (!memcmp(&manifest->uuid, &null_uuid, sizeof(te_service_id_t))) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| memcpy(&taskp->props.uuid, &manifest->uuid, sizeof(te_service_id_t)); |
| |
| task_print_id(SPEW, "task load uuid = ", taskp); |
| |
| config_blob = (u_int *)((char *)manifest + offsetof(OTE_MANIFEST, config_options)); |
| config_blob_size = (shdr->sh_size - offsetof(OTE_MANIFEST, config_options)); |
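| /* |
| * The config blob is a flat array of u_int words: each OTE_CONFIG_KEY_* |
| * tag is immediately followed by its data words, so config_entry_cnt |
| * below counts words rather than key/value entries. For example |
| * (hypothetical values), { OTE_CONFIG_KEY_MIN_STACK_SIZE, 0x2000, |
| * OTE_CONFIG_KEY_MAP_MEM, 1, 0x70000000, 0x1000 } requests an 8 KB |
| * stack plus one 4 KB I/O mapping with id 1. |
| */ |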
| |
| taskp->props.config_entry_cnt = config_blob_size / sizeof(u_int); |
| |
| /* nothing more to parse if there are no config options */ |
| if (taskp->props.config_entry_cnt != 0) { |
| |
| /* save off configuration blob start so it can be accessed later */ |
| taskp->props.config_blob = config_blob; |
| |
| /* |
| * Step through the configuration blob. |
| * |
| * Save off some configuration data while we are here but |
| * defer processing of other data until it is needed later. |
| */ |
| for (i = 0; i < taskp->props.config_entry_cnt; i++) { |
| switch (config_blob[i]) { |
| case OTE_CONFIG_KEY_MIN_STACK_SIZE: |
| /* MIN_STACK_SIZE takes 1 data value */ |
| if ((taskp->props.config_entry_cnt - i) <= 1) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| taskp->props.min_stack_size = |
| ROUNDUP(config_blob[++i], 4096); |
| if (taskp->props.min_stack_size <= 0) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| break; |
| case OTE_CONFIG_KEY_MIN_HEAP_SIZE: |
| /* MIN_HEAP_SIZE takes 1 data value */ |
| if ((taskp->props.config_entry_cnt - i) <= 1) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| taskp->props.min_heap_size = |
| ROUNDUP(config_blob[++i], 4096); |
| if (taskp->props.min_heap_size <= 0) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| break; |
| case OTE_CONFIG_KEY_MAP_MEM: |
| /* MAP_MEM takes 3 data values */ |
| if ((taskp->props.config_entry_cnt - i) <= 3) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| taskp->props.map_io_mem_cnt++; |
| i += 3; |
| break; |
| case OTE_CONFIG_KEY_RESTRICT_ACCESS: |
| /* set the clients whose access to this task is restricted */ |
| if ((taskp->props.config_entry_cnt - i) <= 1) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| taskp->props.restrict_access = config_blob[++i]; |
| break; |
| case OTE_CONFIG_KEY_AUTHORIZE: |
| /* tasks which are authorized to perform restricted operations */ |
| if ((taskp->props.config_entry_cnt - i) <= 1) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| taskp->props.authorizations = config_blob[++i]; |
| break; |
| case OTE_CONFIG_KEY_TASK_ISTATE: |
| /* task initial state attributes */ |
| if ((taskp->props.config_entry_cnt - i) <= 1) { |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| taskp->props.initial_state = config_blob[++i]; |
| break; |
| default: |
| dprintf(CRITICAL, |
| "%s: unknown OTE_CONFIG_KEY_VALUE: %d\n", |
| __func__, config_blob[i]); |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| } |
| } |
| |
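| /* |
| * Error path: the exit label lives inside an if (0) block so normal |
| * fall-through skips it; goto exit lands here, and err is forced to an |
| * error value if it was still NO_ERROR. |
| */ |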
| if (0) { |
| exit: |
| if (err == NO_ERROR) { |
| err = ERR_NOT_VALID; |
| } |
| } |
| return err; |
| } |
| |
| static void task_setup_mmio(task_t *taskp) |
| { |
| u_int i; |
| u_int id, offset, size; |
| |
| /* step thru configuration blob looking for I/O mapping requests */ |
| for (i = 0; i < taskp->props.config_entry_cnt; i++) { |
| if (taskp->props.config_blob[i] == OTE_CONFIG_KEY_MAP_MEM) { |
| /* found one; setup mapping to io range */ |
| id = taskp->props.config_blob[++i]; |
| offset = taskp->props.config_blob[++i]; |
| size = taskp->props.config_blob[++i]; |
| |
| /* check mapping ID is non-zero and unique */ |
| ASSERT(id); |
| ASSERT(!task_find_mapping_by_id(taskp, id)); |
| |
| arch_task_setup_mmio(taskp, id, offset, size); |
| } else { |
| /* all other config options take 1 data value */ |
| i++; |
| } |
| } |
| } |
| |
| static void task_set_valloc_start(task_t *taskp) |
| { |
| struct list_node *node; |
| task_map_t *mptr; |
| |
| /* |
| * Dynamic allocs start after the static alloc preceding the |
| * stack (expected to be called before dynamic allocs begin). |
| */ |
| node = &taskp->stack_map->node; |
| taskp->valloc_list = list_prev(node, node); |
| |
| mptr = containerof(taskp->valloc_list, task_map_t, node); |
| taskp->valloc_start = mptr->vaddr + mptr->size; |
| taskp->valloc_end = taskp->stack_map->vaddr; |
| } |
| |
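| /* |
| * Find a free, suitably aligned virtual address range of the given size |
| * within the task's dynamic allocation window (valloc_start..valloc_end) |
| * using a first-fit scan of the vaddr-sorted mappings; returns 0 if no |
| * gap is large enough. |
| */ |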
| addr_t task_find_address_space(task_t *taskp, u_int size, u_int align) |
| { |
| addr_t astart, aend; |
| task_map_t *mptr; |
| |
| astart = ROUNDUP(taskp->valloc_start, align); |
| aend = astart + size; |
| |
| /* find first fit */ |
| list_for_every_entry(taskp->valloc_list, mptr, task_map_t, node) { |
| if (aend < mptr->vaddr) |
| break; /* fits before mptr alloc */ |
| if (mptr->vaddr == taskp->valloc_end) { |
| /* hit end without finding space */ |
| dprintf(CRITICAL, "failed to find task address space\n"); |
| return 0; |
| } |
| astart = ROUNDUP((mptr->vaddr + mptr->size), align); |
| aend = astart + size; |
| } |
| ASSERT(!(astart & (align - 1))); |
| return astart; |
| } |
| |
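| /* |
| * Insert a mapping into the task's map_list, which is kept sorted by |
| * virtual address; the new mapping must not overlap an existing one. |
| */ |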
| void task_add_mapping(task_t *taskp, task_map_t *new_mptr) |
| { |
| task_map_t *mptr; |
| |
| ASSERT(taskp); |
| ASSERT(new_mptr); |
| ASSERT(new_mptr->vaddr && new_mptr->size); |
| ASSERT(!new_mptr->id || (new_mptr->flags & TM_IO)); |
| |
| list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) { |
| if (mptr->vaddr > new_mptr->vaddr) { |
| ASSERT((new_mptr->vaddr + new_mptr->size) <= mptr->vaddr); |
| list_add_before(&mptr->node, &new_mptr->node); |
| return; |
| } |
| } |
| list_add_tail(&taskp->map_list, &new_mptr->node); |
| } |
| |
| void task_delete_mapping(task_t *taskp, task_map_t *mptr) |
| { |
| list_delete(&mptr->node); |
| } |
| |
| task_map_t *task_find_mapping(task_t *taskp, addr_t vaddr, u_int size) |
| { |
| task_map_t *mptr; |
| |
| list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) { |
| if ((mptr->vaddr <= vaddr) && |
| ((mptr->vaddr + mptr->size) >= (vaddr + size))) { |
| return mptr; |
| } |
| } |
| |
| dprintf(CRITICAL, |
| "task %d: vaddr 0x%08x for 0x%08x bytes not mapped\n", |
| taskp->task_index, (u_int)vaddr, size); |
| return NULL; |
| } |
| |
| task_map_t *task_find_mapping_by_id(task_t *taskp, u_int id) |
| { |
| task_map_t *mptr; |
| |
| list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) { |
| if (mptr->id == id) |
| return mptr; |
| } |
| |
| return NULL; |
| } |
| |
| status_t task_get_physaddr(task_t *taskp, addr_t vaddr, paddr_t *paddr) |
| { |
| task_map_t *mptr; |
| |
| mptr = task_find_mapping(taskp, vaddr, 0); |
| if (mptr == NULL) |
| return ERR_INVALID_ARGS; |
| |
| if (mptr->flags & TM_PHYS_CONTIG) { |
| *paddr = mptr->u_phys.contig + (vaddr - mptr->vaddr); |
| } else { |
| uint32_t pageno = (vaddr - mptr->vaddr) / PAGE_SIZE; |
| *paddr = mptr->u_phys.pagelist[pageno] + (vaddr & PAGE_MASK); |
| } |
| return NO_ERROR; |
| } |
| |
| bool task_valid_address(vaddr_t addr, u_int size) |
| { |
| task_t *taskp; |
| task_map_t *mptr; |
| |
| taskp = current_thread->arch.task; |
| mptr = task_find_mapping(taskp, addr, size); |
| return !!mptr; |
| } |
| |
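| /* |
| * Allocate a physically contiguous, page-aligned user stack of |
| * min_stack_size bytes and map it directly below TASK_STACK_ADDR. |
| */ |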
| static status_t task_init_stack(task_t *taskp) |
| { |
| task_map_t *mptr; |
| |
| mptr = calloc(1, sizeof(task_map_t)); |
| if (mptr == NULL) |
| return ERR_NO_MEMORY; |
| |
| mptr->size = taskp->props.min_stack_size; |
| mptr->u_phys.contig = (addr_t) memalign(PAGE_SIZE, mptr->size); |
| if (mptr->u_phys.contig == NULL) { |
| free(mptr); |
| return ERR_NO_MEMORY; |
| } |
| |
| mptr->u_phys.contig = virtual_to_physical(mptr->u_phys.contig); |
| |
| mptr->vaddr = TASK_STACK_ADDR - mptr->size; |
| mptr->flags = (TM_UW | TM_UR | TM_PHYS_CONTIG | TM_PHYS_ALLOCATED); |
| mptr->offset = 0; |
| mptr->map_attrs = NULL; |
| |
| taskp->stack_map = mptr; |
| task_add_mapping(taskp, mptr); |
| |
| dprintf(SPEW, |
| "task %d: stack vaddr 0x%08lx, paddr 0x%08llx, msize 0x%08x\n", |
| taskp->task_index, mptr->vaddr, (uint64_t)mptr->u_phys.contig, mptr->size); |
| |
| return NO_ERROR; |
| } |
| |
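| /* |
| * Set up the task's brk (user heap) region: whatever is left over in the |
| * mapping containing start_brk becomes the initial brk, and an extra |
| * contiguous mapping is added if that falls short of min_heap_size. |
| */ |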
| static status_t task_init_brk(u_int task_image_addr, task_t *taskp, Elf32_Ehdr *ehdr) |
| { |
| task_map_t *mptr; |
| vaddr_t vbrk; |
| uint32_t brklen; |
| |
| /* find mapping in which brk resides */ |
| mptr = task_find_mapping(taskp, taskp->start_brk, 0); |
| if (mptr == NULL) { |
| dprintf(CRITICAL, "task failed to find brk in mappings\n"); |
| halt(); |
| } |
| |
| /* what's leftover in the mapping goes to brk */ |
| taskp->curr_brk = taskp->start_brk; |
| taskp->end_brk = taskp->start_brk + |
| ((mptr->vaddr + mptr->size) - taskp->start_brk); |
| |
| /* mmap expects MAP_ANONYMOUS to be zeros */ |
| vbrk = physical_to_virtual(mptr->u_phys.contig) + |
| (taskp->start_brk - mptr->vaddr); |
| brklen = taskp->end_brk - taskp->curr_brk; |
| |
| memset((void *)vbrk, 0, brklen); |
| |
| platform_clean_invalidate_cache_range(vbrk, brklen); |
| |
| /* increase user mode heap (if not enough remains) */ |
| if ((taskp->end_brk - taskp->curr_brk) < taskp->props.min_heap_size) { |
| mptr = calloc(1, sizeof(task_map_t)); |
| if (mptr == NULL) |
| return ERR_NO_MEMORY; |
| |
| mptr->size = taskp->props.min_heap_size; |
| mptr->u_phys.contig = (addr_t) memalign(PAGE_SIZE, mptr->size); |
| if (mptr->u_phys.contig == NULL) { |
| free(mptr); |
| return ERR_NO_MEMORY; |
| } |
| |
| /* mmap expects MAP_ANONYMOUS to be zeros */ |
| memset((void *)(addr_t)mptr->u_phys.contig, 0, mptr->size); |
| |
| /* mptr->u_phys.contig still holds a virtual address at this point */ |
| platform_clean_invalidate_cache_range(mptr->u_phys.contig, mptr->size); |
| |
| mptr->u_phys.contig = virtual_to_physical(mptr->u_phys.contig); |
| |
| mptr->vaddr = taskp->end_brk; |
| mptr->flags = (TM_UW | TM_UR | TM_PHYS_CONTIG | TM_PHYS_ALLOCATED); |
| mptr->offset = 0; |
| mptr->map_attrs = NULL; |
| |
| task_add_mapping(taskp, mptr); |
| |
| taskp->end_brk += mptr->size; |
| } |
| |
| dprintf(SPEW, |
| "task %d: brk vaddr 0x%08lx, msize 0x%08x\n", |
| taskp->task_index, taskp->start_brk, |
| (u_int)(taskp->end_brk - taskp->start_brk)); |
| |
| return NO_ERROR; |
| } |
| |
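| /* |
| * Build the address map of a prepared task: the user stack, one mapping |
| * per page-aligned PT_LOAD segment (executed in place from the carveout), |
| * and the brk/heap region. Code/data/bss/brk boundaries and the entry |
| * point are recorded along the way. |
| */ |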
| static status_t task_alloc_address_map(task_t *taskp) |
| { |
| Elf32_Ehdr *elf_hdr; |
| Elf32_Phdr *prg_hdr; |
| u_int i; |
| u_int task_image_addr; |
| task_map_t *mptr; |
| status_t ret; |
| |
| elf_hdr = taskp->elf_hdr; |
| task_image_addr = (u_int)elf_hdr; |
| |
| taskp->start_code = ~0; |
| |
| /* alloc user stack */ |
| ret = task_init_stack(taskp); |
| if (ret != NO_ERROR) { |
| dprintf(CRITICAL, "failed to load task: stack creation error\n"); |
| return ret; |
| } |
| |
| /* create mappings for PT_LOAD sections */ |
| for (i = 0; i < elf_hdr->e_phnum; i++) { |
| addr_t first, last, last_mem; |
| |
| prg_hdr = (Elf32_Phdr *)((u_int)elf_hdr + elf_hdr->e_phoff + |
| (i * sizeof(Elf32_Phdr))); |
| |
| if (prg_hdr->p_type != PT_LOAD) |
| continue; |
| |
| /* skip PT_LOAD if it's below task start or above .bss */ |
| if ((prg_hdr->p_vaddr < TASK_START_ADDR) || |
| (prg_hdr->p_vaddr >= taskp->end_bss)) |
| continue; |
| |
| /* |
| * We expect to execute the task in place, which means its PT_LOAD |
| * segments must be page-aligned. |
| */ |
| if ((prg_hdr->p_vaddr & PAGE_MASK) || (prg_hdr->p_offset & PAGE_MASK)) |
| return ERR_TASK_GENERIC; |
| |
| mptr = calloc(1, sizeof(task_map_t)); |
| if (mptr == NULL) |
| return ERR_NO_MEMORY; |
| |
| mptr->size = (prg_hdr->p_memsz + PAGE_MASK) & ~PAGE_MASK; |
| mptr->u_phys.contig = virtual_to_physical(task_image_addr) + prg_hdr->p_offset; |
| mptr->vaddr = prg_hdr->p_vaddr; |
| mptr->flags = (prg_hdr->p_flags & PF_FLAG_MASK) | TM_PHYS_CONTIG; |
| mptr->offset = 0; |
| mptr->map_attrs = NULL; |
| |
| task_add_mapping(taskp, mptr); |
| |
| /* check for overlap into user stack range */ |
| if ((TASK_STACK_ADDR - taskp->stack_map->size) < (mptr->vaddr + mptr->size)) { |
| dprintf(CRITICAL, |
| "failed to load task: (overlaps user stack 0x%lx)\n", |
| TASK_STACK_ADDR - taskp->stack_map->size); |
| return ERR_TOO_BIG; |
| } |
| |
| dprintf(SPEW, |
| "task %d: load vaddr 0x%08lx, paddr 0x%08llx" |
| " rsize 0x%08x, msize 0x%08x, flags 0x%08x\n", |
| taskp->task_index, mptr->vaddr, (uint64_t)mptr->u_phys.contig, |
| mptr->size, prg_hdr->p_memsz, mptr->flags); |
| |
| /* start of code/data */ |
| first = prg_hdr->p_vaddr; |
| if (first < taskp->start_code) |
| taskp->start_code = first; |
| if (taskp->start_data < first) |
| taskp->start_data = first; |
| |
| /* end of code/data */ |
| last = prg_hdr->p_vaddr + prg_hdr->p_filesz; |
| if ((prg_hdr->p_flags & PF_X) && taskp->end_code < last) |
| taskp->end_code = last; |
| if (taskp->end_data < last) |
| taskp->end_data = last; |
| |
| /* end of brk */ |
| last_mem = prg_hdr->p_vaddr + prg_hdr->p_memsz; |
| if (last_mem > taskp->start_brk) |
| taskp->start_brk = last_mem; |
| } |
| |
| ret = task_init_brk(task_image_addr, taskp, elf_hdr); |
| if (ret != NO_ERROR) { |
| dprintf(CRITICAL, "failed to load task: task heap creation error\n"); |
| return ret; |
| } |
| |
| dprintf(SPEW, "task %d: code: start 0x%08lx end 0x%08lx\n", |
| taskp->task_index, taskp->start_code, taskp->end_code); |
| dprintf(SPEW, "task %d: data: start 0x%08lx end 0x%08lx\n", |
| taskp->task_index, taskp->start_data, taskp->end_data); |
| dprintf(SPEW, "task %d: bss: end 0x%08lx\n", |
| taskp->task_index, taskp->end_bss); |
| dprintf(SPEW, "task %d: brk: start 0x%08lx end 0x%08lx\n", |
| taskp->task_index, taskp->start_brk, taskp->end_brk); |
| |
| taskp->entry = elf_hdr->e_entry; |
| dprintf(SPEW, "task %d: entry 0x%08lx\n", taskp->task_index, taskp->entry); |
| |
| return NO_ERROR; |
| } |
| |
| /* |
| * Align the next task to a page boundary by copying what remains |
| * in the task image down to the aligned next task start. This should |
| * be called after we're done with the section headers, as the previous |
| * task's .shstrtab section will be clobbered. |
| * |
| * Note: task_image_size tracks the carved-out region in LK and is used |
| * to exit the bootloader loop, so it is still decremented by the full |
| * max_extent. Because of the copy down to an aligned next task addr, |
| * task_image_size is more than what we're actually using. |
| */ |
| static char *task_align_next_task(Elf32_Ehdr *elf_hdr, Elf32_Shdr *pad_hdr) |
| { |
| char *next_task_align_start; |
| char *next_task_fsize_start; |
| char *task_image_addr; |
| u_int copy_size; |
| u_int max_extent; |
| |
| ASSERT(pad_hdr); |
| ASSERT(elf_hdr); |
| |
| task_image_addr = (char *)elf_hdr; |
| max_extent = (elf_hdr->e_shoff + (elf_hdr->e_shnum * elf_hdr->e_shentsize)) - 1; |
| ASSERT((task_image_addr + max_extent + 1) <= task_image_end); |
| |
| next_task_align_start = task_image_addr + pad_hdr->sh_offset + pad_hdr->sh_size; |
| next_task_fsize_start = task_image_addr + max_extent + 1; |
| ASSERT(next_task_align_start <= next_task_fsize_start); |
| |
| copy_size = task_image_end - next_task_fsize_start; |
| if (copy_size) { |
| /* |
| * Copy remaining image bytes to aligned start for the next |
| * (and subsequent) tasks. Also decrement task_image_end, so |
| * we copy less each time we realign for the next task. |
| */ |
| memcpy(next_task_align_start, next_task_fsize_start, copy_size); |
| |
| platform_clean_invalidate_cache_range((addr_t)next_task_align_start, |
| copy_size); |
| |
| task_image_end -= (next_task_fsize_start - next_task_align_start); |
| } |
| |
| task_image_size -= (max_extent + 1); |
| return next_task_align_start; |
| } |
| |
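| /* |
| * Parse and validate one task ELF image in place: locate the .bss, |
| * .bss-pad and .ote.manifest sections, clear .bss, and load the manifest |
| * config options into *taskp. Mapping and starting the task happen |
| * later, in task_init_one_task(). |
| */ |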
| status_t task_prepare(char *task_addr, u_int task_size, task_t *taskp, |
| Elf32_Shdr **bss_pad_shdr_p, task_type_t task_type) |
| { |
| status_t err = NO_ERROR; |
| Elf32_Ehdr *ehdr = NULL; |
| Elf32_Shdr *shdr = NULL; |
| Elf32_Shdr *bss_shdr = NULL; |
| Elf32_Shdr *bss_pad_shdr = NULL; |
| Elf32_Shdr *manifest_shdr = NULL; |
| char *shstbl = NULL; |
| vaddr_t bss_addr = 0; |
| u_int i = 0; |
| u_int task_max_extent = 0; |
| |
| if (!task_addr || !taskp || task_size == 0) { |
| err = ERR_INVALID_ARGS; |
| goto exit; |
| } |
| |
| /* For the preloaded tasks: the size includes this task and all |
| * other tasks that follow in the same image. |
| */ |
| dprintf(SPEW, "%s task: start %p size %d (0x%08x)\n", |
| __func__, task_addr, task_size, task_size); |
| |
| ehdr = (Elf32_Ehdr *) task_addr; |
| if (strncmp((char *)ehdr->e_ident, ELFMAG, SELFMAG)) { |
| dprintf(CRITICAL, "%s: ELF header not found\n", |
| __func__); |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| |
| if (bss_pad_shdr_p) |
| *bss_pad_shdr_p = NULL; |
| |
| shdr = (Elf32_Shdr *)((u_int)ehdr + ehdr->e_shoff); |
| shstbl = (char *)((u_int)ehdr + shdr[ehdr->e_shstrndx].sh_offset); |
| |
| bss_shdr = bss_pad_shdr = manifest_shdr = NULL; |
| |
| /* calculate task end */ |
| for (i = 0; i < ehdr->e_shnum; i++) { |
| u_int extent; |
| |
| if (shdr[i].sh_type == SHT_NULL) |
| continue; |
| #if 0 |
| dprintf(CRITICAL, "task: sect %d, off 0x%08x, size 0x%08x, name %s\n", |
| i, shdr[i].sh_offset, shdr[i].sh_size, shstbl + shdr[i].sh_name); |
| #endif |
| |
| /* track bss and manifest sections */ |
| if (!strcmp((shstbl + shdr[i].sh_name), ".bss")) { |
| bss_shdr = shdr + i; |
| taskp->end_bss = bss_shdr->sh_addr + bss_shdr->sh_size; |
| } |
| else if (!strcmp((shstbl + shdr[i].sh_name), ".bss-pad")) { |
| bss_pad_shdr = shdr + i; |
| } |
| else if (!strcmp((shstbl + shdr[i].sh_name), |
| ".ote.manifest")) { |
| manifest_shdr = shdr + i; |
| } |
| |
| if (shdr[i].sh_type != SHT_NOBITS) { |
| extent = shdr[i].sh_offset + shdr[i].sh_size; |
| if (task_max_extent < extent) |
| task_max_extent = extent; |
| } |
| } |
| |
| /* |
| * We need these sections. |
| * Manifest is handled later. |
| */ |
| if (!bss_shdr || !bss_pad_shdr) { |
| dprintf(CRITICAL, "%s: Invalid task image\n", __func__); |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| |
| if (bss_pad_shdr_p) |
| *bss_pad_shdr_p = bss_pad_shdr; |
| |
| if ((bss_shdr->sh_offset + bss_shdr->sh_size) > task_max_extent) { |
| dprintf(CRITICAL, "%s: Invalid task image\n", __func__); |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| |
| if (ROUNDUP(task_max_extent, 4) != ehdr->e_shoff) { |
| dprintf(CRITICAL, "%s: Invalid task image\n", __func__); |
| err = ERR_NOT_VALID; |
| goto exit; |
| } |
| |
| /* clear .bss */ |
| bss_addr = (vaddr_t)(task_addr + bss_shdr->sh_offset); |
| |
| memset((uint8_t *)bss_addr, 0, bss_shdr->sh_size); |
| |
| platform_clean_invalidate_cache_range(bss_addr, bss_shdr->sh_size); |
| |
| /* let the caller decide if it can handle binaries without a manifest */ |
| if (manifest_shdr == NULL) { |
| taskp->props.manifest_exists = 0; |
| } else { |
| taskp->props.manifest_exists = 1; |
| err = task_load_config_options((u_int)task_addr, taskp, manifest_shdr); |
| if (err != NO_ERROR) { |
| dprintf(CRITICAL, "Invalid task manifest: 0x%x\n", err); |
| goto exit; |
| } |
| } |
| |
| taskp->elf_hdr = ehdr; |
| taskp->task_size = task_size; |
| |
| taskp->task_type = task_type; |
| taskp->task_state = TASK_STATE_INIT; |
| |
| if (0) { |
| exit: |
| if (err == NO_ERROR) |
| err = ERR_GENERIC; |
| } |
| |
| return err; |
| } |
| |
| /* |
| * Carve out memory for task headers. |
| * Called before heap_init(). |
| * |
| * The task headers are converted to a list after the heap is initialized. |
| */ |
| static void task_mem_init() |
| { |
| if (task_image_size != 0) { |
| carveout_taskmem &= ~PAGE_MASK; |
| |
| /* list of tasks (static and loaded) */ |
| carveout_taskmem -= (TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE); |
| task_blist = (task_t *)carveout_taskmem; |
| |
| task_load_config((vaddr_t)&_end, |
| (vaddr_t *)&carveout_taskmem); |
| } |
| |
| ASSERT(!(carveout_taskmem & PAGE_MASK)); |
| } |
| |
| /* |
| * Look in the kernel's ELF header for task sections and |
| * carve out memory for their loadable sections. This is |
| * called before heap_init(). |
| * |
| * This sets up the built-in tasks; they are started later by task_init(). |
| */ |
| static void task_bootloader() |
| { |
| char *task_image_addr = NULL; |
| task_t *taskp = NULL; |
| status_t err = NO_ERROR; |
| |
| dprintf(SPEW, "static tasks: start %p size 0x%08x end %p\n", |
| task_image_start, task_image_size, task_image_end); |
| |
| task_image_addr = task_image_start; |
| |
| task_mem_init(); |
| |
| memset(task_blist, 0, TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE); |
| |
| taskp = task_blist; |
| |
| while (task_image_size > 0) { |
| u_int i = 0; |
| Elf32_Shdr *bss_pad_shdr = NULL; |
| |
| if ((task_next_index + 1) > MAX_STATIC_TASK_COUNT) { |
| dprintf(CRITICAL, "%s: Too many (%d) tasks in image\n", |
| __func__, task_next_index+1); |
| halt(); |
| } |
| |
| err = task_prepare(task_image_addr, task_image_size, |
| taskp, &bss_pad_shdr, TASK_TYPE_STATIC); |
| |
| /* statically loaded tasks must run or the system halts */ |
| if (err != NO_ERROR) { |
| dprintf(CRITICAL, "%s: task#%u preparation failed (%d)\n", |
| __func__, task_next_index, err); |
| halt(); |
| } |
| |
| /* The size passed to task_prepare above can be larger than the |
| * actual task size in memory (it is larger unless this is the |
| * last task of the image), so fix up the task size here. |
| * |
| * The .bss-pad section is the last loadable section of the secure task, |
| * so the task's actual in-memory size can be calculated as below. |
| */ |
| taskp->task_size = bss_pad_shdr->sh_offset + bss_pad_shdr->sh_size; |
| |
| /* static tasks must contain a manifest section */ |
| if (!taskp->props.manifest_exists) { |
| dprintf(CRITICAL, "%s: Invalid task#%u in image, no manifest\n", |
| __func__, task_next_index); |
| halt(); |
| } |
| |
| /* |
| * Make sure UUID doesn't already exist. Note that |
| * this search won't include the task we are processing |
| * here because task_next_index hasn't been incremented yet. |
| * |
| * task_find_task_by_uuid() cannot yet be used here. |
| */ |
| for (i = 0; i < task_next_index; i++) { |
| task_t *ts = &task_blist[i]; |
| if (!memcmp(&ts->props.uuid, &taskp->props.uuid, sizeof(te_service_id_t))) { |
| dprintf(CRITICAL, "%s: task#%u duplicate UUID found!\n", |
| __func__, task_next_index); |
| halt(); |
| } |
| } |
| |
| /* |
| * The next tasks in the image are moved down to the next free |
| * page aligned address after the current task. |
| */ |
| task_image_addr = task_align_next_task(taskp->elf_hdr, bss_pad_shdr); |
| |
| taskp->task_index = task_next_index++; |
| taskp++; |
| } |
| } |
| |
| void task_early_init() |
| { |
| task_image_start = (char *)&__tasks_start; |
| task_image_end = (char *)&__tasks_end; |
| task_image_size = (task_image_end - task_image_start); |
| |
| ASSERT(!((uint32_t)task_image_start & PAGE_MASK)); |
| |
| /* |
| * If no TAs are loaded, skip the |
| * task initialization phase. |
| */ |
| if (task_image_size != 0) |
| task_bootloader(); |
| } |
| |
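| /* |
| * Build the address map for one prepared task, set up its MMIO ranges, |
| * create its initial thread (T0), and resume that thread; the task is |
| * marked blocked or active according to its manifest initial state. |
| */ |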
| status_t task_init_one_task(task_t *task) |
| { |
| status_t err = NO_ERROR; |
| char name[32]; |
| thread_t *thread; |
| const char *state_str = "(unknown)"; |
| |
| if (!task || task->task_index >= task_next_index) { |
| err = ERR_INVALID_ARGS; |
| goto exit; |
| } |
| |
| if (task->task_state != TASK_STATE_INIT) { |
| dprintf(CRITICAL, "%s: Task not startable in state %d\n", |
| __func__, task->task_state); |
| err = ERR_TASK_GENERIC; |
| goto exit; |
| } |
| |
| list_initialize(&task->map_list); |
| list_initialize(&task->thread_node); |
| |
| err = task_alloc_address_map(task); |
| if (err != NO_ERROR) { |
| dprintf(CRITICAL, "%s: failed to load address map\n", |
| __func__); |
| goto exit; |
| } |
| |
| /* setup dynamic alloc range */ |
| task_set_valloc_start(task); |
| |
| /* reserve mmio va ranges here */ |
| if (task->props.map_io_mem_cnt > 0) { |
| task_setup_mmio(task); |
| |
| /* reset valloc start */ |
| task_set_valloc_start(task); |
| } |
| |
| /* force zero-terminated thread names derived from the task name */ |
| if (task->task_name[0] != '\000') { |
| snprintf(name, sizeof(name) - 1, "%s_%u_T0", task->task_name, |
| task->task_index); |
| } else { |
| snprintf(name, sizeof(name) - 1, "task_%u_T0", task->task_index); |
| } |
| name[sizeof(name) - 1] = '\000'; |
| |
| thread = thread_create(name, (thread_start_routine)(task->entry), 0, LOW_PRIORITY, 4096); |
| if (thread == NULL) { |
| dprintf(CRITICAL, "%s: allocate user thread failed\n", |
| __func__); |
| err = ERR_GENERIC; |
| goto exit; |
| } |
| |
| task->task_state = TASK_STATE_STARTING; |
| |
| if (arch_task_init(thread, task) == false) { |
| dprintf(CRITICAL, "%s: arch thread/task init failed\n", |
| __func__); |
| err = ERR_GENERIC; |
| goto exit; |
| } |
| |
| if (task->props.initial_state & OTE_MANIFEST_TASK_ISTATE_BLOCKED) { |
| task->task_state = TASK_STATE_BLOCKED; |
| state_str = "blocked"; |
| } else { |
| task->task_state = TASK_STATE_ACTIVE; |
| state_str = "active"; |
| } |
| |
| /* start it */ |
| if (task->entry) { |
| dprintf(INFO, "starting %s task#%u%s\n", state_str, task->task_index, |
| task_get_name_str(task, " (", ")", name, sizeof(name))); |
| |
| thread_resume(thread); |
| } |
| |
| if (0) { |
| exit: |
| if (err == NO_ERROR) |
| err = ERR_GENERIC; |
| } |
| return err; |
| } |
| |
| /* |
| * Start static tasks initialized by task_early_init() |
| * |
| * Convert the boot time task_blist into run-time task_list. |
| */ |
| void task_init() |
| { |
| status_t err = NO_ERROR; |
| task_t *task; |
| u_int i; |
| |
| task_image_start = NULL; |
| task_image_end = NULL; |
| task_image_size = 0; |
| |
| list_initialize(&task_list); |
| |
| /* |
| * If no TAs are loaded, skip the |
| * task initialization phase. |
| */ |
| if (task_next_index == 0) |
| return; |
| |
| /* convert the boot time task_blist into a run-time task_list */ |
| |
| for (i = 0, task = task_blist; i < task_next_index; i++, task++) { |
| task_t *taskp = malloc(sizeof(task_t)); |
| if (!taskp) { |
| dprintf(CRITICAL, "%s: out of memory -- halting\n", __func__); |
| halt(); |
| } |
| memcpy(taskp, task, sizeof(task_t)); |
| |
| err = task_init_one_task(taskp); |
| if (err != NO_ERROR) { |
| dprintf(CRITICAL, "%s: static task start failed %d -- halting\n", |
| __func__, err); |
| halt(); |
| } |
| |
| list_add_tail(&task_list, &taskp->node); |
| } |
| |
| /* boot time task header pages are no longer used */ |
| memset(task_blist, 0, TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE); |
| task_blist = NULL; |
| |
| task_unload_init(); |
| task_load_init(); |
| } |
| |
| task_t *task_find_task_by_uuid(te_service_id_t *uuid) |
| { |
| task_t *task = NULL; |
| |
| /* find task for this uuid */ |
| if (uuid) { |
| te_service_id_t null_uuid = NULL_UUID; |
| |
| if (!memcmp(&null_uuid, uuid, sizeof(te_service_id_t))) |
| return NULL; |
| |
| list_for_every_entry(&task_list, task, task_t, node) { |
| if (task->task_state != TASK_STATE_UNKNOWN) { |
| if (!memcmp(&task->props.uuid, uuid, sizeof(te_service_id_t))) { |
| return task; |
| } |
| } |
| } |
| } |
| return NULL; |
| } |
| |
| task_t *task_find_task_by_index(uint32_t index) |
| { |
| task_t *task = NULL; |
| |
| if (index >= task_next_index) |
| return NULL; |
| |
| list_for_every_entry(&task_list, task, task_t, node) { |
| if (task->task_state != TASK_STATE_UNKNOWN) { |
| if (task->task_index == index) { |
| return task; |
| } |
| } |
| } |
| return NULL; |
| } |
| |
| const char *task_get_name_str(const task_t *task, const char *prefix, const char *suffix, |
| char *buf, uint32_t buflen) |
| { |
| uint32_t pslen = 0; |
| |
| if (prefix) |
| pslen += strlen(prefix); |
| else |
| prefix = ""; |
| |
| if (suffix) |
| pslen += strlen(suffix); |
| else |
| suffix = ""; |
| |
| /* OTE_TASK_NAME_MAX_LENGTH includes the NUL character at end of task name */ |
| if (!task || !buf || buflen < (OTE_TASK_NAME_MAX_LENGTH + pslen) || |
| !task->task_name[0]) |
| return ""; |
| |
| snprintf(buf, buflen, "%s%s%s", prefix, task->task_name, suffix); |
| return buf; |
| } |
| |
| void task_print_id(uint32_t level, const char *prefix, const task_t *taskp) |
| { |
| te_service_id_t null_uuid = NULL_UUID; |
| |
| if (taskp) { |
| char tp_name[OTE_TASK_NAME_MAX_LENGTH+3]; |
| |
| dprintf(level, "%s", (prefix ? prefix : "")); |
| if (memcmp(&taskp->props.uuid, &null_uuid, sizeof(te_service_id_t))) |
| task_print_uuid(level, &taskp->props.uuid); |
| dprintf(level, "%s\n", task_get_name_str(taskp, " (", ")", tp_name, sizeof(tp_name))); |
| } |
| } |
| |
| /* |
| * This is only used by task loading code but placed here because it modifies |
| * task_next_index and the task_list when a new task is loaded. |
| */ |
| status_t task_register(task_t **task_p) |
| { |
| status_t err = NO_ERROR; |
| task_t *dtask = NULL; |
| |
| if (!task_p || !*task_p) { |
| err = ERR_INVALID_ARGS; |
| goto exit; |
| } |
| |
| dtask = malloc(sizeof(task_t)); |
| if (!dtask) { |
| err = ERR_NO_MEMORY; |
| dprintf(CRITICAL, "error allocating task header: 0x%x\n", err); |
| goto exit; |
| } |
| |
| memcpy(dtask, *task_p, sizeof(task_t)); |
| |
| enter_critical_section(); |
| |
| do { |
| /* |
| * Make sure UUID doesn't already exist. |
| */ |
| if (task_find_task_by_uuid(&dtask->props.uuid) != NULL) { |
| err = ERR_ALREADY_EXISTS; |
| break; |
| } |
| |
| /* Current task index now reserved for this task */ |
| dtask->task_index = task_next_index++; |
| |
| list_add_tail(&task_list, &dtask->node); |
| } while (0); |
| |
| exit_critical_section(); |
| |
| if (err != NO_ERROR) |
| goto exit; |
| |
| memset(*task_p, 0, sizeof(task_t)); |
| |
| /* swap *task_p to point to the registered object */ |
| *task_p = dtask; |
| |
| if (0) { |
| exit: |
| if (dtask) |
| free(dtask); |
| } |
| return err; |
| } |
| |
| u_int task_get_count() |
| { |
| return task_next_index; |
| } |
| |
| u_int task_get_active_count() |
| { |
| task_t *task = NULL; |
| int count = 0; |
| |
| list_for_every_entry(&task_list, task, task_t, node) { |
| if ((task->task_state == TASK_STATE_ACTIVE) || |
| (task->task_state == TASK_STATE_BLOCKED)) { |
| count++; |
| } |
| } |
| return count; |
| } |
| |
| const struct list_node *task_get_task_list() |
| { |
| return &task_list; |
| } |
| |
| status_t task_register_ta_events(task_t *task, uint32_t events_mask) |
| { |
| if (task == NULL) |
| return ERR_INVALID_ARGS; |
| |
| if ((events_mask & TA_EVENT_MASK) != events_mask) { |
| dprintf(INFO, "%s: bad events_mask: %d\n", __func__, |
| events_mask); |
| return ERR_INVALID_ARGS; |
| } |
| |
| task->ta_events_mask = events_mask; |
| |
| return NO_ERROR; |
| } |
| |
| /* |
| * Signal all tasks that have requested the supplied event. |
| * void *arg is supplied by the caller and is the struct te_command |
| * message to send to the task when it is scheduled. |
| */ |
| status_t task_signal_ta_event(enum ta_event_id event, void *arg) |
| { |
| task_t *tmp; |
| task_t *task; |
| thread_t *thread; |
| |
| list_for_every_entry_safe(&task_list, task, tmp, task_t, node) { |
| /* Only schedule tasks that have requested callbacks. */ |
| if ((task->ta_events_mask & (1 << event)) == 0) |
| continue; |
| |
| thread = list_peek_head_type(&task->thread_node, |
| thread_t, task_node); |
| |
| if (thread->state != THREAD_BLOCKED) { |
| dprintf(CRITICAL, |
| "%s:%d thread in bad state (%d) for event\n", |
| __func__, __LINE__, thread->state); |
| continue; |
| } |
| |
| enter_critical_section(); |
| |
| thread->arg = arg; |
| |
| /* kickoff thread */ |
| thread_unblock_from_wait_queue(thread, false, NO_ERROR); |
| thread_yield(); |
| exit_critical_section(); |
| } |
| |
| return NO_ERROR; |
| } |