| /* |
| * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining |
| * a copy of this software and associated documentation files |
| * (the "Software"), to deal in the Software without restriction, |
| * including without limitation the rights to use, copy, modify, merge, |
| * publish, distribute, sublicense, and/or sell copies of the Software, |
| * and to permit persons to whom the Software is furnished to do so, |
| * subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be |
| * included in all copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. |
| * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY |
| * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
| * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
| * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| */ |
| #include <assert.h> |
| #include <sys/types.h> |
| #include <stdlib.h> |
| #include <string.h> |
| #include <arch.h> |
| #include <err.h> |
| #include <arch/arm.h> |
| #include <arch/arm/mmu.h> |
| #include <kernel/task.h> |
| #include <kernel/thread.h> |
| #include <kernel/boot_params.h> |
| #include <platform.h> |
| #include <platform/platform_p.h> |
| |
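| /* stack space reserved for the libc-expected argc, argv[] and envp values */ |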
| #define TASK_ARGS_SPACE (4 * 16) |
| |
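| /* |
| * Switch a thread's saved context to user mode: the saved PSR becomes |
| * MODE_USR and the saved SP points at the task's user stack. |
| */ |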
| static void arch_task_set_usermode(thread_t *thread) |
| { |
| struct context_switch_frame *frame; |
| task_t *task = thread->arch.task; |
| |
| frame = (struct context_switch_frame *)thread->arch.sp; |
| frame->psr = MODE_USR; |
| frame->sp = task->sp; |
| } |
| |
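| /* set up a thread's initial register context and drop it to user mode */ |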
| void arch_task_set_context(thread_t *thread) |
| { |
| arch_thread_set_context(thread); |
| arch_task_set_usermode(thread); |
| } |
| |
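| /* |
| * Create user page table entries for every page of a mapping. Physical |
| * pages come either from a contiguous base address or from a pagelist, |
| * depending on TM_PHYS_CONTIG. |
| */ |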
| static void arch_task_map(task_t *taskp, task_map_t *mptr) |
| { |
| vaddr_t vaddr; |
| paddr_t paddr; |
| u_int pg; |
| |
| if ((mptr->vaddr + mptr->size) > MAX_TASK_SIZE) { |
| dprintf(CRITICAL, "task address space exceeds max: 0x%lx\n", |
| MAX_TASK_SIZE); |
| halt(); |
| } |
| |
| ASSERT(!(mptr->size & PAGE_MASK)); |
| |
| for (pg = 0; pg < (mptr->size / PAGE_SIZE); pg++) { |
| if (mptr->flags & TM_PHYS_CONTIG) |
| paddr = mptr->u_phys.contig + (pg * PAGE_SIZE); |
| else |
| paddr = mptr->u_phys.pagelist[pg]; |
| |
| ASSERT(!(paddr & PAGE_MASK)); |
| vaddr = mptr->vaddr + (pg * PAGE_SIZE); |
| arm_mmu_map_upage(taskp, vaddr, paddr, mptr); |
| } |
| } |
| |
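| /* remove the user page table entries covered by a mapping */ |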
| static void arch_task_unmap(task_t *taskp, task_map_t *mptr) |
| { |
| addr_t vaddr; |
| u_int pg; |
| |
| for (pg = 0; pg < (mptr->size / PAGE_SIZE); pg++) { |
| vaddr = mptr->vaddr + (pg * PAGE_SIZE); |
| arm_mmu_unmap_upage(taskp, vaddr); |
| } |
| } |
| |
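| /* |
| * Map an external buffer into a task's address space. The buffer is |
| * translated page by page; if the pages turn out to be physically |
| * contiguous, only the base address is kept and no pagelist is stored. |
| * Returns the new mapping, or NULL on failure. |
| */ |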
| task_map_t *arch_task_map_memory(task_t *task, uint64_t addr, u_int size, u_int flags) |
| { |
| task_map_t *mptr; |
| u_int npages, align, offset; |
| |
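| /* reject non-secure memory flags for a secure kernel VA mapping */ |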
| if (flags & TM_KERN_SEC_VA) { |
| if (flags & (TM_NS_MEM | TM_NS_MEM_PRIV)) |
| return NULL; |
| } |
| |
| mptr = calloc(1, sizeof(task_map_t)); |
| if (mptr == NULL) |
| return NULL; |
| |
| offset = addr & PAGE_MASK; |
| size = ROUNDUP(offset + size, PAGE_SIZE); |
| npages = size / PAGE_SIZE; |
| |
| /* non-secure mapping may require a stricter vaddr alignment */ |
| align = (flags & TM_NS_MEM) ? NS_VIRT_ADDR_ALIGN : ALIGN_4KB; |
| |
| mptr->vaddr = task_find_address_space(task, size, align); |
| if (!mptr->vaddr) { |
| free(mptr); |
| return NULL; |
| } |
| |
| mptr->size = size; |
| mptr->flags = flags; |
| mptr->id = 0; |
| mptr->offset = offset; |
| |
| task_add_mapping(task, mptr); |
| |
| if (npages == 1) { |
| /* coverity[out-of-bounds: FALSE] |
| * Only single-page translations happen here (mptr->size == PAGE_SIZE), |
| * so only the first element of the pagelist parameter is referenced. |
| */ |
| arm_mmu_translate_range(addr, &mptr->u_phys.contig, mptr); |
| |
| if (mptr->map_attrs == NULL) { |
| task_delete_mapping(task, mptr); |
| free(mptr); |
| return NULL; |
| } |
| mptr->flags |= TM_PHYS_CONTIG; |
| } else { |
| paddr_t *pagelist; |
| u_int i; |
| |
| /* allocate a pagelist, as the buffer may not be physically contiguous */ |
| pagelist = malloc(npages * sizeof(paddr_t)); |
| if (pagelist == NULL) { |
| task_delete_mapping(task, mptr); |
| free(mptr); |
| return NULL; |
| } |
| |
| arm_mmu_translate_range(addr, pagelist, mptr); |
| |
| if (mptr->map_attrs == NULL) { |
| free(pagelist); |
| task_delete_mapping(task, mptr); |
| free(mptr); |
| return NULL; |
| } |
| |
| /* check if the pages ended up being contiguous */ |
| for (i = 1; i < npages; i++) { |
| if ((pagelist[i-1] + PAGE_SIZE) != pagelist[i]) |
| break; |
| } |
| if (i < npages) { |
| /* not contiguous */ |
| mptr->u_phys.pagelist = pagelist; |
| } else { |
| /* turns out it is (the pagelist is no longer needed) */ |
| mptr->flags |= TM_PHYS_CONTIG; |
| mptr->u_phys.contig = pagelist[0]; |
| free(pagelist); |
| } |
| } |
| |
| arch_task_map(task, mptr); |
| return mptr; |
| } |
| |
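| /* create a user-writable, physically contiguous device (MMIO) mapping */ |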
| task_map_t *arch_task_setup_mmio(task_t *task, u_int id, addr_t paddr, u_int size) |
| { |
| task_map_t *mptr; |
| |
| dprintf(SPEW, "%s: id 0x%x paddr 0x%x size 0x%x\n", |
| __func__, id, (u_int)paddr, size); |
| |
| mptr = calloc(1, sizeof(task_map_t)); |
| if (mptr == NULL) |
| return NULL; |
| |
| size = ROUNDUP(size, PAGE_SIZE); |
| |
| mptr->vaddr = task_find_address_space(task, size, ALIGN_4KB); |
| if (!mptr->vaddr) { |
| free(mptr); |
| return NULL; |
| } |
| |
| mptr->size = size; |
| mptr->flags = TM_IO | TM_UW | TM_PHYS_CONTIG; |
| mptr->u_phys.contig = ROUNDDOWN(paddr, PAGE_SIZE); |
| mptr->offset = (paddr & PAGE_MASK); |
| |
| mptr->id = id; |
| mptr->map_attrs = NULL; |
| |
| task_add_mapping(task, mptr); |
| |
| return mptr; |
| } |
| |
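| /* |
| * Tear down a mapping: free its pagelist or owned memory, remove its page |
| * table entries (if the page table exists) and free the mapping itself. |
| */ |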
| void arch_task_unmap_memory(task_t *task, task_map_t *mptr) |
| { |
| ASSERT(task); |
| ASSERT(mptr); |
| |
| /* if non-contig, free the pagelist */ |
| if ((mptr->flags & TM_PHYS_CONTIG) == 0) { |
| free(mptr->u_phys.pagelist); |
| } else { |
| /* contig memory: need to free it if it was allocated with memalign() */ |
| if (mptr->flags & TM_PHYS_ALLOCATED) |
| free_memalign((void *)physical_to_virtual(mptr->u_phys.contig)); |
| } |
| |
| /* If the task pages are not yet mapped (e.g. an error occurred while |
| * loading the task), the page table does not yet exist. |
| */ |
| if (task->page_table) { |
| arch_task_unmap(task, mptr); |
| ASSERT(mptr->map_attrs); |
| } |
| task_delete_mapping(task, mptr); |
| if (mptr->map_attrs) |
| free(mptr->map_attrs); |
| free(mptr); |
| } |
| |
| /* The Arm architecture ASID (Address Space IDentifier) field is 8 bits |
| * wide, so context_id values run from 0 to 255. Zero is never used for |
| * tasks and 255 is reserved to signal allocation failure, leaving 254 as |
| * the max usable value. |
| */ |
| #define ARCH_ASID_MAX_VALUE 254 |
| #define ARCH_ASID_INVALID_VALUE (ARCH_ASID_MAX_VALUE + 1) |
| |
| /* bit masks for context id table values |
| */ |
| #define ARCH_ASID_IN_USE 0x01 /* context id value in use */ |
| |
| /* Table of context id (ASID) values currently in use. |
| * Index 0 exists but is never handed out; usable ids are 1..254. |
| */ |
| static char context_id_table[ARCH_ASID_MAX_VALUE + 1]; |
| |
| static void arch_task_release_context_id(uint32_t cid) |
| { |
| if (cid < 1 || cid > ARCH_ASID_MAX_VALUE) |
| return; |
| |
| enter_critical_section(); |
| |
| context_id_table[cid] &= ~ARCH_ASID_IN_USE; |
| |
| exit_critical_section(); |
| } |
| |
| /* |
| * Searches for an unused CONTEXT_ID (ASID) value, restarting from the |
| * beginning once the Arm ASID field would wrap. |
| * |
| * ASID value 255 is reserved for error handling in the border case where |
| * 254 tasks are already active and a new task load is attempted. Zero is |
| * never used for tasks. |
| */ |
| static u_int arch_task_get_context_id(void) |
| { |
| u_int asid = 0; |
| static u_int next_id = 1; |
| uint32_t i = 0; |
| |
| while (asid == 0) { |
| |
| enter_critical_section(); |
| |
| for (i = next_id; i <= ARCH_ASID_MAX_VALUE; i++) { |
| if ((context_id_table[i] & ARCH_ASID_IN_USE) == 0) { |
| asid = i; |
| context_id_table[i] |= ARCH_ASID_IN_USE; |
| break; |
| } |
| } |
| |
| exit_critical_section(); |
| |
| if (asid == 0) { |
| /* if the scan did not start from 1, retry once from the beginning */ |
| if (next_id != 1) { |
| next_id = 1; |
| continue; |
| } |
| |
| /* |
| * Having more than ARCH_ASID_MAX_VALUE secure |
| * tasks active at the same time is currently |
| * unsupported. |
| * |
| * This value gets trapped by the caller and |
| * the task load fails. |
| * |
| * At least one task must be unloaded before |
| * a new task can be loaded. |
| * |
| * The failed, partially loaded task gets |
| * auto-unloaded by the error handling. |
| */ |
| dprintf(CRITICAL, "Too many secure tasks -- out of context id values\n"); |
| asid = ARCH_ASID_INVALID_VALUE; |
| } |
| } |
| |
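| /* resume the next search after this ASID, wrapping at the max value */ |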
| if (asid >= ARCH_ASID_MAX_VALUE) |
| next_id = 1; |
| else |
| next_id = asid + 1; |
| |
| return asid; |
| } |
| |
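| /* |
| * Architectural task init: zero and prepare the user stack with boot args, |
| * allocate VFP state, assign a context id (ASID) and create page table |
| * entries for the boot time mappings. Returns false if loading must fail. |
| */ |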
| bool arch_task_init(thread_t *thread, task_t *task) |
| { |
| task_map_t *mptr; |
| uint32_t stack_top_off; |
| vaddr_t arg_base; |
| uint32_t *args; |
| uint32_t argc = 0; |
| |
| /* set up and clear the user stack (top reduced since libc expects args at sp) */ |
| stack_top_off = task->stack_map->size - TASK_ARGS_SPACE; |
| task->sp = task->stack_map->vaddr + stack_top_off; |
| memset((void *)physical_to_virtual(task->stack_map->u_phys.contig), 0, task->stack_map->size); |
| |
| platform_clean_invalidate_cache_range(physical_to_virtual(task->stack_map->u_phys.contig), |
| task->stack_map->size); |
| |
| /* initialize libc expected args */ |
| arg_base = physical_to_virtual(task->stack_map->u_phys.contig) + stack_top_off; |
| args = (uint32_t *)arg_base; |
| |
| /* get_boot_args() fills argv[] starting at args + 1 and returns argc */ |
| argc = get_boot_args(task, args + 1); |
| |
| *args++ = argc; /* argc */ |
| args += argc; /* increment past end of argv[] */ |
| *args++ = 0x0; /* envp */ |
| |
| /* ensure all args fit */ |
| ASSERT(args <= (uint32_t *)(arg_base + TASK_ARGS_SPACE)); |
| |
| #if ARM_WITH_NEON |
| /* alloc per-thread (and NS world) vfp context; fail the load on OOM */ |
| thread->arch.fpctx = calloc(1, sizeof(fpctx_t)); |
| if (thread->arch.fpctx == NULL) |
| return false; |
| if (!ns_vfp_hw_context) { |
| ns_vfp_hw_context = calloc(1, sizeof(fpctx_t)); |
| if (ns_vfp_hw_context == NULL) |
| return false; |
| } |
| #endif |
| |
| thread->arch.task = task; |
| arch_task_set_usermode(thread); |
| list_add_tail(&task->thread_node, &thread->task_node); |
| |
| task->context_id = arch_task_get_context_id(); |
| if (task->context_id == ARCH_ASID_INVALID_VALUE) |
| return false; |
| |
| /* create pagetable entries for boot time mappings */ |
| list_for_every_entry(&task->map_list, mptr, task_map_t, node) { |
| if (arm_mmu_set_attrs_task_init(mptr) != NO_ERROR) |
| return false; |
| arch_task_map(task, mptr); |
| } |
| |
| return true; |
| } |
| |
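| /* task teardown hook: return the task's context id (ASID) to the pool */ |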
| void arch_task_killed(task_t *task) |
| { |
| if (task) { |
| arch_task_release_context_id(task->context_id); |
| } |
| } |
| |
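| /* thread teardown hook: release per-thread architectural state */ |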
| void arch_task_thread_killed(thread_t *thread) |
| { |
| if (thread) { |
| #if ARM_WITH_NEON |
| /* Need to free the NEON context when thread dies */ |
| if (thread->arch.fpctx) { |
| free(thread->arch.fpctx); |
| thread->arch.fpctx = NULL; |
| } |
| #endif |
| } |
| } |