tlk: 6/19 update
[3rdparty/ote_partner/tlk.git] / kernel / task.c
/*
 * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <debug.h>
#include <sys/types.h>
#include <compiler.h>
#include <assert.h>
#include <string.h>
#include <malloc.h>
#include <err.h>
#include <stdlib.h>
#include <arch.h>
#include <arch/arm.h>
#include <arch/arm/mmu.h>
#include <kernel/task.h>
#include <kernel/thread.h>
#include <kernel/elf.h>
#include <platform.h>
#include <platform/platform_p.h>
#include <kernel/task_load.h>

/*! page aligned area for storing static task headers before heap is initialized */
#define TASK_LIST_CARVEOUT_PAGES 1

/*! max number of tasks embedded in the TLK task image */
#define MAX_STATIC_TASK_COUNT ((TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE) / sizeof(task_t))
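
/*
 * Worked example (illustrative; sizeof(task_t) = 256 is an assumed
 * value, not taken from this source): with PAGE_SIZE = 4096, one
 * carveout page holds 4096 / 256 = 16 static task headers.
 */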

/* task list and used index */
static u_int task_next_index;   /* next task index [ 0..N ] */
static task_t *task_blist;      /* boot time fixed size task list */

/* task_blist is converted to task_list after heap is initialized */
static struct list_node task_list;

static char *task_image_start;
static char *task_image_end;
static u_int task_image_size;

extern u_int __tasks_start;
extern u_int __tasks_end;

extern int _end;        /* end of binary &_end (heap starts after this) */
extern int _heap_end;   /* heap ends here, adjusted by carve-outs below */

/* memory carved off from the top (before heap_init) */
#define carveout_taskmem        _heap_end

static status_t task_load_config_options(u_int task_image_addr, task_t *taskp, Elf32_Shdr *shdr)
{
        status_t err = NO_ERROR;
        OTE_MANIFEST  *manifest;
        u_int *config_blob, config_blob_size;
        u_int i;

        if (shdr->sh_size < offsetof(OTE_MANIFEST, config_options)) {
                err = ERR_NOT_VALID;
                goto exit;
        }

        /* init default config options before parsing manifest */
        taskp->props.min_heap_size = 5 * PAGE_SIZE;
        taskp->props.min_stack_size = DEFAULT_STACK_SIZE;

        manifest = (OTE_MANIFEST *)(task_image_addr + shdr->sh_offset);

        /*
         * The informative name field may be zero-filled; only non-zero
         * names are used. Task loading may also override this field value.
         */
        memcpy(&taskp->task_name[0], &manifest->name[0], sizeof(taskp->task_name));

        /*
         * Copy TA specific config data (optional field, define semantics per task).
         * E.g. it could hold a SHA1 digest of something you wish to load into
         * the task at runtime.
         */
        memcpy(&taskp->task_private_data[0], &manifest->private_data[0],
               sizeof(taskp->task_private_data));

        memcpy(&taskp->props.uuid, &manifest->uuid, sizeof(te_service_id_t));

        task_print_uuid(SPEW, "task load uuid = ", taskp);

        config_blob = (u_int *)((char *)manifest + offsetof(OTE_MANIFEST, config_options));
        config_blob_size = (shdr->sh_size - offsetof(OTE_MANIFEST, config_options));

        taskp->props.config_entry_cnt = config_blob_size / sizeof(u_int);

        /* if there are no config options, we're done */
        if (taskp->props.config_entry_cnt != 0) {

                /* save off configuration blob start so it can be accessed later */
                taskp->props.config_blob = config_blob;

                /*
                 * Step thru configuration blob.
                 *
                 * Save off some configuration data while we are here but
                 * defer processing of other data until it is needed later.
                 */
                for (i = 0; i < taskp->props.config_entry_cnt; i++) {
                        switch (config_blob[i]) {
                        case OTE_CONFIG_KEY_MIN_STACK_SIZE:
                                /* MIN_STACK_SIZE takes 1 data value */
                                if ((taskp->props.config_entry_cnt - i) <= 1) {
                                        err = ERR_NOT_VALID;
                                        goto exit;
                                }
                                taskp->props.min_stack_size =
                                        ROUNDUP(config_blob[++i], 4096);
                                if (taskp->props.min_stack_size <= 0) {
                                        err = ERR_NOT_VALID;
                                        goto exit;
                                }
                                break;
                        case OTE_CONFIG_KEY_MIN_HEAP_SIZE:
                                /* MIN_HEAP_SIZE takes 1 data value */
                                if ((taskp->props.config_entry_cnt - i) <= 1) {
                                        err = ERR_NOT_VALID;
                                        goto exit;
                                }
                                taskp->props.min_heap_size =
                                        ROUNDUP(config_blob[++i], 4096);
                                if (taskp->props.min_heap_size <= 0) {
                                        err = ERR_NOT_VALID;
                                        goto exit;
                                }
                                break;
                        case OTE_CONFIG_KEY_MAP_MEM:
                                /* MAP_MEM takes 3 data values */
                                if ((taskp->props.config_entry_cnt - i) <= 3) {
                                        err = ERR_NOT_VALID;
                                        goto exit;
                                }
                                taskp->props.map_io_mem_cnt++;
                                i += 3;
                                break;
                        case OTE_CONFIG_KEY_RESTRICT_ACCESS:
                                /* set clients whose access is restricted */
                                if ((taskp->props.config_entry_cnt - i) <= 1) {
                                        err = ERR_NOT_VALID;
                                        goto exit;
                                }
                                taskp->props.restrict_access = config_blob[++i];
                                break;
                        case OTE_CONFIG_KEY_AUTHORIZE:
                                /* tasks which are authorized to perform restricted operations */
                                if ((taskp->props.config_entry_cnt - i) <= 1) {
                                        err = ERR_NOT_VALID;
                                        goto exit;
                                }
                                taskp->props.authorizations = config_blob[++i];
                                break;
                        case OTE_CONFIG_KEY_TASK_ISTATE:
                                /* task initial state attributes */
                                if ((taskp->props.config_entry_cnt - i) <= 1) {
                                        err = ERR_NOT_VALID;
                                        goto exit;
                                }
                                taskp->props.initial_state = config_blob[++i];
                                break;
                        default:
                                dprintf(CRITICAL,
                                        "%s: unknown OTE_CONFIG_KEY_VALUE: %d\n",
                                        __func__, config_blob[i]);
                                err = ERR_NOT_VALID;
                                goto exit;
                        }
                }
        }

        if (0) {
        exit:
                if (err == NO_ERROR) {
                        err = ERR_NOT_VALID;
                }
        }
        return err;
}
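
/*
 * Illustrative manifest sketch (not part of the original source; the
 * key names are real, the values are invented). The config blob parsed
 * above is a flat u_int array where each key is followed by its data
 * words (one word for most keys, three for OTE_CONFIG_KEY_MAP_MEM):
 *
 *   static const u_int example_config[] = {
 *           OTE_CONFIG_KEY_MIN_STACK_SIZE, 2 * 4096,
 *           OTE_CONFIG_KEY_MIN_HEAP_SIZE,  8 * 4096,
 *           OTE_CONFIG_KEY_MAP_MEM,        1, 0x70000000, 0x1000,
 *           OTE_CONFIG_KEY_TASK_ISTATE,    OTE_MANIFEST_TASK_ISTATE_BLOCKED,
 *   };
 *
 * task_load_config_options() records the stack/heap minimums (rounded
 * up to 4 KB) immediately and defers MAP_MEM entries to
 * task_setup_mmio().
 */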

static void task_setup_mmio(task_t *taskp)
{
        u_int i;
        u_int id, offset, size;

        /* step thru configuration blob looking for I/O mapping requests */
        for (i = 0; i < taskp->props.config_entry_cnt; i++) {
                if (taskp->props.config_blob[i] == OTE_CONFIG_KEY_MAP_MEM) {
                        /* found one; setup mapping to io range */
                        id = taskp->props.config_blob[++i];
                        offset = taskp->props.config_blob[++i];
                        size = taskp->props.config_blob[++i];

                        arch_task_setup_mmio(taskp, id, offset, size);
                } else {
                        /* all other config options take 1 data value */
                        i++;
                }
        }
}

static void task_set_valloc_start(task_t *taskp)
{
        struct list_node *node;
        task_map_t *mptr;

        /*
         * Dynamic allocs start after the static alloc preceding the
         * stack (expected to be called before dynamic allocs begin).
         */
        node = &taskp->stack_map->node;
        taskp->valloc_list = list_prev(node, node);

        mptr = containerof(taskp->valloc_list, task_map_t, node);
        taskp->valloc_start = mptr->vaddr + mptr->size;
        taskp->valloc_end = taskp->stack_map->vaddr;
}
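
/*
 * Illustrative sketch of the resulting user address space (derived
 * from the code above; the ordering is the only assumption):
 *
 *   TASK_START_ADDR ...   PT_LOAD segments and brk region (static)
 *   valloc_start      =   end of the last mapping before the stack
 *   valloc_end        =   bottom of the stack mapping
 *   TASK_STACK_ADDR       top of the stack (stack grows down)
 *
 * Dynamic allocations (e.g. MMIO windows) are placed in the window
 * [valloc_start, valloc_end) by task_find_address_space() below.
 */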

addr_t task_find_address_space(task_t *taskp, u_int size, u_int align)
{
        addr_t astart, aend;
        task_map_t *mptr;

        astart = ROUNDUP(taskp->valloc_start, align);
        aend = astart + size;

        /* find first fit */
        list_for_every_entry(taskp->valloc_list, mptr, task_map_t, node) {
                if (aend < mptr->vaddr)
                        break;  /* fits before mptr alloc */
                if (mptr->vaddr == taskp->valloc_end) {
                        /* hit end without finding space */
                        dprintf(CRITICAL, "failed to find task address space\n");
                        return 0;
                }
                astart = ROUNDUP((mptr->vaddr + mptr->size), align);
                aend = astart + size;
        }
        ASSERT(!(astart & (align - 1)));
        return astart;
}
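
/*
 * Usage sketch (illustrative, not from the original source): carving a
 * page-aligned hole for a new dynamic mapping.
 *
 *   addr_t va = task_find_address_space(taskp, 4 * PAGE_SIZE, PAGE_SIZE);
 *   if (va == 0)
 *           return ERR_NO_MEMORY;   // no gap large enough
 *   // va is the lowest aligned first-fit gap in the valloc window
 */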

void task_add_mapping(task_t *taskp, task_map_t *new_mptr)
{
        task_map_t *mptr;

        ASSERT(taskp);
        ASSERT(new_mptr);
        ASSERT(new_mptr->vaddr && new_mptr->size);
        list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) {
                if (mptr->vaddr > new_mptr->vaddr) {
                        ASSERT((new_mptr->vaddr + new_mptr->size) <= mptr->vaddr);
                        list_add_before(&mptr->node, &new_mptr->node);
                        return;
                }
        }
        list_add_tail(&taskp->map_list, &new_mptr->node);
}
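
/*
 * Minimal caller sketch (illustrative only; error handling omitted).
 * task_add_mapping() keeps map_list sorted by vaddr, and the ASSERT
 * above rejects a new mapping that overlaps its higher neighbor:
 *
 *   task_map_t *m = malloc(sizeof(task_map_t));
 *   m->vaddr  = va;                 // e.g. from task_find_address_space()
 *   m->size   = 4 * PAGE_SIZE;
 *   m->flags  = TM_UR | TM_UW | TM_PHYS_CONTIG;
 *   m->offset = 0;
 *   m->map_attrs = NULL;
 *   task_add_mapping(taskp, m);
 */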

void task_delete_mapping(task_t *taskp, task_map_t *mptr)
{
        list_delete(&mptr->node);
}

task_map_t *task_find_mapping(task_t *taskp, addr_t vaddr, u_int size)
{
        task_map_t *mptr;

        list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) {
                if ((mptr->vaddr <= vaddr) &&
                    ((mptr->vaddr + mptr->size) >= (vaddr + size))) {
                        return mptr;
                }
        }

        dprintf(CRITICAL,
                "task %d: vaddr 0x%08x for 0x%08x bytes not mapped\n",
                taskp->task_index, (u_int)vaddr, size);
        return NULL;
}

task_map_t *task_find_mapping_by_id(task_t *taskp, u_int id)
{
        task_map_t *mptr;

        list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) {
                if (mptr->id == id)
                        return mptr;
        }

        return NULL;
}

status_t task_get_physaddr(task_t *taskp, addr_t vaddr, paddr_t *paddr)
{
        task_map_t *mptr;

        mptr = task_find_mapping(taskp, vaddr, 0);
        if (mptr == NULL)
                return ERR_INVALID_ARGS;

        if (mptr->flags & TM_PHYS_CONTIG) {
                *paddr = mptr->u_phys.contig + (vaddr - mptr->vaddr);
        } else {
                uint32_t pageno = (vaddr - mptr->vaddr) / PAGE_SIZE;
                *paddr = mptr->u_phys.pagelist[pageno] + (vaddr & PAGE_MASK);
        }
        return NO_ERROR;
}
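
/*
 * Worked example for the pagelist branch above (numbers are
 * hypothetical): a non-contiguous mapping at vaddr 0x10000000 queried
 * at 0x10001234 gives pageno = 0x1234 / PAGE_SIZE = 1, and since
 * PAGE_MASK here is PAGE_SIZE - 1, *paddr = pagelist[1] + 0x234.
 */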

bool task_valid_address(vaddr_t addr, u_int size)
{
        task_t *taskp;
        task_map_t *mptr;

        taskp = current_thread->arch.task;
        mptr = task_find_mapping(taskp, addr, size);
        return !!mptr;
}

static status_t task_init_stack(task_t *taskp)
{
        task_map_t *mptr;

        mptr = malloc(sizeof(task_map_t));
        if (mptr == NULL)
                return ERR_NO_MEMORY;

        mptr->size  = taskp->props.min_stack_size;
        mptr->u_phys.contig = (addr_t) memalign(PAGE_SIZE, mptr->size);
        if (mptr->u_phys.contig == NULL) {
                free(mptr);
                return ERR_NO_MEMORY;
        }

        mptr->u_phys.contig = virtual_to_physical(mptr->u_phys.contig);

        mptr->vaddr = TASK_STACK_ADDR - mptr->size;
        mptr->flags = (TM_UW | TM_UR | TM_PHYS_CONTIG);
        mptr->offset = 0;
        mptr->map_attrs = NULL;

        taskp->stack_map = mptr;
        task_add_mapping(taskp, mptr);

        dprintf(SPEW,
                "task %d: stack vaddr 0x%08lx, paddr 0x%08llx, msize 0x%08x\n",
                taskp->task_index, mptr->vaddr, (uint64_t)mptr->u_phys.contig, mptr->size);

        return NO_ERROR;
}
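
/*
 * Layout note (sizes illustrative): with min_stack_size = 8 KB the
 * mapping above spans [TASK_STACK_ADDR - 0x2000, TASK_STACK_ADDR),
 * backed by physically contiguous pages and mapped user read/write;
 * the user stack presumably starts at TASK_STACK_ADDR and grows down
 * into it.
 */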

static status_t task_init_brk(u_int task_image_addr, task_t *taskp, Elf32_Ehdr *ehdr)
{
        task_map_t *mptr;
        vaddr_t vbrk;
        uint32_t brklen;

        /* find mapping in which brk resides */
        mptr = task_find_mapping(taskp, taskp->start_brk, 0);
        if (mptr == NULL) {
                dprintf(CRITICAL, "task failed to find brk in mappings\n");
                halt();
        }

        /* what's leftover in the mapping goes to brk */
        taskp->curr_brk = taskp->start_brk;
        taskp->end_brk  = taskp->start_brk +
                ((mptr->vaddr + mptr->size) - taskp->start_brk);

        /* mmap expects MAP_ANONYMOUS to be zeros */
        vbrk = physical_to_virtual(mptr->u_phys.contig) +
                (taskp->start_brk - mptr->vaddr);
        brklen = taskp->end_brk - taskp->curr_brk;

        memset((void *)vbrk, 0, brklen);

        platform_clean_invalidate_cache_range(vbrk, brklen);

        /* increase user mode heap (if not enough remains) */
        if ((taskp->end_brk - taskp->curr_brk) < taskp->props.min_heap_size) {
                mptr = malloc(sizeof(task_map_t));
                if (mptr == NULL)
                        return ERR_NO_MEMORY;

                mptr->size  = taskp->props.min_heap_size;
                mptr->u_phys.contig = (addr_t) memalign(PAGE_SIZE, mptr->size);
                if (mptr->u_phys.contig == NULL) {
                        free(mptr);
                        return ERR_NO_MEMORY;
                }

                /* mmap expects MAP_ANONYMOUS to be zeros */
                memset((void *)(addr_t)mptr->u_phys.contig, 0, mptr->size);

                /* mptr->u_phys.contig is still a virtual address at this point */
                platform_clean_invalidate_cache_range(mptr->u_phys.contig, mptr->size);

                mptr->u_phys.contig = virtual_to_physical(mptr->u_phys.contig);

                mptr->vaddr = taskp->end_brk;
                mptr->flags = (TM_UW | TM_UR | TM_PHYS_CONTIG);
                mptr->offset = 0;
                mptr->map_attrs = NULL;

                task_add_mapping(taskp, mptr);

                taskp->end_brk += mptr->size;
        }

        dprintf(SPEW,
                "task %d: brk vaddr 0x%08lx, msize 0x%08x\n",
                taskp->task_index, taskp->start_brk,
                (u_int)(taskp->end_brk - taskp->start_brk));

        return NO_ERROR;
}
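
/*
 * Sketch of how a user brk request could be served from the window set
 * up above (hypothetical helper, not part of this source):
 *
 *   static status_t example_sys_brk(task_t *taskp, vaddr_t new_brk)
 *   {
 *           if (new_brk < taskp->start_brk || new_brk > taskp->end_brk)
 *                   return ERR_INVALID_ARGS;  // outside the brk window
 *           taskp->curr_brk = new_brk;  // pages already mapped and zeroed
 *           return NO_ERROR;
 *   }
 */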

static status_t task_alloc_address_map(task_t *taskp)
{
        Elf32_Ehdr *elf_hdr;
        Elf32_Phdr *prg_hdr;
        u_int i;
        u_int task_image_addr;
        task_map_t *mptr;
        status_t ret;

        elf_hdr = taskp->elf_hdr;
        task_image_addr = (u_int)elf_hdr;

        taskp->start_code = ~0;

        /* alloc user stack */
        ret = task_init_stack(taskp);
        if (ret != NO_ERROR) {
                dprintf(CRITICAL, "failed to load task: stack creation error\n");
                return ret;
        }

        /* create mappings for PT_LOAD sections */
        for (i = 0; i < elf_hdr->e_phnum; i++) {
                addr_t first, last, last_mem;

                prg_hdr = (Elf32_Phdr *)((u_int)elf_hdr + elf_hdr->e_phoff +
                                (i * sizeof(Elf32_Phdr)));

                if (prg_hdr->p_type != PT_LOAD)
                        continue;

                /* skip PT_LOAD if it's below task start or above .bss */
                if ((prg_hdr->p_vaddr < TASK_START_ADDR) ||
                    (prg_hdr->p_vaddr >= taskp->end_bss))
                        continue;

                /*
                 * We expect to execute the task in-place, meaning its
                 * PT_LOAD segments should be page-aligned.
                 */
                /* XXX TODO: convert this assert to error return later */
                ASSERT(!(prg_hdr->p_vaddr & PAGE_MASK) &&
                       !(prg_hdr->p_offset & PAGE_MASK));

                mptr = malloc(sizeof(task_map_t));
                if (mptr == NULL)
                        return ERR_NO_MEMORY;

                mptr->size = (prg_hdr->p_memsz + PAGE_MASK) & ~PAGE_MASK;
                mptr->u_phys.contig = virtual_to_physical(task_image_addr) + prg_hdr->p_offset;
                mptr->vaddr = prg_hdr->p_vaddr;
                mptr->flags = (prg_hdr->p_flags & PF_FLAG_MASK) | TM_PHYS_CONTIG;
                mptr->offset = 0;
                mptr->map_attrs = NULL;

                task_add_mapping(taskp, mptr);

                /* check for overlap into user stack range */
                if ((TASK_STACK_ADDR - taskp->stack_map->size) < (mptr->vaddr + mptr->size)) {
                        dprintf(CRITICAL,
                                "failed to load task: (overlaps user stack 0x%lx)\n",
                                TASK_STACK_ADDR - taskp->stack_map->size);
                        return ERR_TOO_BIG;
                }

                dprintf(SPEW,
                        "task %d: load vaddr 0x%08lx, paddr 0x%08llx"
                        " rsize 0x%08x, msize 0x%08x, flags 0x%08x\n",
                        taskp->task_index, mptr->vaddr, (uint64_t)mptr->u_phys.contig,
                        mptr->size, prg_hdr->p_memsz, mptr->flags);

                /* start of code/data */
                first = prg_hdr->p_vaddr;
                if (first < taskp->start_code)
                        taskp->start_code = first;
                if (taskp->start_data < first)
                        taskp->start_data = first;

                /* end of code/data */
                last = prg_hdr->p_vaddr + prg_hdr->p_filesz;
                if ((prg_hdr->p_flags & PF_X) && taskp->end_code < last)
                        taskp->end_code = last;
                if (taskp->end_data < last)
                        taskp->end_data = last;

                /* end of brk */
                last_mem = prg_hdr->p_vaddr + prg_hdr->p_memsz;
                if (last_mem > taskp->start_brk)
                        taskp->start_brk = last_mem;
        }

        ret = task_init_brk(task_image_addr, taskp, elf_hdr);
        if (ret != NO_ERROR) {
                dprintf(CRITICAL, "failed to load task: task heap creation error\n");
                return ret;
        }

        dprintf(SPEW, "task %d: code: start 0x%08lx end 0x%08lx\n",
                taskp->task_index, taskp->start_code, taskp->end_code);
        dprintf(SPEW, "task %d: data: start 0x%08lx end 0x%08lx\n",
                taskp->task_index, taskp->start_data, taskp->end_data);
        dprintf(SPEW, "task %d: bss:                end 0x%08lx\n",
                taskp->task_index, taskp->end_bss);
        dprintf(SPEW, "task %d: brk:  start 0x%08lx end 0x%08lx\n",
                taskp->task_index, taskp->start_brk, taskp->end_brk);

        taskp->entry = elf_hdr->e_entry;
        dprintf(SPEW, "task %d: entry 0x%08lx\n", taskp->task_index, taskp->entry);

        return NO_ERROR;
}

/*
 * Align the next task to a page boundary, by copying what remains
 * in the task image to the aligned next task start. This should be
 * called after we're done with the section headers, as the previous
 * task's .shstrtab section will be clobbered.
 *
 * Note: task_image_size still tracks the carved-out region in LK used
 * to exit the bootloader loop, so it is still decremented by the full
 * max_extent. Because of the copy down to an aligned next task addr,
 * task_image_size is more than what we're actually using.
 */
static char *task_align_next_task(Elf32_Ehdr *elf_hdr, Elf32_Shdr *pad_hdr)
{
        char *next_task_align_start;
        char *next_task_fsize_start;
        char *task_image_addr;
        u_int copy_size;
        u_int max_extent;

        ASSERT(pad_hdr);
        ASSERT(elf_hdr);

        task_image_addr = (char *)elf_hdr;
        max_extent = (elf_hdr->e_shoff + (elf_hdr->e_shnum * elf_hdr->e_shentsize)) - 1;
        ASSERT((task_image_addr + max_extent + 1) <= task_image_end);

        next_task_align_start = task_image_addr + pad_hdr->sh_offset + pad_hdr->sh_size;
        next_task_fsize_start = task_image_addr + max_extent + 1;
        ASSERT(next_task_align_start <= next_task_fsize_start);

        copy_size = task_image_end - next_task_fsize_start;
        if (copy_size) {
                /*
                 * Copy remaining image bytes to aligned start for the next
                 * (and subsequent) tasks. Also decrement task_image_end, so
                 * we copy less each time we realign for the next task.
                 */
                memcpy(next_task_align_start, next_task_fsize_start, copy_size);

                platform_clean_invalidate_cache_range((addr_t)next_task_align_start,
                                copy_size);

                task_image_end -= (next_task_fsize_start - next_task_align_start);
        }

        task_image_size -= (max_extent + 1);
        return next_task_align_start;
}
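
/*
 * Worked example (offsets hypothetical): if a task's .bss-pad ends at
 * image offset 0x5000 (page aligned) but its section headers run to
 * max_extent 0x541f, then next_task_fsize_start is base + 0x5420 and
 * the remaining image bytes are copied down to base + 0x5000, sliding
 * task_image_end down by 0x420 bytes.
 */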

status_t task_prepare(char *task_addr, u_int task_size, task_t *taskp,
                      Elf32_Shdr **bss_pad_shdr_p, task_type_t task_type)
{
        status_t    err = NO_ERROR;
        Elf32_Ehdr *ehdr = NULL;
        Elf32_Shdr *shdr = NULL;
        Elf32_Shdr *bss_shdr = NULL;
        Elf32_Shdr *bss_pad_shdr = NULL;
        Elf32_Shdr *manifest_shdr = NULL;
        char       *shstbl = NULL;
        vaddr_t     bss_addr = 0;
        u_int       i = 0;
        u_int       task_max_extent = 0;

        if (!task_addr || !taskp || task_size == 0) {
                err = ERR_INVALID_ARGS;
                goto exit;
        }

        /* For the preloaded tasks: the size includes this task and all
         * other tasks that follow in the same image.
         */
        dprintf(SPEW, "%s task: start %p size %d (0x%08x)\n",
                __func__, task_addr, task_size, task_size);

        ehdr = (Elf32_Ehdr *) task_addr;
        if (strncmp((char *)ehdr->e_ident, ELFMAG, SELFMAG)) {
                dprintf(CRITICAL, "%s: ELF header not found\n",
                        __func__);
                err = ERR_NOT_VALID;
                goto exit;
        }

        if (bss_pad_shdr_p)
                *bss_pad_shdr_p = NULL;

        shdr   = (Elf32_Shdr *)((u_int)ehdr + ehdr->e_shoff);
        shstbl = (char *)((u_int)ehdr + shdr[ehdr->e_shstrndx].sh_offset);

        bss_shdr = bss_pad_shdr = manifest_shdr = NULL;

        /* calculate task end */
        for (i = 0; i < ehdr->e_shnum; i++) {
                u_int extent;

                if (shdr[i].sh_type == SHT_NULL)
                        continue;
#if 0
                dprintf(CRITICAL, "task: sect %d, off 0x%08x, size 0x%08x, name %s\n",
                        i, shdr[i].sh_offset, shdr[i].sh_size, shstbl + shdr[i].sh_name);
#endif

                /* track bss and manifest sections */
                if (!strcmp((shstbl + shdr[i].sh_name), ".bss")) {
                        bss_shdr = shdr + i;
                        taskp->end_bss = bss_shdr->sh_addr + bss_shdr->sh_size;
                }
                else if (!strcmp((shstbl + shdr[i].sh_name), ".bss-pad")) {
                        bss_pad_shdr = shdr + i;
                }
                else if (!strcmp((shstbl + shdr[i].sh_name),
                                 ".ote.manifest")) {
                        manifest_shdr = shdr + i;
                }

                if (shdr[i].sh_type != SHT_NOBITS) {
                        extent = shdr[i].sh_offset + shdr[i].sh_size;
                        if (task_max_extent < extent)
                                task_max_extent = extent;
                }
        }

        /*
         * We need these sections.
         * The manifest is handled later.
         */
        if (!bss_shdr || !bss_pad_shdr) {
                dprintf(CRITICAL, "%s: Invalid task image\n", __func__);
                err = ERR_NOT_VALID;
                goto exit;
        }

        if (bss_pad_shdr_p)
                *bss_pad_shdr_p = bss_pad_shdr;

        if ((bss_shdr->sh_offset + bss_shdr->sh_size) > task_max_extent) {
                dprintf(CRITICAL, "%s: Invalid task image\n", __func__);
                err = ERR_NOT_VALID;
                goto exit;
        }

        if (ROUNDUP(task_max_extent, 4) != ehdr->e_shoff) {
                dprintf(CRITICAL, "%s: Invalid task image\n", __func__);
                err = ERR_NOT_VALID;
                goto exit;
        }

        /* clear .bss */
        bss_addr = (vaddr_t)(task_addr + bss_shdr->sh_offset);

        memset((uint8_t *)bss_addr, 0, bss_shdr->sh_size);

        platform_clean_invalidate_cache_range(bss_addr, bss_shdr->sh_size);

        /* let the caller decide if it can handle binaries without a manifest */
        if (manifest_shdr == NULL) {
                taskp->props.manifest_exists = 0;
        } else {
                taskp->props.manifest_exists = 1;
                err = task_load_config_options((u_int)task_addr, taskp, manifest_shdr);
                if (err != NO_ERROR) {
                        dprintf(CRITICAL, "Invalid task manifest: 0x%x\n", err);
                        goto exit;
                }
        }

        taskp->elf_hdr = ehdr;
        taskp->task_size = task_size;

        taskp->task_type  = task_type;
        taskp->task_state = TASK_STATE_INIT;

        if (0) {
        exit:
                if (err == NO_ERROR)
                        err = ERR_GENERIC;
        }

        return err;
}
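
/*
 * Usage sketch for a dynamically loaded task (illustrative; the real
 * loader lives in the task loading code, and the TASK_TYPE_LOADED name
 * is an assumption, not taken from this file):
 *
 *   task_t task;
 *   Elf32_Shdr *bss_pad;
 *   memset(&task, 0, sizeof(task));
 *   err = task_prepare(image, image_size, &task, &bss_pad,
 *                      TASK_TYPE_LOADED);
 *   // on NO_ERROR the task is in TASK_STATE_INIT with .bss cleared
 *   // and its manifest (if any) parsed into task.props
 */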

/*
 * Carve out memory for task headers.
 * Called before heap_init.
 *
 * The task headers are converted to a list after the heap is initialized.
 */
static void task_mem_init()
{
        if (task_image_size != 0) {
                carveout_taskmem &= ~PAGE_MASK;

                /* list of tasks (static and loaded) */
                carveout_taskmem -= (TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE);
                task_blist = (task_t *)carveout_taskmem;

                task_load_config((vaddr_t)&_end,
                                 (vaddr_t *)&carveout_taskmem);
        }

        ASSERT(!(carveout_taskmem & PAGE_MASK));
}

/*
 * Look in the kernel's ELF header for task sections and
 * carve out memory for their LOAD-able sections. This is
 * called before heap_init.
 *
 * This sets up the built-in tasks; they are started later with task_init.
 */
static void task_bootloader()
{
        char       *task_image_addr  = NULL;
        task_t     *taskp            = NULL;
        status_t    err              = NO_ERROR;

        dprintf(SPEW, "static tasks: start %p size 0x%08x end %p\n",
                task_image_start, task_image_size, task_image_end);

        task_image_addr = task_image_start;

        task_mem_init();

        memset(task_blist, 0, TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE);

        taskp = task_blist;

        while (task_image_size > 0) {
                u_int i = 0;
                Elf32_Shdr *bss_pad_shdr = NULL;

                if ((task_next_index + 1) > MAX_STATIC_TASK_COUNT) {
                        dprintf(CRITICAL, "%s: Too many (%d) tasks in image\n",
                                __func__, task_next_index + 1);
                        halt();
                }

                err = task_prepare(task_image_addr, task_image_size,
                                   taskp, &bss_pad_shdr, TASK_TYPE_STATIC);

                /* statically loaded tasks must run or the system halts */
                if (err != NO_ERROR) {
                        dprintf(CRITICAL, "%s: task#%u preparation failed (%d)\n",
                                __func__, task_next_index, err);
                        halt();
                }

                /* The size passed to task_prepare above can be larger than the
                 * actual task size in memory (it is larger unless this is the
                 * last task of the image), so fix up the task size here.
                 *
                 * The BSS-PAD section is the last accepted PT_LOAD elf section
                 * of the secure task, so the task's actual memory size can be
                 * calculated as below.
                 */
                taskp->task_size = bss_pad_shdr->sh_offset + bss_pad_shdr->sh_size;

                /* static tasks must contain a manifest section */
                if (!taskp->props.manifest_exists) {
                        dprintf(CRITICAL, "%s: Invalid task#%u in image, no manifest\n",
                                __func__, task_next_index);
                        halt();
                }

                /*
                 * Make sure the UUID doesn't already exist.  Note that
                 * this search won't include the task we are processing
                 * here because task_next_index hasn't been incremented yet.
                 *
                 * task_find_task_by_uuid() cannot yet be used.
                 */
                for (i = 0; i < task_next_index; i++) {
                        task_t *ts = &task_blist[i];
                        if (!memcmp(&ts->props.uuid, &taskp->props.uuid, sizeof(te_service_id_t))) {
                                dprintf(CRITICAL, "%s: task#%u duplicate UUID found!\n",
                                        __func__, task_next_index);
                                halt();
                        }
                }

                /*
                 * The next tasks in the image are moved down to the next free
                 * page aligned address after the current task.
                 */
                task_image_addr = task_align_next_task(taskp->elf_hdr, bss_pad_shdr);

                taskp->task_index = task_next_index++;
                taskp++;
        }
}

void task_early_init()
{
        task_image_start = (char *)&__tasks_start;
        task_image_end = (char *)&__tasks_end;
        task_image_size = (task_image_end - task_image_start);

        ASSERT(!((uint32_t)task_image_start & PAGE_MASK));

        task_bootloader();
}

status_t task_init_one_task(task_t *task)
{
        status_t err = NO_ERROR;
        char name[32];
        thread_t *thread;
        const char *state_str = "(unknown)";

        if (!task || task->task_index >= task_next_index) {
                err = ERR_INVALID_ARGS;
                goto exit;
        }

        if (task->task_state != TASK_STATE_INIT) {
                dprintf(CRITICAL, "%s: Task not startable in state %d\n",
                        __func__, task->task_state);
                err = ERR_TASK_GENERIC;
                goto exit;
        }

        list_initialize(&task->map_list);
        list_initialize(&task->thread_node);

        err = task_alloc_address_map(task);
        if (err != NO_ERROR) {
                dprintf(CRITICAL, "%s: failed to load address map\n",
                        __func__);
                goto exit;
        }

        /* setup dynamic alloc range */
        task_set_valloc_start(task);

        /* reserve mmio va ranges here */
        if (task->props.map_io_mem_cnt > 0) {
                task_setup_mmio(task);

                /* reset valloc start */
                task_set_valloc_start(task);
        }

        /* force zero-terminated thread names derived from the task context */
        if (task->task_name[0] != '\000') {
                snprintf(name, sizeof(name) - 1, "%s_%u_T0", task->task_name,
                         task->task_index);
        } else {
                snprintf(name, sizeof(name) - 1, "task_%u_T0", task->task_index);
        }
        name[sizeof(name) - 1] = '\000';

        thread = thread_create(name, (thread_start_routine)(task->entry), 0, LOW_PRIORITY, 4096);
        if (thread == NULL) {
                dprintf(CRITICAL, "%s: allocate user thread failed\n",
                        __func__);
                err = ERR_GENERIC;
                goto exit;
        }

        if (arch_task_init(thread, task) == false) {
                dprintf(CRITICAL, "%s: arch thread/task init failed\n",
                        __func__);
                err = ERR_GENERIC;
                goto exit;
        }

        if (task->props.initial_state & OTE_MANIFEST_TASK_ISTATE_BLOCKED) {
                task->task_state = TASK_STATE_BLOCKED;
                state_str = "blocked";
        } else {
                task->task_state = TASK_STATE_ACTIVE;
                state_str = "active";
        }

        /* start it */
        if (task->entry) {
                dprintf(INFO, "starting %s task#%u%s\n", state_str, task->task_index,
                        task_get_name_str(task, " (", ")", name, sizeof(name)));

                thread_resume(thread);
        }

        if (0) {
        exit:
                if (err == NO_ERROR)
                        err = ERR_GENERIC;
        }
        return err;
}

/*
 * Start static tasks initialized by task_early_init()
 *
 * Convert the boot time task_blist into run-time task_list.
 */
void task_init()
{
        status_t err = NO_ERROR;
        task_t *task;
        u_int i;

        task_image_start = NULL;
        task_image_end   = NULL;
        task_image_size  = 0;

        list_initialize(&task_list);

        /* convert the boot time task_blist into a run-time task_list */

        for (i = 0, task = task_blist; i < task_next_index; i++, task++) {
                task_t *taskp = malloc(sizeof(task_t));
                if (!taskp) {
                        dprintf(CRITICAL, "%s: out of memory -- halting\n", __func__);
                        halt();
                }
                memcpy(taskp, task, sizeof(task_t));

                err = task_init_one_task(taskp);
                if (err != NO_ERROR) {
                        dprintf(CRITICAL, "%s: static task start failed %d -- halting\n",
                                __func__, err);
                        halt();
                }

                list_add_tail(&task_list, &taskp->node);
        }

        /* boot time task header pages are no longer used */
        memset(task_blist, 0, TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE);
        task_blist = NULL;

        task_unload_init();
        task_load_init();
}

task_t *task_find_task_by_uuid(te_service_id_t *uuid)
{
        task_t *task = NULL;

        /* find task for this uuid */
        if (uuid) {
                list_for_every_entry(&task_list, task, task_t, node) {
                        if (task->task_state != TASK_STATE_UNKNOWN) {
                                if (!memcmp(&task->props.uuid, uuid, sizeof(te_service_id_t))) {
                                        return task;
                                }
                        }
                }
        }
        return NULL;
}

task_t *task_find_task_by_index(uint32_t index)
{
        task_t *task = NULL;

        if (index >= task_next_index)
                return NULL;

        list_for_every_entry(&task_list, task, task_t, node) {
                if (task->task_state != TASK_STATE_UNKNOWN) {
                        if (task->task_index == index) {
                                return task;
                        }
                }
        }
        return NULL;
}

const char *task_get_name_str(const task_t *task, const char *prefix, const char *suffix,
                              char *buf, uint32_t buflen)
{
        uint32_t pslen = 0;

        if (prefix)
                pslen += strlen(prefix);
        else
                prefix = "";

        if (suffix)
                pslen += strlen(suffix);
        else
                suffix = "";

        /* OTE_TASK_NAME_MAX_LENGTH includes the NUL character at end of task name */
        if (!task || !buf || buflen < (OTE_TASK_NAME_MAX_LENGTH + pslen) ||
            !task->task_name[0])
                return "";

        snprintf(buf, buflen, "%s%s%s", prefix, task->task_name, suffix);
        return buf;
}

void
task_print_uuid(uint32_t level, const char *prefix, const task_t *taskp)
{
        if (taskp) {
                char tp_name[OTE_TASK_NAME_MAX_LENGTH + 3];
                const te_service_id_t *uuid = &taskp->props.uuid;

                dprintf(level, "%s%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x%s\n",
                        (prefix    ? prefix     : ""),
                        uuid->time_low,
                        uuid->time_mid,
                        uuid->time_hi_and_version,
                        uuid->clock_seq_and_node[0],    /* clock_seq_hi_and_reserved */
                        uuid->clock_seq_and_node[1],    /* clock_seq_low */
                        uuid->clock_seq_and_node[2],
                        uuid->clock_seq_and_node[3],
                        uuid->clock_seq_and_node[4],
                        uuid->clock_seq_and_node[5],
                        uuid->clock_seq_and_node[6],
                        uuid->clock_seq_and_node[7],
                        task_get_name_str(taskp, " (", ")", tp_name, sizeof(tp_name)));
        }
}
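
/*
 * Example output line (UUID and name are invented), matching the
 * format string above:
 *
 *   task load uuid = 01234567-89ab-cdef-0123456789abcdef (example)
 */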

/*
 * This is only used by task loading code but placed here because it modifies
 * task_next_index and the task_list when a new task is loaded.
 */
status_t task_register(task_t **task_p)
{
        status_t err  = NO_ERROR;
        task_t *dtask = NULL;

        if (!task_p || !*task_p) {
                err = ERR_INVALID_ARGS;
                goto exit;
        }

        dtask = malloc(sizeof(task_t));
        if (!dtask) {
                err = ERR_NO_MEMORY;
                dprintf(CRITICAL, "error allocating task header: 0x%x\n", err);
                goto exit;
        }

        memcpy(dtask, *task_p, sizeof(task_t));

        enter_critical_section();

        do {
                /*
                 * Make sure UUID doesn't already exist.
                 */
                if (task_find_task_by_uuid(&dtask->props.uuid) != NULL) {
                        err = ERR_ALREADY_EXISTS;
                        break;
                }

                /* Current task index now reserved for this task */
                dtask->task_index = task_next_index++;

                list_add_tail(&task_list, &dtask->node);
        } while (0);

        exit_critical_section();

        if (err != NO_ERROR)
                goto exit;

        memset(*task_p, 0, sizeof(task_t));

        /* swap *task_p to point to the registered object */
        *task_p = dtask;

        if (0) {
        exit:
                if (dtask)
                        free(dtask);
        }
        return err;
}
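
/*
 * Usage sketch (illustrative): the loader builds a temporary task_t,
 * then registers it to obtain the long-lived, listed copy:
 *
 *   task_t *taskp = &local_task;          // caller-owned scratch copy
 *   err = task_register(&taskp);
 *   if (err == NO_ERROR) {
 *           // taskp now points at the heap copy on task_list;
 *           // local_task was zeroed by task_register()
 *   }
 */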

u_int task_get_count()
{
        return task_next_index;
}

u_int task_get_active_count()
{
        task_t *task = NULL;
        int count = 0;

        list_for_every_entry(&task_list, task, task_t, node) {
                if ((task->task_state != TASK_STATE_UNKNOWN) &&
                    (task->task_state != TASK_STATE_TERMINATED) &&
                    (task->task_state != TASK_STATE_INIT)) {
                        count++;
                }
        }
        return count;
}