/*
 * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <debug.h>
#include <sys/types.h>
#include <compiler.h>
#include <assert.h>
#include <string.h>
#include <malloc.h>
#include <err.h>
#include <stdlib.h>
#include <arch.h>
#include <arch/arm.h>
#include <arch/arm/mmu.h>
#include <kernel/task.h>
#include <kernel/thread.h>
#include <kernel/elf.h>
#include <platform.h>
#include <platform/platform_p.h>

/* page aligned area for storing task headers */
#define TASK_LIST_CARVEOUT_PAGES 1

#define MAX_TASK_COUNT ((TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE) / sizeof(task_t))

/* task list and count */
static u_int task_count;
static task_t *task_list;

static char *task_image_start;
static char *task_image_end;
static u_int task_image_size;

extern u_int __tasks_start;
extern u_int __tasks_end;

extern int _heap_end;

/* memory carved off from the top (before heap_init) */
#define carveout_taskmem        _heap_end

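/*
 * Parse the task's manifest section: record the task UUID, set default
 * stack/heap sizes, then walk the config option blob and fill in the
 * task properties it describes.
 */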
static void task_load_config_options(u_int task_image_addr, task_t *taskp, Elf32_Shdr *shdr)
{
        OTE_MANIFEST  *manifest;
        u_int *config_blob, config_blob_size;
        u_int i;

        ASSERT(shdr->sh_size >= offsetof(OTE_MANIFEST, config_options));

        /* init default config options before parsing manifest */
        taskp->props.min_heap_size = 5 * PAGE_SIZE;
        taskp->props.min_stack_size = DEFAULT_STACK_SIZE;

        manifest = (OTE_MANIFEST *)(task_image_addr + shdr->sh_offset);

        memcpy(&taskp->props.uuid, &manifest->uuid, sizeof(te_service_id_t));

        task_print_uuid(SPEW, "task load uuid = ", taskp);

        config_blob = (u_int *)((char *)manifest + offsetof(OTE_MANIFEST, config_options));
        config_blob_size = (shdr->sh_size - offsetof(OTE_MANIFEST, config_options));

        taskp->props.config_entry_cnt = config_blob_size / sizeof (u_int);

        /* if there are no config options, we're done */
        if (taskp->props.config_entry_cnt == 0) {
                return;
        }

        /* save off the configuration blob start so it can be accessed later */
        taskp->props.config_blob = config_blob;

        /*
         * Step through the configuration blob.
         *
         * Save off some configuration data while we are here, but
         * defer processing of other data until it is needed later.
         */
        for (i = 0; i < taskp->props.config_entry_cnt; i++) {
                switch (config_blob[i]) {
                case OTE_CONFIG_KEY_MIN_STACK_SIZE:
                        /* MIN_STACK_SIZE takes 1 data value */
                        ASSERT((taskp->props.config_entry_cnt - i) > 1);
                        taskp->props.min_stack_size =
                                ROUNDUP(config_blob[++i], 4096);
                        ASSERT(taskp->props.min_stack_size > 0);
                        break;
                case OTE_CONFIG_KEY_MIN_HEAP_SIZE:
                        /* MIN_HEAP_SIZE takes 1 data value */
                        ASSERT((taskp->props.config_entry_cnt - i) > 1);
                        taskp->props.min_heap_size =
                                ROUNDUP(config_blob[++i], 4096);
                        ASSERT(taskp->props.min_heap_size > 0);
                        break;
                case OTE_CONFIG_KEY_MAP_MEM:
                        /* MAP_MEM takes 3 data values */
                        ASSERT((taskp->props.config_entry_cnt - i) > 3);
                        taskp->props.map_io_mem_cnt++;
                        i += 3;
                        break;
                case OTE_CONFIG_KEY_RESTRICT_ACCESS:
                        /* set which clients have restricted access */
                        ASSERT((taskp->props.config_entry_cnt - i) > 1);
                        taskp->props.restrict_access = config_blob[++i];
                        break;
                case OTE_CONFIG_KEY_INSTALL:
                        /* tasks that are allowed to install other tasks */
                        ASSERT((taskp->props.config_entry_cnt - i) > 1);
                        taskp->props.install_priv = config_blob[++i];
                        break;
                case OTE_CONFIG_KEY_IMMUTABLE:
                        /* prevents any manifest data overrides by the installer */
                        ASSERT((taskp->props.config_entry_cnt - i) > 1);
                        taskp->props.immutable = config_blob[++i];
                        break;
                default:
                        dprintf(CRITICAL,
                                "%s: unknown OTE_CONFIG_KEY_VALUE: %d\n",
                                __func__, config_blob[i]);
                        ASSERT(0);
                        i++;
                        break;
                }
        }
}

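/*
 * Walk the task's config blob and create an I/O mapping for each
 * OTE_CONFIG_KEY_MAP_MEM entry found.
 */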
static void task_setup_mmio(task_t *taskp)
{
        u_int i;
        u_int id, offset, size;

        /* step through the configuration blob looking for I/O mapping requests */
        for (i = 0; i < taskp->props.config_entry_cnt; i++) {
                if (taskp->props.config_blob[i] == OTE_CONFIG_KEY_MAP_MEM) {
                        /* found one; set up the mapping to the I/O range */
                        id = taskp->props.config_blob[++i];
                        offset = taskp->props.config_blob[++i];
                        size = taskp->props.config_blob[++i];

                        arch_task_setup_mmio(taskp, id, offset, size);
                } else {
                        /* all other config options take 1 data value */
                        i++;
                }
        }
}

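/*
 * Compute the window available for dynamic allocations: it starts after
 * the last static mapping below the stack and ends at the stack base.
 */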
static void task_set_valloc_start(task_t *taskp)
{
        struct list_node *node;
        task_map_t *mptr;

        /*
         * Dynamic allocations start after the static allocation that
         * precedes the stack (this is expected to be called before any
         * dynamic allocations are made).
         */
        node = &taskp->stack_map->node;
        taskp->valloc_list = list_prev(node, node);

        mptr = containerof(taskp->valloc_list, task_map_t, node);
        taskp->valloc_start = mptr->vaddr + mptr->size;
        taskp->valloc_end = taskp->stack_map->vaddr;
}

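/*
 * First-fit search of the task's dynamic allocation window for a free,
 * aligned range of the requested size. Returns 0 if no space is found.
 */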
addr_t task_find_address_space(task_t *taskp, u_int size, u_int align)
{
        addr_t astart, aend;
        task_map_t *mptr;

        astart = ROUNDUP(taskp->valloc_start, align);
        aend = astart + size;

        /* find first fit */
        list_for_every_entry(taskp->valloc_list, mptr, task_map_t, node) {
                if (aend < mptr->vaddr)
                        break;  /* fits before mptr alloc */
                if (mptr->vaddr == taskp->valloc_end) {
                        /* hit end without finding space */
                        dprintf(CRITICAL, "failed to find task address space\n");
                        return 0;
                }
                astart = ROUNDUP((mptr->vaddr + mptr->size), align);
                aend = astart + size;
        }
        ASSERT(!(astart & (align - 1)));
        return astart;
}

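/*
 * Insert a mapping into the task's map list, which is kept sorted by
 * virtual address. The new mapping must not overlap an existing one.
 */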
void task_add_mapping(task_t *taskp, task_map_t *new_mptr)
{
        task_map_t *mptr;

        ASSERT(new_mptr->vaddr && new_mptr->size);
        list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) {
                if (mptr->vaddr > new_mptr->vaddr) {
                        ASSERT((new_mptr->vaddr + new_mptr->size) <= mptr->vaddr);
                        list_add_before(&mptr->node, &new_mptr->node);
                        return;
                }
        }
        list_add_tail(&taskp->map_list, &new_mptr->node);
}

void task_delete_mapping(task_t *taskp, task_map_t *mptr)
{
        list_delete(&mptr->node);
}

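/* Return the mapping that fully contains [vaddr, vaddr + size), if any. */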
task_map_t *task_find_mapping(task_t *taskp, addr_t vaddr, u_int size)
{
        task_map_t *mptr;

        list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) {
                if ((mptr->vaddr <= vaddr) &&
                    ((mptr->vaddr + mptr->size) >= (vaddr + size))) {
                        return mptr;
                }
        }

        dprintf(CRITICAL,
                "task %d: vaddr 0x%08x for 0x%08x bytes not mapped\n",
                taskp->task_index, (u_int)vaddr, size);
        return NULL;
}

task_map_t *task_find_mapping_by_id(task_t *taskp, u_int id)
{
        task_map_t *mptr;

        list_for_every_entry(&taskp->map_list, mptr, task_map_t, node) {
                if (mptr->id == id)
                        return mptr;
        }

        return NULL;
}

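/* Translate a task virtual address into a physical address. */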
status_t task_get_physaddr(task_t *taskp, addr_t vaddr, paddr_t *paddr)
{
        task_map_t *mptr;

        mptr = task_find_mapping(taskp, vaddr, 0);
        if (mptr == NULL)
                return ERR_INVALID_ARGS;

        if (mptr->flags & TM_PHYS_CONTIG) {
                *paddr = mptr->u_phys.contig + (vaddr - mptr->vaddr);
        } else {
                uint32_t pageno = (vaddr - mptr->vaddr) / PAGE_SIZE;
                *paddr = mptr->u_phys.pagelist[pageno] + (vaddr & PAGE_MASK);
        }
        return NO_ERROR;
}

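/* Check that [addr, addr + size) is mapped in the current task. */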
bool task_valid_address(vaddr_t addr, u_int size)
{
        task_t *taskp;
        task_map_t *mptr;

        taskp = current_thread->arch.task;
        mptr = task_find_mapping(taskp, addr, size);
        return !!mptr;
}

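/*
 * Allocate a physically contiguous user stack of min_stack_size bytes
 * and map it so that it ends at TASK_STACK_ADDR.
 */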
static status_t task_init_stack(task_t *taskp)
{
        task_map_t *mptr;

        mptr = malloc(sizeof(task_map_t));
        if (mptr == NULL)
                return ERR_NO_MEMORY;

        mptr->size  = taskp->props.min_stack_size;
        mptr->u_phys.contig = (addr_t) memalign(PAGE_SIZE, mptr->size);
        if (mptr->u_phys.contig == NULL) {
                free (mptr);
                return ERR_NO_MEMORY;
        }

        mptr->u_phys.contig = virtual_to_physical(mptr->u_phys.contig);

        mptr->vaddr = TASK_STACK_ADDR - mptr->size;
        mptr->flags = (TM_UW | TM_UR | TM_PHYS_CONTIG);
        mptr->offset = 0;

        taskp->stack_map = mptr;
        task_add_mapping(taskp, mptr);

        dprintf(SPEW,
                "task %d: stack vaddr 0x%08lx, paddr 0x%08llx, msize 0x%08x\n",
                taskp->task_index, mptr->vaddr, (uint64_t)mptr->u_phys.contig, mptr->size);

        return NO_ERROR;
}

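/*
 * Set up the task's brk (user heap): whatever remains of the mapping
 * containing start_brk becomes the initial heap, and more pages are
 * allocated if that leaves less than min_heap_size.
 */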
static status_t task_init_brk(u_int task_image_addr, task_t *taskp, Elf32_Ehdr *ehdr)
{
        task_map_t *mptr;
        vaddr_t vbrk;
        uint32_t brklen;

        /* find the mapping in which brk resides */
        mptr = task_find_mapping(taskp, taskp->start_brk, 0);
        if (mptr == NULL) {
                dprintf(CRITICAL, "task failed to find brk in mappings\n");
                halt();
        }

        /* what's left over in the mapping goes to brk */
        taskp->curr_brk = taskp->start_brk;
        taskp->end_brk  = taskp->start_brk +
                ((mptr->vaddr + mptr->size) - taskp->start_brk);

        /* mmap expects MAP_ANONYMOUS memory to be zeroed */
        vbrk = physical_to_virtual(mptr->u_phys.contig) +
                (taskp->start_brk - mptr->vaddr);
        brklen = taskp->end_brk - taskp->curr_brk;

        memset((void *)vbrk, 0, brklen);

        platform_clean_invalidate_cache_range(vbrk, brklen);

        /* grow the user mode heap if not enough remains */
        if ((taskp->end_brk - taskp->curr_brk) < taskp->props.min_heap_size) {
                mptr = malloc(sizeof(task_map_t));
                if (mptr == NULL)
                        return ERR_NO_MEMORY;

                mptr->size  = taskp->props.min_heap_size;
                mptr->u_phys.contig = (addr_t) memalign(PAGE_SIZE, mptr->size);
                if (mptr->u_phys.contig == NULL) {
                        free(mptr);
                        return ERR_NO_MEMORY;
                }

                /* mmap expects MAP_ANONYMOUS memory to be zeroed */
                memset((void *)(addr_t)mptr->u_phys.contig, 0, mptr->size);

                /* u_phys.contig still holds a virtual address at this point */
                platform_clean_invalidate_cache_range(mptr->u_phys.contig, mptr->size);

                mptr->u_phys.contig = virtual_to_physical(mptr->u_phys.contig);

                mptr->vaddr = taskp->end_brk;
                mptr->flags = (TM_UW | TM_UR | TM_PHYS_CONTIG);
                mptr->offset = 0;
                task_add_mapping(taskp, mptr);

                taskp->end_brk += mptr->size;
        }

        dprintf(SPEW,
                "task %d: brk vaddr 0x%08lx, msize 0x%08x\n",
                taskp->task_index, taskp->start_brk,
                (u_int)(taskp->end_brk - taskp->start_brk));

        return NO_ERROR;
}

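/*
 * Build the task's address map from its ELF image: the user stack, one
 * mapping per PT_LOAD segment (the image is executed in place), and
 * the brk region.
 */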
static status_t task_alloc_address_map(task_t *taskp)
{
        Elf32_Ehdr *elf_hdr;
        Elf32_Phdr *prg_hdr;
        u_int i;
        u_int task_image_addr;
        task_map_t *mptr;
        status_t ret;

        elf_hdr = taskp->elf_hdr;
        task_image_addr = (u_int)elf_hdr;

        taskp->start_code = ~0;

        /* alloc user stack */
        ret = task_init_stack(taskp);
        if (ret != NO_ERROR) {
                dprintf(CRITICAL, "failed to load task: stack creation error\n");
                return ret;
        }

        /* create mappings for PT_LOAD segments */
        for (i = 0; i < elf_hdr->e_phnum; i++) {
                addr_t first, last, last_mem;

                prg_hdr = (Elf32_Phdr *)((u_int)elf_hdr + elf_hdr->e_phoff +
                                (i * sizeof(Elf32_Phdr)));

                if (prg_hdr->p_type != PT_LOAD)
                        continue;

                /* skip PT_LOAD if it's below the task start or above .bss */
                if ((prg_hdr->p_vaddr < TASK_START_ADDR) ||
                    (prg_hdr->p_vaddr >= taskp->end_bss))
                        continue;

                /*
                 * We expect to execute the task in place, so its PT_LOAD
                 * segments must be page-aligned.
                 */
                ASSERT(!(prg_hdr->p_vaddr & PAGE_MASK) &&
                       !(prg_hdr->p_offset & PAGE_MASK));

                mptr = malloc(sizeof(task_map_t));
                if (mptr == NULL)
                        return ERR_NO_MEMORY;

                mptr->size = (prg_hdr->p_memsz + PAGE_MASK) & ~PAGE_MASK;
                mptr->u_phys.contig = virtual_to_physical(task_image_addr) + prg_hdr->p_offset;
                mptr->vaddr = prg_hdr->p_vaddr;
                mptr->flags = (prg_hdr->p_flags & PF_FLAG_MASK) | TM_PHYS_CONTIG;
                mptr->offset = 0;

                task_add_mapping(taskp, mptr);

                /* check for overlap into user stack range */
                if ((TASK_STACK_ADDR - taskp->stack_map->size) < (mptr->vaddr + mptr->size)) {
                        dprintf(CRITICAL,
                                "failed to load task: (overlaps user stack 0x%lx)\n",
                                 TASK_STACK_ADDR - taskp->stack_map->size);
                        return ERR_TOO_BIG;
                }

                dprintf(SPEW,
                        "task %d: load vaddr 0x%08lx, paddr 0x%08llx"
                        " rsize 0x%08x, msize 0x%08x, flags 0x%08x\n",
                        taskp->task_index, mptr->vaddr, (uint64_t)mptr->u_phys.contig,
                        mptr->size, prg_hdr->p_memsz, mptr->flags);

                /* start of code/data */
                first = prg_hdr->p_vaddr;
                if (first < taskp->start_code)
                        taskp->start_code = first;
                if (taskp->start_data < first)
                        taskp->start_data = first;

                /* end of code/data */
                last = prg_hdr->p_vaddr + prg_hdr->p_filesz;
                if ((prg_hdr->p_flags & PF_X) && taskp->end_code < last)
                        taskp->end_code = last;
                if (taskp->end_data < last)
                        taskp->end_data = last;

                /* end of brk */
                last_mem = prg_hdr->p_vaddr + prg_hdr->p_memsz;
                if (last_mem > taskp->start_brk)
                        taskp->start_brk = last_mem;
        }

        ret = task_init_brk(task_image_addr, taskp, elf_hdr);
        if (ret != NO_ERROR) {
                dprintf(CRITICAL, "failed to load task: task heap creation error\n");
                return ret;
        }

        dprintf(SPEW, "task %d: code: start 0x%08lx end 0x%08lx\n",
                taskp->task_index, taskp->start_code, taskp->end_code);
        dprintf(SPEW, "task %d: data: start 0x%08lx end 0x%08lx\n",
                taskp->task_index, taskp->start_data, taskp->end_data);
        dprintf(SPEW, "task %d: bss:                end 0x%08lx\n",
                taskp->task_index, taskp->end_bss);
        dprintf(SPEW, "task %d: brk:  start 0x%08lx end 0x%08lx\n",
                taskp->task_index, taskp->start_brk, taskp->end_brk);

        taskp->entry = elf_hdr->e_entry;
        dprintf(SPEW, "task %d: entry 0x%08lx\n", taskp->task_index, taskp->entry);

        return NO_ERROR;
}

/*
 * Align the next task to a page boundary by copying what remains of
 * the task image down to the aligned start of the next task. This must
 * be called only after we're done with the section headers, as the
 * previous task's .shstrtab section will be clobbered by the copy.
 *
 * Note: task_image_size still tracks the region carved out in LK (it
 * is what terminates the bootloader loop), so it is still decremented
 * by max_extent. Because of the copy down to an aligned next task
 * address, task_image_size is larger than what is actually in use.
 */
static char *task_align_next_task(Elf32_Ehdr *elf_hdr, Elf32_Shdr *pad_hdr)
{
        char *next_task_align_start;
        char *next_task_fsize_start;
        char *task_image_addr;
        u_int copy_size;
        u_int max_extent;

        ASSERT(pad_hdr);
        ASSERT(elf_hdr);

        task_image_addr = (char *)elf_hdr;
        max_extent = (elf_hdr->e_shoff + (elf_hdr->e_shnum * elf_hdr->e_shentsize)) - 1;
        ASSERT((task_image_addr + max_extent + 1) <= task_image_end);

        next_task_align_start = task_image_addr + pad_hdr->sh_offset + pad_hdr->sh_size;
        next_task_fsize_start = task_image_addr + max_extent + 1;
        ASSERT(next_task_align_start <= next_task_fsize_start);

        copy_size = task_image_end - next_task_fsize_start;
        if (copy_size) {
                /*
                 * Copy remaining image bytes to aligned start for the next
                 * (and subsequent) tasks. Also decrement task_image_end, so
                 * we copy less each time we realign for the next task.
                 */
                memcpy(next_task_align_start, next_task_fsize_start, copy_size);

                platform_clean_invalidate_cache_range((addr_t)next_task_align_start,
                                copy_size);

                task_image_end -= (next_task_fsize_start - next_task_align_start);
        }

        task_image_size -= (max_extent + 1);
        return next_task_align_start;
}

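/*
 * Validate a task ELF image: locate its .bss, .bss-pad and .ote.manifest
 * sections, zero .bss, and parse the manifest (when present) into taskp.
 * On success the task is left in TASK_STATE_INIT.
 */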
status_t task_prepare(char *task_addr, u_int task_size, task_t *taskp,
                      Elf32_Shdr **bss_pad_shdr_p)
{
        status_t    err = NO_ERROR;
        Elf32_Ehdr *ehdr = NULL;
        Elf32_Shdr *shdr = NULL;
        Elf32_Shdr *bss_shdr = NULL;
        Elf32_Shdr *bss_pad_shdr = NULL;
        Elf32_Shdr *manifest_shdr = NULL;
        char       *shstbl = NULL;
        vaddr_t     bss_addr = 0;
        u_int       i = 0;
        u_int       task_max_extent = 0;

        if (!task_addr || !taskp || task_size == 0) {
                err = ERR_INVALID_ARGS;
                goto exit;
        }

        dprintf(SPEW, "%s task: start 0x%p size %d (0x%08x)\n",
                __func__, task_addr, task_size, task_size);

        ehdr = (Elf32_Ehdr *) task_addr;
        if (strncmp((char *)ehdr->e_ident, ELFMAG, SELFMAG)) {
                dprintf(CRITICAL, "%s: ELF header not found\n",
                        __func__);
                err = ERR_NOT_VALID;
                goto exit;
        }

        if (bss_pad_shdr_p)
                *bss_pad_shdr_p = NULL;

        shdr   = (Elf32_Shdr *) ((u_int)ehdr + ehdr->e_shoff);
        shstbl = (char *)((u_int)ehdr + shdr[ehdr->e_shstrndx].sh_offset);

        bss_shdr = bss_pad_shdr = manifest_shdr = NULL;

        /* calculate task end */
        for (i = 0; i < ehdr->e_shnum; i++) {
                u_int extent;

                if (shdr[i].sh_type == SHT_NULL)
                        continue;
#if 0
                dprintf(CRITICAL, "task: sect %d, off 0x%08x, size 0x%08x, name %s\n",
                        i, shdr[i].sh_offset, shdr[i].sh_size, shstbl + shdr[i].sh_name);
#endif

                /* track bss and manifest sections */
                if (!strcmp((shstbl + shdr[i].sh_name), ".bss")) {
                        bss_shdr = shdr + i;
                        taskp->end_bss = bss_shdr->sh_addr + bss_shdr->sh_size;
                }
                else if (!strcmp((shstbl + shdr[i].sh_name), ".bss-pad")) {
                        bss_pad_shdr = shdr + i;
                }
                else if (!strcmp((shstbl + shdr[i].sh_name),
                                 ".ote.manifest")) {
                        manifest_shdr = shdr + i;
                }

                if (shdr[i].sh_type != SHT_NOBITS) {
                        extent = shdr[i].sh_offset + shdr[i].sh_size;
                        if (task_max_extent < extent)
                                task_max_extent = extent;
                }
        }

        /*
         * We need these sections.
         * Manifest is handled later.
         */
        if (!bss_shdr || !bss_pad_shdr) {
                dprintf(CRITICAL, "%s: Invalid task image\n", __func__);
                err = ERR_NOT_VALID;
                goto exit;
        }

        if (bss_pad_shdr_p)
                *bss_pad_shdr_p = bss_pad_shdr;

        if ((bss_shdr->sh_offset + bss_shdr->sh_size) > task_max_extent) {
                dprintf(CRITICAL, "%s: Invalid task image\n", __func__);
                err = ERR_NOT_VALID;
                goto exit;
        }

        if (ROUNDUP(task_max_extent, 4) != ehdr->e_shoff) {
                dprintf(CRITICAL, "%s: Invalid task image\n", __func__);
                err = ERR_NOT_VALID;
                goto exit;
        }

        /* clear .bss */
        bss_addr = (vaddr_t)(task_addr + bss_shdr->sh_offset);

        memset((uint8_t *)bss_addr, 0, bss_shdr->sh_size);

        platform_clean_invalidate_cache_range(bss_addr, bss_shdr->sh_size);

        /* let the caller decide if it can handle binaries without manifest */
        if (manifest_shdr == NULL) {
                taskp->props.manifest_exists = 0;
        } else {
                task_load_config_options((u_int)task_addr, taskp, manifest_shdr);
                taskp->props.manifest_exists = 1;
        }

        taskp->elf_hdr = ehdr;
        taskp->task_state = TASK_STATE_INIT;

        if (0) {
        exit:
                if (err == NO_ERROR)
                        err = ERR_GENERIC;
        }

        return err;
}

/*
 * Carve out memory for the task headers.
 * Called before heap_init.
 */
static void task_mem_init()
{
        if (task_image_size != 0) {
                carveout_taskmem &= ~PAGE_MASK;

                /* list of tasks (static and loaded) */
                carveout_taskmem -= (TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE);
                task_list = (task_t *)carveout_taskmem;
        }

        ASSERT(!(carveout_taskmem & PAGE_MASK));
}

/*
 * Look in the kernel's ELF header for task sections and carve out
 * memory for their LOAD-able sections. This is called before heap_init.
 *
 * This sets up the built-in tasks; they are started later by task_init().
 */
static void task_bootloader()
{
        char       *task_image_addr  = NULL;
        task_t     *taskp            = NULL;
        status_t    err              = NO_ERROR;

        dprintf(SPEW, "static tasks: start 0x%p size 0x%08x end 0x%p\n",
                task_image_start, task_image_size, task_image_end);

        task_image_addr = task_image_start;

        task_mem_init();

        memset(task_list, 0, TASK_LIST_CARVEOUT_PAGES * PAGE_SIZE);

        taskp = task_list;

        while (task_image_size > 0) {
                Elf32_Shdr *bss_pad_shdr = NULL;

                ASSERT((task_count + 1) <= MAX_TASK_COUNT);

                err = task_prepare(task_image_addr, task_image_size,
                                   taskp, &bss_pad_shdr);

                /* statically loaded tasks must run or the system halts */
                if (err != NO_ERROR) {
                        dprintf (CRITICAL, "%s: task %d preparation failed (%d)\n",
                                 __func__, task_count, err);
                        halt();
                }

                /* static tasks must contain a manifest section */
                if (!taskp->props.manifest_exists) {
                        dprintf(CRITICAL, "%s: Invalid task image (%d)\n",
                                __func__, task_count);
                        halt();
                }

                /*
                 * Make sure the UUID doesn't already exist.  Note that
                 * this search won't include the task we are processing
                 * here because task_count hasn't been incremented yet.
                 */
                if (task_find_task_by_uuid(&taskp->props.uuid) != NULL) {
                        dprintf(CRITICAL, "%s: task %d duplicate UUID found!\n",
                                __func__, task_count);
                        halt();
                }

                task_image_addr = task_align_next_task(taskp->elf_hdr,
                                                       bss_pad_shdr);

                /* task_state is now TASK_STATE_INIT */
                taskp->task_index = task_count;
                task_count++;
                taskp++;
        }
}

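/*
 * Called before heap_init: locate the static task images placed between
 * __tasks_start and __tasks_end by the linker and prepare each of them.
 */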
void task_early_init()
{
        task_image_start = (char *)&__tasks_start;
        task_image_end = (char *)&__tasks_end;
        task_image_size = (task_image_end - task_image_start);

        ASSERT(!((uint32_t)task_image_start & PAGE_MASK));

        task_bootloader();
}

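/*
 * Create the address map and the initial thread for a prepared task,
 * then start it. The task must be in TASK_STATE_INIT.
 */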
status_t task_init_one_task(task_t *task, u_int task_type)
{
        status_t err = NO_ERROR;
        char name[32];
        thread_t *thread;

        if (!task || task->task_index >= task_count) {
                err = ERR_INVALID_ARGS;
                goto exit;
        }

        if (task->task_state != TASK_STATE_INIT) {
                dprintf(CRITICAL, "%s: Task not startable in state %d\n",
                        __func__, task->task_state);
                err = ERR_TASK_GENERIC;
                goto exit;
        }

        list_initialize(&task->map_list);

        err = task_alloc_address_map(task);
        if (err != NO_ERROR) {
                dprintf(CRITICAL, "%s: failed to load address map\n",
                        __func__);
                goto exit;
        }

        /* setup dynamic alloc range */
        task_set_valloc_start(task);

        /* reserve mmio va ranges here */
        if (task->props.map_io_mem_cnt > 0) {
                task_setup_mmio(task);

                /* reset valloc start */
                task_set_valloc_start(task);
        }

        snprintf(name, sizeof(name) - 1, "task_%u_T0", task->task_index);
        name[sizeof(name) - 1] = '\000';

        task->task_type = task_type;

        thread = thread_create(name, (thread_start_routine)(task->entry), 0, LOW_PRIORITY, 4096);
        if (thread == NULL) {
                dprintf(CRITICAL, "%s: allocate user thread failed\n",
                        __func__);
                err = ERR_GENERIC;
                goto exit;
        }

        list_initialize(&task->thread_node);

        if (arch_task_init(thread, task) == false) {
                dprintf(CRITICAL, "%s: arch thread/task init failed\n",
                        __func__);
                err = ERR_GENERIC;
                goto exit;
        }

        task->task_state = TASK_STATE_ACTIVE;

        /* start it */
        if (task->entry) {
                dprintf(INFO, "starting task#%u\n",
                        task->task_index);
                thread_resume(thread);
        }

        if (0) {
        exit:
                if (err == NO_ERROR)
                        err = ERR_GENERIC;
        }
        return err;
}

/*
 * Start static tasks initialized by task_early_init()
 */
void task_init()
{
        status_t err = NO_ERROR;
        task_t *task;
        u_int i;

        for (i = 0, task = task_list; i < task_count; i++, task++) {
                err = task_init_one_task (task, TASK_TYPE_STATIC);
                if (err != NO_ERROR) {
                        dprintf(CRITICAL, "%s: static task start failed %d -- halting\n",
                                __func__, err);
                        halt ();
                }
        }
}

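/* Return the task whose manifest UUID matches, or NULL if none does. */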
task_t *task_find_task_by_uuid(te_service_id_t *uuid)
{
        task_t *task;
        u_int i;

        /* find task for this uuid */
        for (i = 0, task = task_list; i < task_count; i++, task++) {
                if (!memcmp(&task->props.uuid, uuid, sizeof(te_service_id_t))) {
                        break;
                }
        }

        if (i == task_count)
                return NULL;

        return task;
}

void
task_print_uuid(uint32_t level, const char *prefix, const task_t *taskp)
{
        if (taskp) {
                const te_service_id_t *uuid = &taskp->props.uuid;

                dprintf(level, "%s%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x\n",
                         (prefix    ? prefix    : ""),
                         uuid->time_low,
                         uuid->time_mid,
                         uuid->time_hi_and_version,
                         uuid->clock_seq_and_node[0],   /* clock_seq_hi_and_reserved */
                         uuid->clock_seq_and_node[1],   /* clock_seq_low */
                         uuid->clock_seq_and_node[2],
                         uuid->clock_seq_and_node[3],
                         uuid->clock_seq_and_node[4],
                         uuid->clock_seq_and_node[5],
                         uuid->clock_seq_and_node[6],
                         uuid->clock_seq_and_node[7]);
        }
}