/* platform/tegra/common/platform.c — TLK Tegra platform init and NS-world secure-storage proxy */
1 /*
2  * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <assert.h>
25 #include <errno.h>
26 #include <err.h>
27 #include <debug.h>
28 #include <rand.h>
29 #include <string.h>
30 #include <stdlib.h>
31 #include <lib/heap.h>
32 #include <arch/arm/mmu.h>
33 #include <arch/ops.h>
34 #include <arch/arm.h>
35 #include <platform.h>
36 #include <platform/memmap.h>
37 #include <platform/irqs.h>
38 #include <kernel/task.h>
39 #include <target/debugconfig.h>
40 #include <lib/monitor/monitor_vector.h>
41 #if ARM_WITH_OUTER_CACHE
42 #include <arch/outercache.h>
43 #endif
44 #include <platform/platform_p.h>
45
#define MB              (1024 * 1024)

/* secondary-CPU release address and coldboot entry info (set by boot code) */
extern unsigned long boot_secondary_cpu_addr;
extern unsigned int coldboot_normal_os;
extern unsigned int normal_os_coldboot_fn;
/* SoC unique id words, copied out by platform_get_device_id() */
extern uint32_t device_uid[4];

#if !defined(WITH_MONITOR_BIN)
/* boot-time register/CPSR snapshot used to return to the NS bootloader */
extern uint32_t __save_boot_regs[9];
extern uint32_t __save_boot_cpsr;
extern uint32_t __jumpback_addr;
#endif

uint32_t debug_uart_id = DEFAULT_DEBUG_PORT;

/* NS-world callback address + shared buffer for secure-storage requests */
static uint32_t _jump_to_ns_ss_op;	/* NS entry point for fs operations */
static te_ss_op_t *ss_op_shmem;		/* secure-side mapping of the buffer */
static te_ss_op_t *ns_ss_op_shmem;	/* NS address, as registered */

/* track available kernel VA space */
vaddr_t platform_vaspace_ptr;
vaddr_t platform_vaspace_end;

/* addresses of the NS log circular buffer (set by set_log_phy_addr) */
extern unsigned long cbstruct_addr;
extern unsigned long cbuf_addr;

/* kernel VA of the page mapped read-only for user tasks at 0xFFFF0000 */
static addr_t user_vector_page;
/*
 * Earliest platform hook: bring up the debug UART (debug_uart_id,
 * defaulting to DEFAULT_DEBUG_PORT) so later boot stages can print.
 */
void platform_early_init(void)
{
	platform_init_debug_port(debug_uart_id);
}
78
/*
 * Final step of secure-world boot: build the monitor frame describing
 * where to resume the non-secure world, then jump there for the first
 * time via SMC_TOS_INITIAL_NS_RETURN.  Does not return until the NS
 * world issues an SMC back into TLK.
 */
void platform_idle(void)
{
	struct tz_monitor_frame frame;
#if ARM_CPU_CORTEX_A9
	uint32_t val;
#endif

#if !defined(WITH_MONITOR_BIN)
	/* mark the entire TLK carveout as secure in the MC */
	platform_secure_dram_aperture();
#endif

	memset(&frame, 0, sizeof(frame));

#if !defined(WITH_MONITOR_BIN)
	/* resume the NS bootloader at the state saved on secure entry */
	ASSERT(__jumpback_addr);
	frame.pc = __jumpback_addr;
	frame.spsr = __save_boot_cpsr;	/* interrupts disabled, in svc */
	memcpy(&frame.r[4], __save_boot_regs, sizeof(__save_boot_regs));
#endif

#if ARM_CPU_CORTEX_A9
	/*
	 * Before going to the NS world for the first time, set ACTLR.FW.
	 * The NSACR.NS_SMP setting granted it the capability to set ACTLR.SMP,
	 * but it doesn't cover NS writing ACTLR.FW.
	 */
	val = arm_read_actlr();
	val |= 0x1;
	arm_write_actlr(val);
#endif

	dputs(CRITICAL, "TLK initialization complete. Jumping to non-secure world\n");

	platform_handle_tz_smc(SMC_TOS_INITIAL_NS_RETURN, &frame);
}
115
/*
 * Main platform init: per-CPU setup, creation of the user "vector"
 * page (mapped user-readable at 0xFFFF0000), key setup, and locking
 * down SE TZRAM access.
 */
void platform_init(void)
{
	task_map_t mptr;
	arm_phys_attrs_t attrs;
	uint32_t reg = 0;

	platform_init_cpu();

	/* map read-only user vector page in kernel */
	arm_mmu_desc_set_default_kernel(&attrs);

	mptr.map_attrs = &attrs;
	mptr.size = PAGE_SIZE;
	mptr.flags = TM_UR;	/* user-readable, not writable */

	arm_mmu_map_kpage(0xFFFF0000, virtual_to_physical(user_vector_page), &mptr);
	arm_invalidate_tlb();

	memset((void *)user_vector_page, 0, mptr.size);

	/*
	 * load user vectors (used by libc to get tls): copies 8 bytes of
	 * arm_get_tls's code to page offsets 0xFE0/0xFE8, i.e. user VAs
	 * 0xFFFF0FE0 and 0xFFFF0FE8 -- presumably the ARM kuser get_tls
	 * helper slots; confirm against the user-side libc.
	 */
	memcpy((char *)user_vector_page + 0xFE0, arm_get_tls, 0x8);
	memcpy((char *)user_vector_page + 0xFE8, arm_get_tls, 0x8);

	/* push the freshly written code to memory so user fetches see it */
	platform_clean_invalidate_cache_range(user_vector_page, mptr.size);

	platform_setup_keys();

	/*
	 * Set SE_TZRAM_SECURITY sticky bit to respect secure TZRAM accesses.
	 * Note: No need to reprogram it after LP0 exit as it's part of SE's
	 * sticky bits HW LP0 context, so will be restored by the BR.
	 */
	reg = *(volatile uint32_t *)(TEGRA_SE_BASE + 0x4);
	reg &= ~(0x1);	/* clearing bit 0 engages the restriction (see note above) */
	*(volatile uint32_t *)(TEGRA_SE_BASE + 0x4) = reg;
}
153
154 void platform_init_mmu_mappings(void)
155 {
156         extern int _heap_end;
157         extern uint32_t __load_phys_size, __early_heap_allocs;
158
159         /*
160          * End of the kernel's heap is the carveout size, reduced by
161          * any early allocations (e.g. pages used for pagetables).
162          */
163         _heap_end = (VMEMBASE + __load_phys_size) - __early_heap_allocs;
164         _heap_end &= ~PAGE_MASK;
165
166         /* reserve user vector page (before heap is initialized) */
167         _heap_end -= PAGE_SIZE;
168         user_vector_page = _heap_end;
169
170         /* setup available vaspace (starts at end of carveout memory) */
171         platform_vaspace_ptr = VMEMBASE + __load_phys_size;
172         platform_vaspace_end = platform_vaspace_ptr + (VMEMSIZE - __load_phys_size);
173 }
174
/*
 * Translate a non-secure-world virtual address.
 * @vaddr: NS virtual address
 * @type:  translation/access type, passed through to the V2P op (or
 *         to the monitor when built against a separate monitor binary)
 *
 * Returns the PAR-format result (includes fault/attribute bits, not a
 * bare physical address).
 */
uint64_t platform_translate_nsaddr(nsaddr_t vaddr, uint32_t type)
{
#if defined(WITH_MONITOR_BIN)
	/* delegate to the external monitor via SMC */
	struct tz_monitor_frame frame;
	frame.r[0] = vaddr;
	frame.r[1] = type;
	monitor_send_receive(SMC_TOS_ADDR_TRANSLATE, &frame);
	return frame.r[0];
#else
	/* perform the address-translation operation locally, read PAR */
	arm_write_v2p(vaddr, type);
	return arm_read_par();
#endif
}
188
/*
 * Return 32 bits from the kernel PRNG.
 *
 * NOTE(review): rand() is a deterministic PRNG, not a CSPRNG.  If any
 * caller uses this for key material, IVs or nonces, it should be backed
 * by a hardware entropy source -- confirm how rand() is seeded.
 */
uint32_t platform_get_rand32(void)
{
	return rand();
}
193
194 uint32_t platform_get_time_us(void)
195 {
196         return *(volatile uint32_t *)(TEGRA_TMRUS_BASE);
197 }
198
199 void platform_handle_tz_smc(uint32_t smc_type, struct tz_monitor_frame *ns_frame)
200 {
201         struct tz_monitor_frame *smc_frame;
202
203         smc_frame = tz_switch_to_ns(smc_type, ns_frame);
204         while (smc_frame) {
205                 tz_handle_monitor(smc_frame);
206                 smc_frame = tz_switch_to_ns(SMC_TOS_COMPLETION, ns_frame);
207         }
208 }
209
210 status_t platform_ss_register_handler(struct tz_monitor_frame *frame)
211 {
212         /* r[1] -> address to ns fs callback function */
213         _jump_to_ns_ss_op = frame->r[1];
214
215         /* r[2] -> address of shared fs operation buffer */
216         ns_ss_op_shmem = (te_ss_op_t *)(uintptr_t)frame->r[2];
217
218         ss_op_shmem = (te_ss_op_t *)tz_map_shared_mem((addr_t)ns_ss_op_shmem,
219                                                 sizeof(*ss_op_shmem));
220         if (!ss_op_shmem)
221                 return ERR_GENERIC;
222
223         return NO_ERROR;
224 }
225
/*
 * Calculate the physical address of the shared buffer that we have got for
 * logging from the linux kernel. All references to the shared buffer from
 * within tlk are made directly to the physical address.
 */
status_t set_log_phy_addr(nsaddr_t _ns_cb_struct_addr)
{
	struct circular_buffer *cbstruct;
	nsaddr_t cbuf;

	/* map the NS circular-buffer descriptor (one page) into TLK */
	cbstruct = (struct circular_buffer *)
			tz_map_shared_mem(_ns_cb_struct_addr, PAGE_SIZE);
	if (cbstruct == NULL) {
		dprintf(CRITICAL, "%s: failed to map cbstruct\n", __func__);
		return ERR_NO_MEMORY;
	}

	/*
	 * Map the log data area described by the descriptor.  buf/size are
	 * NS-controlled -- presumably range-checked inside tz_map_shared_mem;
	 * confirm.  NOTE(review): if this mapping fails, the cbstruct
	 * mapping above is never unmapped (leak on the error path).
	 */
	cbuf = tz_map_shared_mem(cbstruct->buf, cbstruct->size);
	if (cbuf == NULL) {
		dprintf(CRITICAL, "%s: failed to map cbuf\n", __func__);
		return ERR_NO_MEMORY;
	}

	/* publish the mapped addresses for the logging code */
	cbstruct_addr = (unsigned long)cbstruct;
	cbuf_addr = (unsigned long)cbuf;

	return NO_ERROR;
}
254
255 int platform_ss_request_handler(te_storage_request_t *req)
256 {
257         struct tz_monitor_frame frame;
258
259         ss_op_shmem->type = req->type;
260         ss_op_shmem->result = 0;
261
262         dprintf(INFO, "%s: type 0x%x\n", __func__, req->type);
263
264         switch (ss_op_shmem->type) {
265         case OTE_FILE_REQ_TYPE_CREATE:
266                 /* arg #0: dirname buffer */
267                 strncpy(ss_op_shmem->params.f_create.dname,
268                         req->args[0].mem.base,
269                         sizeof(ss_op_shmem->params.f_create.dname));
270                 /* arg #1: filename buffer */
271                 strncpy(ss_op_shmem->params.f_create.fname,
272                         req->args[1].mem.base,
273                         sizeof(ss_op_shmem->params.f_create.fname));
274                 /* arg #2: flags */
275                 ss_op_shmem->params.f_create.flags = req->args[2].val.a;
276
277                 ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_create);
278                 break;
279         case OTE_FILE_REQ_TYPE_DELETE:
280                 /* arg #0: dirname buffer */
281                 strncpy((char *)ss_op_shmem->params.f_delete.dname,
282                         req->args[0].mem.base,
283                         sizeof(ss_op_shmem->params.f_delete.dname));
284                 /* arg #1: filename buffer */
285                 strncpy((char *)ss_op_shmem->params.f_delete.fname,
286                         req->args[1].mem.base,
287                         sizeof(ss_op_shmem->params.f_delete.fname));
288
289                 ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_delete);
290                 break;
291         case OTE_FILE_REQ_TYPE_OPEN:
292                 /* arg #0: dirname buffer */
293                 strncpy((char *)ss_op_shmem->params.f_open.dname,
294                         req->args[0].mem.base,
295                         sizeof(ss_op_shmem->params.f_open.dname));
296                 /* arg #1: filename buffer */
297                 strncpy((char *)ss_op_shmem->params.f_open.fname,
298                         req->args[1].mem.base,
299                         sizeof(ss_op_shmem->params.f_open.fname));
300                 /* arg #2: open flags */
301                 ss_op_shmem->params.f_open.flags = req->args[2].val.a;
302
303                 ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_open);
304                 break;
305         case OTE_FILE_REQ_TYPE_CLOSE:
306                 /* arg #0: file handle */
307                 ss_op_shmem->params.f_close.handle = req->args[0].val.a;
308
309                 ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_close);
310                 break;
311         case OTE_FILE_REQ_TYPE_READ:
312                 /* validate read buffer */
313                 if (!task_valid_address((vaddr_t)req->args[1].mem.base,
314                                         req->args[1].mem.len)) {
315                         return -EFAULT;
316                 }
317
318                 /* arg #0: file handle */
319                 ss_op_shmem->params.f_close.handle = req->args[0].val.a;
320                 /* arg #1: read buffer attributes */
321                 ss_op_shmem->params.f_read.data_size = req->args[1].mem.len;
322
323                 ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_read);
324                 break;
325         case OTE_FILE_REQ_TYPE_WRITE:
326                 /* validate write buffer */
327                 if (!task_valid_address((vaddr_t)req->args[1].mem.base,
328                                         req->args[1].mem.len)) {
329                         return -EFAULT;
330                 }
331
332                 /* arg #0: file handle */
333                 ss_op_shmem->params.f_write.handle = req->args[0].val.a;
334                 /* arg #1: write buffer attributes */
335                 ss_op_shmem->params.f_write.data_size =
336                         MIN(req->args[1].mem.len,
337                                 sizeof(ss_op_shmem->params.f_write.data));
338                 memcpy((void*)ss_op_shmem->params.f_write.data,
339                         req->args[1].mem.base,
340                         ss_op_shmem->params.f_write.data_size);
341
342                 ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_write);
343                 break;
344         case OTE_FILE_REQ_TYPE_GET_SIZE:
345                 /* arg #0: file handle */
346                 ss_op_shmem->params.f_getsize.handle = req->args[0].val.a;
347
348                 ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_getsize);
349                 break;
350         case OTE_FILE_REQ_TYPE_SEEK:
351                 /* arg #0: file handle */
352                 ss_op_shmem->params.f_seek.handle = req->args[0].val.a;
353                 /* arg #1: offset */
354                 ss_op_shmem->params.f_seek.offset = req->args[1].val.a;
355                 /* arg #2: whence value */
356                 ss_op_shmem->params.f_seek.whence = req->args[2].val.a;
357
358                 ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_seek);
359                 break;
360         case OTE_FILE_REQ_TYPE_TRUNC:
361                 /* arg #0: file handle */
362                 ss_op_shmem->params.f_trunc.handle = req->args[0].val.a;
363                 /* arg #0: truncate length */
364                 ss_op_shmem->params.f_trunc.length = req->args[1].val.a;
365
366                 ss_op_shmem->params_size = sizeof(ss_op_shmem->params.f_trunc);
367                 break;
368         default:
369                 return -EINVAL;
370         }
371
372         /* actual file read/write */
373         memset(&frame, 0, sizeof(struct tz_monitor_frame));
374
375         frame.pc = _jump_to_ns_ss_op;
376         frame.spsr = MODE_SVC;
377
378         /* adjust size down to only include required parameters */
379         frame.r[0] = (sizeof(te_ss_op_t) - sizeof(te_ss_req_params_t)) +
380                 ss_op_shmem->params_size;
381
382         platform_handle_tz_smc(SMC_TOS_PREEMPT_BY_FS, &frame);
383
384         req->result = ss_op_shmem->result;
385         if (req->result != 0) {
386                 dprintf(CRITICAL, "%s: call to non-secure world failed 0x%x\n",
387                         __func__, req->result);
388                 return req->result;
389         }
390
391         /* move any expected return data into request structure */
392         switch (ss_op_shmem->type) {
393         case OTE_FILE_REQ_TYPE_OPEN:
394                 /* arg #3: return file handle */
395                 req->args[3].val.a = ss_op_shmem->params.f_open.handle;
396                 break;
397         case OTE_FILE_REQ_TYPE_READ:
398                 /* arg #1: return amount of data read */
399                 req->args[1].mem.len = ss_op_shmem->params.f_read.data_size;
400                 /* arg #1: return data read */
401                 memcpy(req->args[1].mem.base,
402                         (void *)ss_op_shmem->params.f_read.data,
403                         req->args[1].mem.len);
404                 break;
405         case OTE_FILE_REQ_TYPE_GET_SIZE:
406                 /* arg #1: return file size */
407                 req->args[1].val.a = ss_op_shmem->params.f_getsize.size;
408                 break;
409         default:
410                 break;
411         }
412
413         return 0;
414 }
415
416 void platform_get_device_id(te_device_id_args_t *out)
417 {
418         if (out)
419                 memcpy(out, device_uid, sizeof(te_device_id_args_t));
420 }
421
/*
 * Clean and invalidate [range, range + length) in the data caches.
 * When an outer cache is configured, also clean/invalidate the
 * corresponding physical range there (outer caches operate on
 * physical addresses).  Compiles to a no-op unless the build enables
 * ARM_USE_CPU_CACHING.
 */
void platform_clean_invalidate_cache_range(vaddr_t range, uint32_t length)
{
#if defined(ARM_USE_CPU_CACHING)
	arch_clean_invalidate_cache_range(range, length);

#if ARM_WITH_OUTER_CACHE
	/* outer cache wants physical addresses */
	outer_clean_range(virtual_to_physical(range), length);
	outer_inv_range(virtual_to_physical(range), length);
#endif
#endif
}