/*
 * tlk: 6/19 update
 * [3rdparty/ote_partner/tlk.git] / platform / tegra / common / tz.c
 */
1 /*
2  * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <err.h>
25 #include <errno.h>
26 #include <debug.h>
27 #include <assert.h>
28 #include <malloc.h>
29 #include <string.h>
30 #include <platform.h>
31 #include <arch.h>
32 #include <stdlib.h>
33 #include <lib/heap.h>
34 #include <arch/outercache.h>
35 #include <platform/memmap.h>
36 #include <platform/tzrammap.h>
37 #include <platform/irqs.h>
38 #include <arch/arm.h>
39 #include <arch/arm/mmu.h>
40 #include <platform/platform_p.h>
41 #include <ote_intf.h>
42
/* sentinel written to r[0] for SMC function IDs this handler does not support */
#define TZ_UNSUPPORTED_PARAM    0xDEADBEEF

/* defined elsewhere; presumably the NS IRQ return trampoline — TODO confirm */
extern unsigned long _jump_to_ns_irq_addr;

/*
 * Bump-allocator bounds for secure virtual address space:
 * tz_map_shared_mem() advances platform_vaspace_ptr and fails mappings
 * that would cross platform_vaspace_end.
 */
extern vaddr_t platform_vaspace_ptr;
extern vaddr_t platform_vaspace_end;
/* cleared by the SMC_TOS_INIT_LOGGER handler when logger setup fails */
extern unsigned int ote_logger_enabled;

#if !defined(WITH_MONITOR_BIN)
/* world-switch entry used when no separate monitor binary is present */
extern struct tz_monitor_frame *go_nonsecure(uint32_t smc_type, struct tz_monitor_frame *);
/* monitor frame storage, allocated in tz_init() */
extern unsigned long mon_fastcall_frame_addr;
extern unsigned long mon_stdcall_frame_addr;
#endif

/* location in NS of req/param structs (mapped via tz_register_req_param_buf) */
static vaddr_t tz_shared_req_param_buf;
static uint32_t tz_shared_req_param_size;

/* valid DRAM ranges as specified by the bootloader (see tz_add_dram_range) */
#define MAX_NUM_DRAM_RANGES     8
struct dram_range {
        paddr_t dram_base;
        paddr_t dram_size;
};
static struct dram_range tz_dram_range[MAX_NUM_DRAM_RANGES];
static uint32_t tz_dram_range_count;
69
70 void tz_add_dram_range(paddr_t base, paddr_t size)
71 {
72         ASSERT(tz_dram_range_count < MAX_NUM_DRAM_RANGES);
73
74         tz_dram_range[tz_dram_range_count].dram_base = base;
75         tz_dram_range[tz_dram_range_count].dram_size = size;
76         tz_dram_range_count++;
77 }
78
79 static bool valid_dram_address(paddr_t addr, paddr_t size)
80 {
81         uint32_t i;
82
83         for (i = 0; i < tz_dram_range_count; i++) {
84                 if ((tz_dram_range[i].dram_base <= addr) &&
85                         ((tz_dram_range[i].dram_base + tz_dram_range[i].dram_size) >= (addr + size)))
86                         return true;
87         }
88
89         dprintf(CRITICAL, "%s: illegal addr 0x%08llx, size 0x%08llx reference\n",
90                 __func__, (uint64_t)addr, (uint64_t)size);
91         return false;
92 }
93
94 vaddr_t tz_map_shared_mem(nsaddr_t ns_addr, uint32_t size)
95 {
96         vaddr_t vaddr, vsize;
97         uint32_t offset;
98         paddr_t *pagelist;
99         uint32_t pg, npages;
100         task_map_t mptr;
101
102         offset = ns_addr & PAGE_MASK;
103         size = ROUNDUP(offset + size, PAGE_SIZE);
104         npages = size / PAGE_SIZE;
105
106         /* referencing NS memory, so setup proper vaddr alignment */
107         vaddr = ROUNDUP(platform_vaspace_ptr, NS_VIRT_ADDR_ALIGN);
108         vsize = ROUNDUP(size, NS_VIRT_ADDR_ALIGN);
109         if ((vaddr + vsize) > platform_vaspace_end)
110                 return NULL;
111
112         pagelist = malloc(npages * sizeof(paddr_t));
113         if (pagelist == NULL)
114                 return NULL;
115
116         mptr.flags = TM_NS_MEM_PRIV;
117         mptr.size = size;
118
119         arm_mmu_translate_range(ns_addr, pagelist, &mptr);
120
121         /* verify all pages reside in DRAM */
122         for (pg = 0; pg < npages; pg++) {
123                 ASSERT(!(pagelist[pg] & PAGE_MASK));
124
125                 if (valid_dram_address(pagelist[pg], PAGE_SIZE) == false) {
126                         free(pagelist);
127                         return NULL;
128                 }
129         }
130
131         /* map pages and TLB invalidate by addr */
132         for (pg = 0; pg < npages; pg++) {
133                 vaddr_t map_addr;
134
135                 map_addr = vaddr + (pg * PAGE_SIZE);
136                 arm_mmu_map_kpage(map_addr, pagelist[pg], &mptr);
137                 arm_invalidate_tlb_byaddr(map_addr);
138         }
139
140         free(pagelist);
141
142         platform_vaspace_ptr = (vaddr + vsize);
143         return vaddr + offset;
144 }
145
/*
 * One-time TZ bring-up: allocate monitor call frames (built-in monitor
 * configuration only) and initialize the OTE interface.
 */
void tz_init(void)
{
#if !defined(WITH_MONITOR_BIN)
        /* allocate space for fast/std call monitor frames */
        /* 15 x uint64_t per frame — presumably one slot per saved register
         * of the monitor frame; TODO confirm against the monitor asm */
        mon_fastcall_frame_addr = (unsigned long)calloc(1, sizeof(uint64_t) * 15);
        ASSERT(mon_fastcall_frame_addr);

        mon_stdcall_frame_addr = (unsigned long)calloc(1, sizeof(uint64_t) * 15);
        ASSERT(mon_stdcall_frame_addr);

        /* power-management SMC handling lives here only when the monitor
         * is built into this image */
        pm_init();
#endif
        /* bring up the OTE request interface (see ote_intf.h) */
        te_intf_init();
}
160
161 struct tz_monitor_frame *tz_switch_to_ns(uint32_t smc_type, struct tz_monitor_frame *frame)
162 {
163         struct tz_monitor_frame *incoming_smc;
164
165         enter_critical_section();
166
167         if (s_vfp_hw_context && s_vfp_hw_context->valid) {
168                 /*
169                  * Save the secure world vfp state on the way out and mark
170                  * the buffer as invalid as on entry we'll disable fpexc,
171                  * so we can save out the NS copy and fault in the new one.
172                  */
173                 arch_vfp_save(s_vfp_hw_context);
174                 s_vfp_hw_context->fpexc = 0x0;
175                 s_vfp_hw_context->valid = false;
176         }
177
178         if (ns_vfp_hw_context && ns_vfp_hw_context->valid) {
179                 /*
180                  * Restore the non-secure world vfp state if during our
181                  * running we context switched away from what was loaded in
182                  * the HW (NS version of fpexc is always saved/restored).
183                  */
184                 arch_vfp_restore(ns_vfp_hw_context);
185                 ns_vfp_hw_context->valid = false;
186         }
187
188         arm_set_vfp_fpexc(ns_vfp_hw_context->fpexc);
189
190 #if defined(WITH_MONITOR_BIN)
191         /* go to monitor, for return to NS */
192         incoming_smc = monitor_send_receive(smc_type, frame);
193 #else
194         /* go to NS, on return there'll be a new SMC */
195         incoming_smc = go_nonsecure(smc_type, frame);
196 #endif
197
198         exit_critical_section();
199
200         /* on entry, save NS fpexc and disable to detect vfp usage */
201         ns_vfp_hw_context->fpexc = arm_get_vfp_fpexc();
202         arm_set_vfp_fpexc(0x0);
203
204         return incoming_smc;
205 }
206
207 status_t tz_register_req_param_buf(struct tz_monitor_frame *frame)
208 {
209         uint32_t size;
210         nsaddr_t ns_addr;
211
212         ns_addr = frame->r[1];
213         size = frame->r[2];
214
215         if (!size) {
216                 /* probing only for supported SMC */
217                 return NO_ERROR;
218         }
219
220         tz_shared_req_param_buf = tz_map_shared_mem(ns_addr, size);
221         if (tz_shared_req_param_buf == NULL)
222                 return ERR_GENERIC;
223
224         tz_shared_req_param_size = size;
225         return NO_ERROR;
226 }
227
228 /*
229  * System related SMCs handled on the current idle stack.
230  * These should be simple operations that can't block.
231  */
232 void tz_handle_system_smc(struct tz_monitor_frame *frame)
233 {
234         int error = 0;
235         nsaddr_t _ns_cb_struct_addr = NULL;
236
237         switch (frame->r[0]) {
238
239                 case SMC_TOS_SS_REGISTER_HANDLER:
240                         error = platform_ss_register_handler(frame);
241                         break;
242
243                 case SMC_TOS_NS_REG_REQPARAM_BUF:
244                         error = tz_register_req_param_buf(frame);
245                         break;
246
247                 case SMC_TOS_INIT_LOGGER:
248                         _ns_cb_struct_addr = frame->r[1];
249
250                         if (!_ns_cb_struct_addr) {
251                                 ote_logger_enabled = 0;
252                                 dputs(CRITICAL, early_logbuf);
253                                 error = ERR_NOT_SUPPORTED;
254                                 break;
255                         }
256
257                         /* physical address of the circular buffer */
258                         if (set_log_phy_addr(_ns_cb_struct_addr)) {
259                                 ote_logger_enabled = 0;
260                                 dputs(CRITICAL, early_logbuf);
261                                 error = ERR_NOT_SUPPORTED;
262                                 break;
263                         }
264
265                         /* copy early prints into the shared buffer */
266                         dprintf(CRITICAL, "%s", early_logbuf);
267
268                         break;
269 #if !defined(WITH_MONITOR_BIN)
270                 case SMC_TOS_PROGRAM_VPR:
271                         error = platform_program_vpr(frame->r[1], frame->r[2]);
272                         break;
273 #endif
274         }
275
276         frame->r[0] = error;
277 }
278
279 static uint32_t tz_copyin_params(te_request_t *req, te_oper_param_t *in_params)
280 {
281         uint32_t i, len;
282         uint32_t extent;
283         te_oper_param_t *req_params;
284
285         if (req->params_size == 0)
286                 return OTE_SUCCESS;
287
288         if (in_params == NULL)
289                 return OTE_ERROR_BAD_PARAMETERS;
290
291         len = req->params_size * sizeof(te_oper_param_t);
292         extent = len + (vaddr_t)in_params;
293
294         if ((len >= tz_shared_req_param_size) ||
295             (extent >= (tz_shared_req_param_buf + tz_shared_req_param_size))) {
296                 dprintf(CRITICAL,
297                         "%s: nparams (0x%08x) exceeds map size (0x%08x)\n",
298                         __func__, req->params_size, tz_shared_req_param_size);
299                 return OTE_ERROR_BAD_PARAMETERS;
300         }
301
302         req_params = calloc(1, len);
303         if (req_params == NULL)
304                 return OTE_ERROR_OUT_OF_MEMORY;
305
306         memcpy(req_params, in_params, len);
307
308         dprintf(SPEW, "%s: params %p len 0x%x\n", __func__, in_params, len);
309
310         for (i = 0; i < req->params_size; i++) {
311                 switch (req_params[i].type) {
312                 case TE_PARAM_TYPE_INT_RO:
313                         dprintf(SPEW, "%s: %d: INT_RO: val 0x%x\n",
314                                 __func__, i, req_params[i].u.Int.val);
315                         break;
316                 case TE_PARAM_TYPE_INT_RW:
317                         dprintf(SPEW, "%s: %d: INT_RW: val 0x%x\n",
318                                 __func__, i, req_params[i].u.Int.val);
319                         break;
320                 case TE_PARAM_TYPE_MEM_RO:
321                         dprintf(SPEW, "%s: %d: MEM_RO: len 0x%x\n",
322                                 __func__, i, req_params[i].u.Mem.len);
323                         break;
324                 case TE_PARAM_TYPE_MEM_RW:
325                         dprintf(SPEW, "%s: %d: MEM_RW: len 0x%x\n",
326                                 __func__, i, req_params[i].u.Mem.len);
327                         break;
328                 default:
329                         dprintf(INFO, "%s: unhandled param type 0x%x\n",
330                                 __func__, req_params[i].type);
331                         break;
332                 }
333         }
334         req->params = (uintptr_t)req_params;
335         return OTE_SUCCESS;
336 }
337
338 static void tz_copyout_params(te_oper_param_t *out_params, te_request_t *req)
339 {
340         uint32_t i;
341         te_oper_param_t *req_params;
342
343         if (out_params == NULL || req->params_size == 0)
344                 return;
345
346         req_params = (te_oper_param_t *)(uintptr_t)req->params;
347         for (i = 0; i < req->params_size; i++) {
348                 switch (out_params[i].type) {
349                 case TE_PARAM_TYPE_INT_RO:
350                         dprintf(SPEW, "%s: %d: INT_RO: val 0x%x\n",
351                                 __func__, i, req_params[i].u.Int.val);
352                         break;
353                 case TE_PARAM_TYPE_INT_RW:
354                         dprintf(SPEW, "%s: %d: INT_RW: val 0x%x\n",
355                                 __func__, i, req_params[i].u.Int.val);
356                         out_params[i].u.Int.val = req_params[i].u.Int.val;
357                         break;
358                 case TE_PARAM_TYPE_MEM_RO:
359                         dprintf(SPEW, "%s: %d: MEM_RO: len 0x%x\n",
360                                 __func__, i, req_params[i].u.Mem.len);
361                         break;
362                 case TE_PARAM_TYPE_MEM_RW:
363                         dprintf(SPEW, "%s: %d: MEM_RW: len 0x%x\n",
364                                 __func__, i, req_params[i].u.Mem.len);
365                         out_params[i].u.Mem.len = req_params[i].u.Mem.len;
366                         break;
367                 default:
368                         dprintf(INFO, "%s: unhandled param type 0x%x\n",
369                                 __func__, out_params[i].type);
370                         break;
371                 }
372         }
373         free(req_params);
374 }
375
376 /*
377  * TE related SMCs that require some setup and will
378  * trigger a TA thread to run before the SMC completes.
379  */
380 static void tz_handle_trusted_app_smc(struct tz_monitor_frame *frame)
381 {
382         te_request_t *caller_req;
383         te_oper_param_t *caller_params;
384         te_request_t *req;
385         uint32_t req_off, param_off;
386         te_error_t result = OTE_SUCCESS;
387
388         if (!tz_shared_req_param_buf || !tz_shared_req_param_size) {
389                 frame->r[0] = OTE_ERROR_GENERIC;
390                 return;
391         }
392
393         req_off = (uint32_t)frame->r[1];
394         param_off = (uint32_t)frame->r[2];
395
396         /* check req/param offsets are within the mapped buffer */
397         if ((req_off + sizeof(te_request_t)) >= tz_shared_req_param_size) {
398                 dprintf(CRITICAL,
399                         "%s: req offset (0x%08x) beyond map size (0x%08x)\n",
400                         __func__, req_off, tz_shared_req_param_size);
401                 frame->r[0] = OTE_ERROR_BAD_PARAMETERS;
402                 return;
403         }
404         if ((param_off + sizeof(te_oper_param_t)) >= tz_shared_req_param_size) {
405                 dprintf(CRITICAL,
406                         "%s: param offset (0x%08x) beyond map size (0x%08x)\n",
407                         __func__, param_off, tz_shared_req_param_size);
408                 frame->r[0] = OTE_ERROR_BAD_PARAMETERS;
409                 return;
410         }
411
412         /* save caller request and param block addresses */
413         caller_req = (te_request_t *)(tz_shared_req_param_buf + req_off);
414         caller_params = (te_oper_param_t *)(tz_shared_req_param_buf + param_off);
415
416         /* copy caller request info to new buffer */
417         req = calloc(1, sizeof(te_request_t));
418         if (req == NULL) {
419                 frame->r[0] = OTE_ERROR_OUT_OF_MEMORY;
420                 return;
421         }
422         memcpy(req, caller_req, sizeof(te_request_t));
423
424         /* move optional parameters into request struct */
425         result = tz_copyin_params(req, caller_params);
426         if (result != OTE_SUCCESS) {
427                 free(req);
428                 frame->r[0] = result;
429                 return;
430         }
431
432         switch (frame->r[0]) {
433                 case SMC_TA_OPEN_SESSION:
434                         result = te_handle_open_session(req, false);
435                         break;
436                 case SMC_TA_CLOSE_SESSION:
437                         result = te_handle_close_session(req, false);
438                         break;
439                 case SMC_TA_LAUNCH_OPERATION:
440                         result = te_handle_launch_op(req, false);
441                         break;
442         }
443         req->result = result;
444
445         /* consider any failure here to have occured in common TE code */
446         if (req->result != OTE_SUCCESS) {
447                 req->result_origin = OTE_RESULT_ORIGIN_KERNEL;
448         }
449
450         te_get_completed_cmd(req, false);
451
452         /* move request results back to caller struct */
453         caller_req->result = req->result;
454         caller_req->result_origin = req->result_origin;
455         if (frame->r[0] == SMC_TA_OPEN_SESSION) {
456                 caller_req->session_id = req->session_id;
457         }
458
459         /* move optional param results back to caller */
460         tz_copyout_params(caller_params, req);
461
462         free(req);
463         frame->r[0] = result;
464 }
465
466 void tz_stdcall_handler(struct tz_monitor_frame *frame)
467 {
468         dprintf(SPEW, "%s: 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", __func__,
469                 frame->r[0], frame->r[1], frame->r[2], frame->r[3]);
470
471         switch (frame->r[0]) {
472         case SMC_TOS_SS_REGISTER_HANDLER:
473         case SMC_TOS_NS_REG_REQPARAM_BUF:
474         case SMC_TOS_INIT_LOGGER:
475         case SMC_TOS_PROGRAM_VPR:
476                 tz_handle_system_smc(frame);
477                 break;
478
479         case SMC_TA_OPEN_SESSION:
480         case SMC_TA_CLOSE_SESSION:
481         case SMC_TA_LAUNCH_OPERATION:
482                 tz_handle_trusted_app_smc(frame);
483                 break;
484
485         default:
486                 dprintf(CRITICAL, "%s: unhandled function 0x%x\n",
487                         __func__, (uint32_t)frame->r[0]);
488                 frame->r[0] = TZ_UNSUPPORTED_PARAM;
489                 break;
490         }
491 }
492
493 void tz_fastcall_handler(struct tz_monitor_frame *frame)
494 {
495         switch (frame->r[0]) {
496 #if !defined(WITH_MONITOR_BIN)
497         case SMC_SIP_L2_MANAGEMENT:
498         case SMC_SIP_CPU_RESET_VECTOR:
499         case SMC_SIP_CPU_RESET_VECTOR_LEGACY:
500         case SMC_SIP_DEVICE_SUSPEND:
501                 pm_handle_platform_smc(frame);
502                 break;
503
504         case SMC_SIP_PROGRAM_VPR:
505                 frame->r[0] = platform_program_vpr(frame->r[1], frame->r[2]);
506                 break;
507 #endif
508
509         default:
510                 frame->r[0] = TZ_UNSUPPORTED_PARAM;
511                 break;
512         }
513 }