/* platform/tegra/common/tz.c (from 3rdparty/ote_partner/tlk.git) */
1 /*
2  * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <err.h>
25 #include <errno.h>
26 #include <debug.h>
27 #include <assert.h>
28 #include <malloc.h>
29 #include <string.h>
30 #include <platform.h>
31 #include <arch.h>
32 #include <stdlib.h>
33 #include <lib/heap.h>
34 #include <arch/outercache.h>
35 #include <platform/memmap.h>
36 #include <platform/tzrammap.h>
37 #include <platform/irqs.h>
38 #include <arch/arm.h>
39 #include <arch/arm/mmu.h>
40 #include <platform/platform_p.h>
41 #include <ote_intf.h>
42
43 #define TZ_UNSUPPORTED_PARAM    0xDEADBEEF
44
45 extern unsigned long _jump_to_ns_irq_addr;
46
47 extern vaddr_t platform_vaspace_ptr;
48 extern vaddr_t platform_vaspace_end;
49 extern unsigned int ote_logger_enabled;
50
51 #if !defined(WITH_MONITOR_BIN)
52 extern struct tz_monitor_frame *go_nonsecure(uint32_t smc_type, struct tz_monitor_frame *);
53 #endif
54
55 /* location in NS of req/param structs */
56 static vaddr_t tz_shared_req_param_buf;
57 static uint32_t tz_shared_req_param_size;
58
59 /* valid DRAM ranges as specified by the bootloader */
60 #define MAX_NUM_DRAM_RANGES     8
61 struct dram_range {
62         paddr_t dram_base;
63         paddr_t dram_size;
64 };
65 static struct dram_range tz_dram_range[MAX_NUM_DRAM_RANGES];
66 static uint32_t tz_dram_range_count;
67
68 void tz_add_dram_range(paddr_t base, paddr_t size)
69 {
70         ASSERT(tz_dram_range_count < MAX_NUM_DRAM_RANGES);
71
72         tz_dram_range[tz_dram_range_count].dram_base = base;
73         tz_dram_range[tz_dram_range_count].dram_size = size;
74         tz_dram_range_count++;
75 }
76
77 static bool valid_dram_address(paddr_t addr, paddr_t size)
78 {
79         uint32_t i;
80
81         for (i = 0; i < tz_dram_range_count; i++) {
82                 if ((tz_dram_range[i].dram_base <= addr) &&
83                         ((tz_dram_range[i].dram_base + tz_dram_range[i].dram_size) >= (addr + size)))
84                         return true;
85         }
86
87         dprintf(CRITICAL, "%s: illegal addr 0x%08llx, size 0x%08llx reference\n",
88                 __func__, (uint64_t)addr, (uint64_t)size);
89         return false;
90 }
91
/*
 * Map a non-secure (NS) buffer into the secure kernel's virtual
 * address space.
 *
 * ns_addr: NS virtual address of the buffer (need not be page aligned)
 * size:    buffer length in bytes
 *
 * Returns the secure vaddr corresponding to ns_addr (the sub-page
 * offset is preserved), or NULL if the secure vaddr space is
 * exhausted, the page list allocation fails, or any backing page
 * falls outside the bootloader-declared DRAM ranges.
 */
vaddr_t tz_map_shared_mem(nsaddr_t ns_addr, uint32_t size)
{
        vaddr_t vaddr, vsize;
        uint32_t offset;
        paddr_t *pagelist;
        uint32_t pg, npages;
        task_map_t mptr;

        /* keep the sub-page offset; round size up to whole pages */
        offset = ns_addr & PAGE_MASK;
        size = ROUNDUP(offset + size, PAGE_SIZE);
        npages = size / PAGE_SIZE;

        /* referencing NS memory, so setup proper vaddr alignment */
        vaddr = ROUNDUP(platform_vaspace_ptr, NS_VIRT_ADDR_ALIGN);
        vsize = ROUNDUP(size, NS_VIRT_ADDR_ALIGN);
        if ((vaddr + vsize) > platform_vaspace_end)
                return NULL;

        /* temporary list of the physical pages backing the NS range */
        pagelist = malloc(npages * sizeof(paddr_t));
        if (pagelist == NULL)
                return NULL;

        mptr.flags = TM_NS_MEM_PRIV;
        mptr.size = size;

        /* resolve the NS virtual range to physical page addresses */
        arm_mmu_translate_range(ns_addr, pagelist, &mptr);

        /* verify all pages reside in DRAM */
        for (pg = 0; pg < npages; pg++) {
                ASSERT(!(pagelist[pg] & PAGE_MASK));

                if (valid_dram_address(pagelist[pg], PAGE_SIZE) == false) {
                        free(pagelist);
                        return NULL;
                }
        }

        /* map pages and TLB invalidate by addr */
        for (pg = 0; pg < npages; pg++) {
                vaddr_t map_addr;

                map_addr = vaddr + (pg * PAGE_SIZE);
                arm_mmu_map_kpage(map_addr, pagelist[pg], &mptr);
                arm_invalidate_tlb_byaddr(map_addr);
        }

        free(pagelist);

        /* advance the vaspace allocator past the span just consumed */
        platform_vaspace_ptr = (vaddr + vsize);
        return vaddr + offset;
}
143
/*
 * One-time TrustZone bring-up: initialize power-management SMC
 * handling (only when no separate monitor binary is used), the TE
 * interface, and start with NS interrupt delivery marked not-ready.
 */
void tz_init(void)
{
#if !defined(WITH_MONITOR_BIN)
        pm_init();
#endif
        te_intf_init();
        platform_set_intr_ready_state(false, NULL);
}
152
153 struct tz_monitor_frame *tz_switch_to_ns(uint32_t smc_type, struct tz_monitor_frame *frame)
154 {
155         struct tz_monitor_frame *incoming_smc;
156
157         enter_critical_section();
158
159         if (s_vfp_hw_context && s_vfp_hw_context->valid) {
160                 /*
161                  * Save the secure world vfp state on the way out and mark
162                  * the buffer as invalid as on entry we'll disable fpexc,
163                  * so we can save out the NS copy and fault in the new one.
164                  */
165                 arch_vfp_save(s_vfp_hw_context);
166                 s_vfp_hw_context->fpexc = 0x0;
167                 s_vfp_hw_context->valid = false;
168         }
169
170         if (ns_vfp_hw_context && ns_vfp_hw_context->valid) {
171                 /*
172                  * Restore the non-secure world vfp state if during our
173                  * running we context switched away from what was loaded in
174                  * the HW (NS version of fpexc is always saved/restored).
175                  */
176                 arch_vfp_restore(ns_vfp_hw_context);
177                 ns_vfp_hw_context->valid = false;
178         }
179
180         exit_critical_section();
181
182         arm_set_vfp_fpexc(ns_vfp_hw_context->fpexc);
183
184 #if defined(WITH_MONITOR_BIN)
185         /* go to monitor, for return to NS */
186         incoming_smc = monitor_send_receive(smc_type, frame);
187 #else
188         /* go to NS, on return there'll be a new SMC */
189         incoming_smc = go_nonsecure(smc_type, frame);
190 #endif
191
192         /* on entry, save NS fpexc and disable to detect vfp usage */
193         ns_vfp_hw_context->fpexc = arm_get_vfp_fpexc();
194         arm_set_vfp_fpexc(0x0);
195
196         return incoming_smc;
197 }
198
199 status_t tz_register_req_param_buf(struct tz_monitor_frame *frame)
200 {
201         uint32_t size;
202         nsaddr_t ns_addr;
203
204         ns_addr = frame->r[1];
205         size = frame->r[2];
206
207         if (!size) {
208                 /* probing only for supported SMC */
209                 return NO_ERROR;
210         }
211
212         tz_shared_req_param_buf = tz_map_shared_mem(ns_addr, size);
213         if (tz_shared_req_param_buf == NULL)
214                 return ERR_GENERIC;
215
216         tz_shared_req_param_size = size;
217         return NO_ERROR;
218 }
219
/*
 * System related SMCs handled on the current idle stack.
 * These should be simple operations that can't block.
 *
 * The result is written back to the caller in frame->r[0]
 * (0 / NO_ERROR on success).
 */
void tz_handle_system_smc(struct tz_monitor_frame *frame)
{
        int error = 0;
        nsaddr_t _ns_cb_struct_addr = NULL;

        switch (frame->r[0]) {
                case SMC_TOS_NS_IRQ_PENDING_VECTOR:
                        /* NS tells us where to jump when an IRQ is pending */
                        _jump_to_ns_irq_addr = frame->r[1];
                        platform_set_intr_ready_state(true, frame);
                        break;

                case SMC_TOS_SS_REGISTER_HANDLER:
                        error = platform_ss_register_handler(frame);
                        break;

                case SMC_TOS_NS_REG_REQPARAM_BUF:
                        error = tz_register_req_param_buf(frame);
                        break;

                case SMC_TOS_INIT_LOGGER:
                        /* r[1]: NS address of the shared log circular buffer */
                        _ns_cb_struct_addr = frame->r[1];

                        if (!_ns_cb_struct_addr) {
                                /* no buffer: disable logging, dump early log locally */
                                ote_logger_enabled = 0;
                                dputs(CRITICAL, early_logbuf);
                                error = ERR_NOT_SUPPORTED;
                                break;
                        }

                        /* physical address of the circular buffer */
                        if (set_log_phy_addr(_ns_cb_struct_addr)) {
                                ote_logger_enabled = 0;
                                dputs(CRITICAL, early_logbuf);
                                error = ERR_NOT_SUPPORTED;
                                break;
                        }

                        /* copy early prints into the shared buffer */
                        dprintf(CRITICAL, "%s", early_logbuf);

                        break;

                case SMC_TOS_PROGRAM_VPR:
                case SMC_SIP_PROGRAM_VPR:
                        /* r[1]/r[2]: presumably VPR base/size -- see platform_program_vpr */
                        error = platform_program_vpr(frame->r[1], frame->r[2]);
                        break;
        }

        frame->r[0] = error;
}
274
275 static uint32_t tz_copyin_params(te_request_t *req, te_oper_param_t *in_params)
276 {
277         uint32_t i, len;
278         uint32_t extent;
279         te_oper_param_t *req_params;
280
281         if (req->params_size == 0)
282                 return OTE_SUCCESS;
283
284         if (in_params == NULL)
285                 return OTE_ERROR_BAD_PARAMETERS;
286
287         len = req->params_size * sizeof(te_oper_param_t);
288         extent = len + (vaddr_t)in_params;
289
290         if ((len >= tz_shared_req_param_size) ||
291             (extent >= (tz_shared_req_param_buf + tz_shared_req_param_size))) {
292                 dprintf(CRITICAL,
293                         "%s: nparams (0x%08x) exceeds map size (0x%08x)\n",
294                         __func__, req->params_size, tz_shared_req_param_size);
295                 return OTE_ERROR_BAD_PARAMETERS;
296         }
297
298         req_params = calloc(1, len);
299         if (req_params == NULL)
300                 return OTE_ERROR_OUT_OF_MEMORY;
301
302         memcpy(req_params, in_params, len);
303
304         dprintf(SPEW, "%s: params %p len 0x%x\n", __func__, in_params, len);
305
306         for (i = 0; i < req->params_size; i++) {
307                 switch (req_params[i].type) {
308                 case TE_PARAM_TYPE_INT_RO:
309                         dprintf(SPEW, "%s: %d: INT_RO: val 0x%x\n",
310                                 __func__, i, req_params[i].u.Int.val);
311                         break;
312                 case TE_PARAM_TYPE_INT_RW:
313                         dprintf(SPEW, "%s: %d: INT_RW: val 0x%x\n",
314                                 __func__, i, req_params[i].u.Int.val);
315                         break;
316                 case TE_PARAM_TYPE_MEM_RO:
317                         dprintf(SPEW, "%s: %d: MEM_RO: len 0x%x\n",
318                                 __func__, i, req_params[i].u.Mem.len);
319                         break;
320                 case TE_PARAM_TYPE_MEM_RW:
321                         dprintf(SPEW, "%s: %d: MEM_RW: len 0x%x\n",
322                                 __func__, i, req_params[i].u.Mem.len);
323                         break;
324                 default:
325                         dprintf(INFO, "%s: unhandled param type 0x%x\n",
326                                 __func__, req_params[i].type);
327                         break;
328                 }
329         }
330         req->params = (uintptr_t)req_params;
331         return OTE_SUCCESS;
332 }
333
334 static void tz_copyout_params(te_oper_param_t *out_params, te_request_t *req)
335 {
336         uint32_t i;
337         te_oper_param_t *req_params;
338
339         if (out_params == NULL || req->params_size == 0)
340                 return;
341
342         req_params = (te_oper_param_t *)(uintptr_t)req->params;
343         for (i = 0; i < req->params_size; i++) {
344                 switch (out_params[i].type) {
345                 case TE_PARAM_TYPE_INT_RO:
346                         dprintf(SPEW, "%s: %d: INT_RO: val 0x%x\n",
347                                 __func__, i, req_params[i].u.Int.val);
348                         break;
349                 case TE_PARAM_TYPE_INT_RW:
350                         dprintf(SPEW, "%s: %d: INT_RW: val 0x%x\n",
351                                 __func__, i, req_params[i].u.Int.val);
352                         out_params[i].u.Int.val = req_params[i].u.Int.val;
353                         break;
354                 case TE_PARAM_TYPE_MEM_RO:
355                         dprintf(SPEW, "%s: %d: MEM_RO: len 0x%x\n",
356                                 __func__, i, req_params[i].u.Mem.len);
357                         break;
358                 case TE_PARAM_TYPE_MEM_RW:
359                         dprintf(SPEW, "%s: %d: MEM_RW: len 0x%x\n",
360                                 __func__, i, req_params[i].u.Mem.len);
361                         out_params[i].u.Mem.len = req_params[i].u.Mem.len;
362                         break;
363                 default:
364                         dprintf(INFO, "%s: unhandled param type 0x%x\n",
365                                 __func__, out_params[i].type);
366                         break;
367                 }
368         }
369         free(req_params);
370 }
371
372 /*
373  * TE related SMCs that require some setup and will
374  * trigger a TA thread to run before the SMC completes.
375  */
376 static void tz_handle_trusted_app_smc(struct tz_monitor_frame *frame)
377 {
378         te_request_t *caller_req;
379         te_oper_param_t *caller_params;
380         te_request_t *req;
381         uint32_t req_off, param_off;
382         te_error_t result = OTE_SUCCESS;
383
384         if (!tz_shared_req_param_buf || !tz_shared_req_param_size) {
385                 frame->r[0] = OTE_ERROR_GENERIC;
386                 return;
387         }
388
389         req_off = (uint32_t)frame->r[1];
390         param_off = (uint32_t)frame->r[2];
391
392         /* check req/param offsets are within the mapped buffer */
393         if ((req_off + sizeof(te_request_t)) >= tz_shared_req_param_size) {
394                 dprintf(CRITICAL,
395                         "%s: req offset (0x%08x) beyond map size (0x%08x)\n",
396                         __func__, req_off, tz_shared_req_param_size);
397                 frame->r[0] = OTE_ERROR_BAD_PARAMETERS;
398                 return;
399         }
400         if ((param_off + sizeof(te_oper_param_t)) >= tz_shared_req_param_size) {
401                 dprintf(CRITICAL,
402                         "%s: param offset (0x%08x) beyond map size (0x%08x)\n",
403                         __func__, param_off, tz_shared_req_param_size);
404                 frame->r[0] = OTE_ERROR_BAD_PARAMETERS;
405                 return;
406         }
407
408         /* save caller request and param block addresses */
409         caller_req = (te_request_t *)(tz_shared_req_param_buf + req_off);
410         caller_params = (te_oper_param_t *)(tz_shared_req_param_buf + param_off);
411
412         /* copy caller request info to new buffer */
413         req = calloc(1, sizeof(te_request_t));
414         if (req == NULL) {
415                 frame->r[0] = OTE_ERROR_OUT_OF_MEMORY;
416                 return;
417         }
418         memcpy(req, caller_req, sizeof(te_request_t));
419
420         /* move optional parameters into request struct */
421         result = tz_copyin_params(req, caller_params);
422         if (result != OTE_SUCCESS) {
423                 free(req);
424                 frame->r[0] = result;
425                 return;
426         }
427
428         switch (frame->r[0]) {
429                 case SMC_TA_OPEN_SESSION:
430                         result = te_handle_open_session(req, false);
431                         break;
432                 case SMC_TA_CLOSE_SESSION:
433                         result = te_handle_close_session(req, false);
434                         break;
435                 case SMC_TA_LAUNCH_OPERATION:
436                         result = te_handle_launch_op(req, false);
437                         break;
438         }
439         req->result = result;
440
441         /* consider any failure here to have occured in common TE code */
442         if (req->result != OTE_SUCCESS) {
443                 req->result_origin = OTE_RESULT_ORIGIN_KERNEL;
444         }
445
446         te_get_completed_cmd(req, false);
447
448         /* move request results back to caller struct */
449         caller_req->result = req->result;
450         caller_req->result_origin = req->result_origin;
451         if (frame->r[0] == SMC_TA_OPEN_SESSION) {
452                 caller_req->session_id = req->session_id;
453         }
454
455         /* move optional param results back to caller */
456         tz_copyout_params(caller_params, req);
457
458         free(req);
459         frame->r[0] = result;
460 }
461
/*
 * Top-level dispatcher for SMCs arriving from the monitor: route the
 * function id in frame->r[0] to the PM, system, or trusted-app
 * handler.  Unknown ids get TZ_UNSUPPORTED_PARAM written back to
 * frame->r[0].
 */
void tz_handle_monitor(struct tz_monitor_frame *frame)
{
        dprintf(SPEW,
                "%s: 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
                __func__, frame->r[0], frame->r[1], frame->r[2], frame->r[3],
                frame->r[4], frame->r[5], frame->r[6], frame->r[7]);

        switch (frame->r[0])
        {
#if !defined(WITH_MONITOR_BIN)
                /* PM SMCs only handled here when no monitor binary exists */
                case SMC_SIP_L2_MANAGEMENT:
                case SMC_SIP_CPU_RESET_VECTOR:
                case SMC_SIP_CPU_RESET_VECTOR_LEGACY:
                case SMC_SIP_DEVICE_SUSPEND:
                        pm_handle_platform_smc(frame);
                        break;
#endif

                case SMC_TOS_NS_IRQ_PENDING_VECTOR:
                case SMC_TOS_SS_REGISTER_HANDLER:
                case SMC_TOS_NS_REG_REQPARAM_BUF:
                case SMC_TOS_INIT_LOGGER:
                case SMC_TOS_PROGRAM_VPR:
                case SMC_SIP_PROGRAM_VPR:
                        tz_handle_system_smc(frame);
                        break;

                case SMC_TA_OPEN_SESSION:
                case SMC_TA_CLOSE_SESSION:
                case SMC_TA_LAUNCH_OPERATION:
                        tz_handle_trusted_app_smc(frame);
                        break;

                default:
                        dprintf(CRITICAL, "%s: unhandled function 0x%x\n",
                                __func__, (uint32_t)frame->r[0]);
                        frame->r[0] = TZ_UNSUPPORTED_PARAM;
                        break;
        }
}