[FOSS_TLK]security: tlk_driver: add daemon recovery support
[tegra/ote_partner/tlk_driver.git] / security / tlk_driver / ote_comms.c
/*
 * Copyright (c) 2012-2015 NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/printk.h>
#include <linux/ioctl.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <asm/smp_plat.h>

#include "ote_protocol.h"

bool verbose_smc;
core_param(verbose_smc, verbose_smc, bool, 0644);

/* Set result/origin as one statement; do-while keeps it safe in if/else. */
#define SET_RESULT(req, r, ro)					\
	do {							\
		(req)->result = (r);				\
		(req)->result_origin = (ro);			\
	} while (0)

static int te_pin_user_pages(void *buffer, size_t size,
		unsigned long *pages_ptr, uint32_t buf_type, bool *is_locked)
{
	int ret = 0;
	unsigned int nr_pages;
	struct page **pages = NULL;
	bool writable;
	struct vm_area_struct *vma = NULL;
	unsigned int flags;
	int i;
	bool is_locked_prev;

	nr_pages = (((uintptr_t)buffer & (PAGE_SIZE - 1)) +
			(size + PAGE_SIZE - 1)) >> PAGE_SHIFT;

	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	writable = (buf_type == TE_PARAM_TYPE_MEM_RW ||
		buf_type == TE_PARAM_TYPE_PERSIST_MEM_RW);

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, (unsigned long)buffer,
			nr_pages, writable,
			0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret <= 0) {
		pr_err("%s: Error %d in get_user_pages\n", __func__, ret);
		kfree(pages);
		return ret;
	}

	*pages_ptr = (unsigned long) pages;
	nr_pages = ret;

	/* Check whether the buffer is already mlock()ed by userspace. */
	down_read(&current->mm->mmap_sem);
	is_locked_prev = false;
	vma = find_extend_vma(current->mm, (unsigned long)buffer);
	if (vma && (vma->vm_flags & VM_LOCKED))
		is_locked_prev = true;
	up_read(&current->mm->mmap_sem);

	/*
	 * Lock the pages if they are not already locked, to ensure that
	 * their AF (access flag) bits are not cleared while the secure
	 * world is using them.
	 */
	*is_locked = false;
	if (!is_locked_prev) {
		ret = sys_mlock((unsigned long)buffer, size);
		if (!ret)
			*is_locked = true;
		else
			/*
			 * Follow through even if mlock failed; it can fail
			 * due to RLIMIT_MEMLOCK or missing capabilities.
			 */
			pr_warn("%s: Error %d in mlock, continuing session\n",
								__func__, ret);
	}

	/* Fault in the pages to set the AF bit in the PTEs. */
	down_read(&current->mm->mmap_sem);
	flags = FAULT_FLAG_USER;
	if (writable)
		flags |= FAULT_FLAG_WRITE;
	for (i = 0; i < nr_pages; i++) {
		ret = fixup_user_fault(current, current->mm,
			(unsigned long)(buffer + (i * PAGE_SIZE)), flags);
		if (ret) {
			pr_err("%s: Error %d in fixup_user_fault\n",
							__func__, ret);
			break;
		}
	}
	up_read(&current->mm->mmap_sem);

	if (ret) {
		if (*is_locked)
			sys_munlock((unsigned long)buffer, size);
		/* Drop the references taken by get_user_pages() above. */
		for (i = 0; i < nr_pages; i++)
			page_cache_release(pages[i]);
		kfree(pages);
		*pages_ptr = 0;
		return ret;
	}

	/* Return the number of pages pinned */
	return nr_pages;
}

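/*
 * A minimal usage sketch (hypothetical, compiled out): how a caller
 * would be expected to drive te_pin_user_pages() for a read-write
 * user buffer. example_pin() and its arguments are illustrative and
 * not part of this driver.
 */
#if 0
static int example_pin(void *user_buf, size_t len)
{
	unsigned long pages = 0;
	bool is_locked = false;
	int nr_pages;

	/* pin (and usually mlock) the buffer; returns the pages pinned */
	nr_pages = te_pin_user_pages(user_buf, len, &pages,
					TE_PARAM_TYPE_MEM_RW, &is_locked);
	if (nr_pages <= 0)
		return -EFAULT;

	/* ... hand (struct page **)pages off to the secure world ... */
	return 0;
}
#endif
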
static int te_prep_mem_buffer(uint32_t session_id,
		void *buffer, size_t size, uint32_t buf_type,
		struct te_session *session)
{
	unsigned long pages = 0;
	struct te_shmem_desc *shmem_desc = NULL;
	int ret = 0, nr_pages = 0;
	bool is_locked = false;

	/* allocate new shmem descriptor */
	shmem_desc = kzalloc(sizeof(struct te_shmem_desc), GFP_KERNEL);
	if (!shmem_desc) {
		pr_err("%s: shmem_desc allocation failed\n", __func__);
		ret = OTE_ERROR_OUT_OF_MEMORY;
		goto error;
	}

	/* pin pages */
	nr_pages = te_pin_user_pages(buffer, size, &pages,
					buf_type, &is_locked);
	if (nr_pages <= 0) {
		pr_err("%s: te_pin_user_pages failed (%d)\n", __func__,
			nr_pages);
		ret = OTE_ERROR_OUT_OF_MEMORY;
		kfree(shmem_desc);
		goto error;
	}

	/* initialize shmem descriptor */
	INIT_LIST_HEAD(&(shmem_desc->list));
	shmem_desc->buffer = buffer;
	shmem_desc->size = size;
	shmem_desc->nr_pages = nr_pages;
	shmem_desc->pages = (struct page **)(uintptr_t)pages;
	shmem_desc->is_locked = is_locked;
	/* record the type: te_release_mem_buffer() dirties RW pages by it */
	shmem_desc->type = buf_type;

	/* add shmem descriptor to proper list */
	if ((buf_type == TE_PARAM_TYPE_MEM_RO) ||
		(buf_type == TE_PARAM_TYPE_MEM_RW))
		list_add_tail(&shmem_desc->list, &session->temp_shmem_list);
	else {
		list_add_tail(&shmem_desc->list,
			&session->inactive_persist_shmem_list);
	}

	return OTE_SUCCESS;
error:
	return ret;
}

static int te_prep_mem_buffers(struct te_request *request,
			struct te_session *session)
{
	uint32_t i;
	int ret = OTE_SUCCESS;
	struct te_oper_param *params;

	params = (struct te_oper_param *)(uintptr_t)request->params;
	for (i = 0; i < request->params_size; i++) {
		switch (params[i].type) {
		case TE_PARAM_TYPE_NONE:
		case TE_PARAM_TYPE_INT_RO:
		case TE_PARAM_TYPE_INT_RW:
			break;
		case TE_PARAM_TYPE_MEM_RO:
		case TE_PARAM_TYPE_MEM_RW:
		case TE_PARAM_TYPE_PERSIST_MEM_RO:
		case TE_PARAM_TYPE_PERSIST_MEM_RW:
			ret = te_prep_mem_buffer(request->session_id,
				(void *)(uintptr_t)params[i].u.Mem.base,
				params[i].u.Mem.len,
				params[i].type,
				session);
			if (ret < 0) {
				pr_err("%s failed with err (%d)\n",
					__func__, ret);
				ret = OTE_ERROR_BAD_PARAMETERS;
			}
			break;
		default:
			pr_err("%s: OTE_ERROR_BAD_PARAMETERS\n", __func__);
			ret = OTE_ERROR_BAD_PARAMETERS;
			break;
		}
		/* stop at the first failure, so a later success can't mask it */
		if (ret != OTE_SUCCESS)
			break;
	}
	return ret;
}

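/*
 * Illustrative sketch (hypothetical, compiled out): the shape of one
 * te_oper_param entry that te_prep_mem_buffers() walks above. The field
 * accesses follow this file (type, u.Mem.base, u.Mem.len); the helper
 * itself and its values are made up for illustration.
 */
#if 0
static void example_fill_param(struct te_oper_param *param,
				void *user_buf, size_t len)
{
	/* a read-write memory argument, pinned for one operation */
	param->type = TE_PARAM_TYPE_MEM_RW;
	param->u.Mem.base = (uintptr_t)user_buf;
	param->u.Mem.len = len;
}
#endif
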
static void te_release_mem_buffer(struct te_shmem_desc *shmem_desc)
{
	uint32_t i;
	int status;

	list_del(&shmem_desc->list);
	for (i = 0; i < shmem_desc->nr_pages; i++) {
		/* mark writable pages dirty before dropping our reference */
		if ((shmem_desc->type == TE_PARAM_TYPE_MEM_RW) ||
			(shmem_desc->type == TE_PARAM_TYPE_PERSIST_MEM_RW))
			set_page_dirty_lock(shmem_desc->pages[i]);
		page_cache_release(shmem_desc->pages[i]);
	}
	kfree(shmem_desc->pages);

	/* undo the mlock taken at pin time (skip if the mm is already gone) */
	if (shmem_desc->is_locked && current->mm) {
		status = sys_munlock((unsigned long)shmem_desc->buffer,
							shmem_desc->size);
		if (status)
			pr_err("%s: Error %d in munlock\n", __func__, status);
	}

	kfree(shmem_desc);
}

static void te_release_mem_buffers(struct list_head *buflist)
{
	struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;

	list_for_each_entry_safe(shmem_desc, tmp_shmem_desc, buflist, list) {
		te_release_mem_buffer(shmem_desc);
	}
}

static void te_activate_persist_mem_buffers(struct te_session *session)
{
	struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;

	/* move persist mem buffers from inactive list to active list */
	list_for_each_entry_safe(shmem_desc, tmp_shmem_desc,
		&session->inactive_persist_shmem_list, list) {
		list_move_tail(&shmem_desc->list, &session->persist_shmem_list);
	}
}

static struct te_session *te_get_session(struct tlk_context *context,
					uint32_t session_id)
{
	struct te_session *session, *tmp_session;

	list_for_each_entry_safe(session, tmp_session,
		&context->session_list, list) {
		if (session->session_id == session_id)
			return session;
	}

	return NULL;
}

#ifdef CONFIG_SMP
cpumask_t saved_cpu_mask;
static long switch_cpumask_to_cpu0(void)
{
	long ret;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	cpu_set(0, local_cpu_mask);
	cpumask_copy(&saved_cpu_mask, tsk_cpus_allowed(current));
	ret = sched_setaffinity(0, &local_cpu_mask);
	if (ret)
		pr_err("%s: sched_setaffinity #1 -> 0x%lX\n", __func__, ret);

	return ret;
}

static void restore_cpumask(void)
{
	long ret = sched_setaffinity(0, &saved_cpu_mask);

	if (ret)
		pr_err("%s: sched_setaffinity #2 -> 0x%lX\n", __func__, ret);
}
#else
static inline long switch_cpumask_to_cpu0(void) { return 0; }
static inline void restore_cpumask(void) {}
#endif

/* Arguments marshalled for running an SMC on CPU0 via work_on_cpu(). */
struct tlk_smc_work_args {
	uint32_t arg0;
	uintptr_t arg1;
	uint32_t arg2;
};

static long tlk_generic_smc_on_cpu0(void *args)
{
	struct tlk_smc_work_args *work;
	int callback_status = 0;
	uint32_t retval;

	work = (struct tlk_smc_work_args *)args;
	retval = _tlk_generic_smc(work->arg0, work->arg1, work->arg2);

	/*
	 * The secure world can preempt back to us to service an IRQ or a
	 * storage (FS) callback; run the callback if needed, then restart
	 * the interrupted SMC until it completes for real.
	 */
	while (retval == TE_ERROR_PREEMPT_BY_IRQ ||
	       retval == TE_ERROR_PREEMPT_BY_FS) {
		if (retval == TE_ERROR_PREEMPT_BY_FS)
			callback_status = tlk_ss_op();
		retval = _tlk_generic_smc(TE_SMC_RESTART, callback_status, 0);
	}

	/* Print TLK logs if any */
	ote_print_logs();

	return retval;
}

uint32_t send_smc(uint32_t arg0, uintptr_t arg1, uintptr_t arg2)
{
	long ret;
	struct tlk_smc_work_args work_args;

	work_args.arg0 = arg0;
	work_args.arg1 = arg1;
	work_args.arg2 = arg2;

	if (current->flags &
	    (PF_WQ_WORKER | PF_NO_SETAFFINITY | PF_KTHREAD)) {
		int cpu = cpu_logical_map(get_cpu());
		put_cpu();

		/*
		 * Workers can't be migrated; depending on the current CPU,
		 * either execute directly or schedule work on CPU0.
		 */
		if (cpu == 0 && (current->flags & PF_WQ_WORKER))
			return tlk_generic_smc_on_cpu0(&work_args);
		else
			return work_on_cpu(0,
					tlk_generic_smc_on_cpu0, &work_args);
	}

	/* switch to CPU0 */
	ret = switch_cpumask_to_cpu0();
	if (ret) {
		/* not able to switch, schedule work on CPU0 */
		ret = work_on_cpu(0, tlk_generic_smc_on_cpu0, &work_args);
	} else {
		/* switched to CPU0 */
		ret = tlk_generic_smc_on_cpu0(&work_args);
		restore_cpumask();
	}

	return ret;
}

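/*
 * Usage sketch (hypothetical, compiled out): send_smc() may be invoked
 * from any thread; it routes the SMC to CPU0 itself, either by migrating
 * the caller or via work_on_cpu(). Callers on the request path serialize
 * with smc_lock, as te_set_vpr_params() below does. The argument values
 * here are illustrative.
 */
#if 0
	uint32_t err;

	mutex_lock(&smc_lock);
	err = send_smc(TE_SMC_TA_EVENT, TA_EVENT_RESTORE_KEYS, 0);
	mutex_unlock(&smc_lock);
#endif
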
/*
 * Do an SMC call
 */
static void do_smc(struct te_request *request, struct tlk_device *dev)
{
	uint32_t smc_args;
	uint32_t smc_params = 0;

	/* pass buffer-relative offsets, not kernel virtual addresses */
	smc_args = (char *)request - dev->req_param_buf;
	if (request->params) {
		smc_params =
			(char *)(uintptr_t)request->params - dev->req_param_buf;
	}

	(void)send_smc(request->type, smc_args, smc_params);
}

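/*
 * Worked example (illustrative numbers): if the request lives at
 * dev->req_param_buf + 0x100 and its params at + 0x200, then do_smc()
 * passes smc_args = 0x100 and smc_params = 0x200. The secure world
 * rebases these offsets against its own mapping of the shared buffer,
 * so no kernel virtual addresses cross the world boundary.
 */
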
/*
 * VPR programming SMC
 *
 * This routine is called both from normal threads and worker threads.
 * The worker threads are per-cpu and have PF_NO_SETAFFINITY set, so
 * any calls to sched_setaffinity will fail.
 *
 * If it's a worker thread on CPU0, just invoke the SMC directly. If
 * it's running on a non-CPU0, use work_on_cpu() to schedule the SMC
 * on CPU0.
 */
int te_set_vpr_params(void *vpr_base, size_t vpr_size)
{
	uint32_t retval;

	/* Share the same lock used when a request is sent from user side */
	mutex_lock(&smc_lock);

	retval = send_smc(TE_SMC_PROGRAM_VPR, (uintptr_t)vpr_base, vpr_size);

	mutex_unlock(&smc_lock);

	if (retval != OTE_SUCCESS) {
		pr_err("%s: smc failed err (0x%x)\n", __func__, retval);
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(te_set_vpr_params);

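/*
 * Usage sketch (hypothetical caller, compiled out): a memory-carveout
 * driver resizing the video-protect region might call the exported
 * helper like this. The base/size values are illustrative only.
 */
#if 0
	phys_addr_t base = 0x80000000;
	size_t size = SZ_128M;

	if (te_set_vpr_params((void *)(uintptr_t)base, size))
		pr_err("VPR programming failed\n");
#endif
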
void te_restore_keyslots(void)
{
	uint32_t retval;

	/* Share the same lock used when a request is sent from user side */
	mutex_lock(&smc_lock);

	retval = send_smc(TE_SMC_TA_EVENT, TA_EVENT_RESTORE_KEYS, 0);

	mutex_unlock(&smc_lock);

	if (retval != OTE_SUCCESS)
		pr_err("%s: smc failed err (0x%x)\n", __func__, retval);
}
EXPORT_SYMBOL(te_restore_keyslots);

/*
 * VRR Set Buffer
 *
 * Called from the DC driver and implemented as a monitor fastcall
 * to avoid taking the smc_lock. This call passes in the physical
 * address of the shared memory buffer.
 */
int te_vrr_set_buf(phys_addr_t addr)
{
	return _tlk_generic_smc(TE_SMC_VRR_SET_BUF, addr, 0);
}
EXPORT_SYMBOL(te_vrr_set_buf);

/*
 * VRR Sec
 *
 * Called from the DC driver and implemented as a monitor fastcall
 * to avoid taking the smc_lock.
 */
void te_vrr_sec(void)
{
	_tlk_generic_smc(TE_SMC_VRR_SEC, 0, 0);
}
EXPORT_SYMBOL(te_vrr_sec);

/*
 * Open session SMC (supporting client-based te_open_session() calls)
 */
void te_open_session(struct te_opensession *cmd,
		    struct te_request *request,
		    struct tlk_context *context)
{
	struct te_session *session;
	int ret;

	session = kzalloc(sizeof(struct te_session), GFP_KERNEL);
	if (!session) {
		SET_RESULT(request, OTE_ERROR_OUT_OF_MEMORY,
			OTE_RESULT_ORIGIN_API);
		return;
	}

	INIT_LIST_HEAD(&session->list);
	INIT_LIST_HEAD(&session->temp_shmem_list);
	INIT_LIST_HEAD(&session->inactive_persist_shmem_list);
	INIT_LIST_HEAD(&session->persist_shmem_list);

	request->type = TE_SMC_OPEN_SESSION;

	ret = te_prep_mem_buffers(request, session);
	if (ret != OTE_SUCCESS) {
		pr_err("%s: te_prep_mem_buffers failed err (0x%x)\n",
			__func__, ret);
		SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
		/* release anything pinned before the failure */
		te_release_mem_buffers(&session->temp_shmem_list);
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
		kfree(session);
		return;
	}

	memcpy(&request->dest_uuid,
	       &cmd->dest_uuid,
	       sizeof(struct te_service_id));

	pr_info("OPEN_CLIENT_SESSION: 0x%x 0x%x 0x%x 0x%x\n",
		request->dest_uuid[0],
		request->dest_uuid[1],
		request->dest_uuid[2],
		request->dest_uuid[3]);

	do_smc(request, context->dev);

	if (request->result) {
		/* release any persistent mem buffers if we failed */
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
		kfree(session);
	} else {
		/* otherwise mark active any persistent mem buffers */
		te_activate_persist_mem_buffers(session);

		/* save off session_id and add to list */
		session->session_id = request->session_id;
		list_add_tail(&session->list, &context->session_list);
	}

	te_release_mem_buffers(&session->temp_shmem_list);
}

/*
 * Close session SMC (supporting client-based te_close_session() calls)
 */
void te_close_session(struct te_closesession *cmd,
		     struct te_request *request,
		     struct tlk_context *context)
{
	struct te_session *session;

	request->session_id = cmd->session_id;
	request->type = TE_SMC_CLOSE_SESSION;

	do_smc(request, context->dev);
	if (request->result)
		pr_info("%s: error closing session: 0x%08x\n",
			__func__, request->result);

	session = te_get_session(context, cmd->session_id);
	if (!session) {
		pr_info("%s: session_id not found: 0x%x\n",
			__func__, cmd->session_id);
		return;
	}

	/* free session state */
	te_release_mem_buffers(&session->persist_shmem_list);
	list_del(&session->list);
	kfree(session);
}

/*
 * Launch operation SMC (supporting client-based te_launch_operation() calls)
 */
void te_launch_operation(struct te_launchop *cmd,
			struct te_request *request,
			struct tlk_context *context)
{
	struct te_session *session;
	int ret;

	session = te_get_session(context, cmd->session_id);
	if (!session) {
		pr_info("%s: session_id not found: 0x%x\n",
			__func__, cmd->session_id);
		return;
	}

	request->session_id = cmd->session_id;
	request->command_id = cmd->operation.command;
	request->type = TE_SMC_LAUNCH_OPERATION;

	ret = te_prep_mem_buffers(request, session);
	if (ret != OTE_SUCCESS) {
		pr_err("%s: te_prep_mem_buffers failed err (0x%x)\n",
			__func__, ret);
		SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
		/* release anything pinned before the failure */
		te_release_mem_buffers(&session->temp_shmem_list);
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
		return;
	}

	do_smc(request, context->dev);

	if (request->result) {
		/* release any inactive persistent mem buffers if we failed */
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
	} else {
		/* otherwise mark active any persistent mem buffers */
		te_activate_persist_mem_buffers(session);
	}

	te_release_mem_buffers(&session->temp_shmem_list);
}
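
/*
 * End-to-end sketch (hypothetical, compiled out): the session lifecycle
 * as the ioctl layer drives the three entry points above. The cmd,
 * request and context objects come from the caller; only the ordering
 * and the result handling are the point here.
 */
#if 0
	te_open_session(&open_cmd, request, context);
	if (request->result == OTE_SUCCESS) {
		launch_cmd.session_id = request->session_id;
		te_launch_operation(&launch_cmd, request, context);

		close_cmd.session_id = request->session_id;
		te_close_session(&close_cmd, request, context);
	}
#endif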