[FOSS_TLK]security: tlk_driver: lock/fault userspace pages
[tegra/ote_partner/tlk_driver.git] security/tlk_driver/ote_comms.c
/*
 * Copyright (c) 2012-2014 NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/printk.h>
#include <linux/ioctl.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <asm/smp_plat.h>

#include "ote_protocol.h"

bool verbose_smc;
core_param(verbose_smc, verbose_smc, bool, 0644);

#define SET_RESULT(req, r, ro) \
	do { (req)->result = (r); (req)->result_origin = (ro); } while (0)

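/*
 * Pin a userspace buffer's pages so they can be shared with the secure
 * world. The pages are pinned with get_user_pages(), mlock()ed (unless
 * the VMA was already VM_LOCKED) and then faulted in so the access flag
 * (AF) bit is set in each PTE before the buffer is handed to the TLK.
 * Returns the number of pages pinned, or a negative errno on failure.
 */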
static int te_pin_user_pages(void *buffer, size_t size,
		unsigned long *pages_ptr, uint32_t buf_type, bool *is_locked)
{
	int ret = 0;
	unsigned int nr_pages;
	struct page **pages = NULL;
	bool writable;
	struct vm_area_struct *vma = NULL;
	unsigned int flags;
	int i;
	bool is_locked_prev;

	nr_pages = (((uintptr_t)buffer & (PAGE_SIZE - 1)) +
			(size + PAGE_SIZE - 1)) >> PAGE_SHIFT;

	/* kcalloc checks for multiplication overflow on user-sized counts */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	writable = (buf_type == TE_PARAM_TYPE_MEM_RW ||
		buf_type == TE_PARAM_TYPE_PERSIST_MEM_RW);

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, (unsigned long)buffer,
			nr_pages, writable,
			0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret <= 0) {
		pr_err("%s: Error %d in get_user_pages\n", __func__, ret);
		kfree(pages);
		return ret;
	}

	*pages_ptr = (unsigned long)pages;
	nr_pages = ret;

	down_read(&current->mm->mmap_sem);

	is_locked_prev = false;
	vma = find_extend_vma(current->mm, (unsigned long)buffer);
	if (vma && (vma->vm_flags & VM_LOCKED))
		is_locked_prev = true;

	up_read(&current->mm->mmap_sem);

	/*
	 * Lock the pages if they are not already locked, to ensure that
	 * their AF bits are not later cleared (e.g. by reclaim).
	 */
	*is_locked = false;
	if (!is_locked_prev) {
		ret = sys_mlock((unsigned long)buffer, size);
		if (!ret)
			*is_locked = true;
		else
			/*
			 * Follow through even if mlock failed, since it can
			 * fail due to memory restrictions or missing
			 * capabilities.
			 */
			pr_warn("%s: Error %d in mlock, continuing session\n",
								__func__, ret);
	}

	down_read(&current->mm->mmap_sem);

	/* Fault the pages in to set the AF bit in each PTE */
	flags = FAULT_FLAG_USER;
	if (writable)
		flags |= FAULT_FLAG_WRITE;
	for (i = 0; i < nr_pages; i++) {
		ret = fixup_user_fault(current, current->mm,
			(unsigned long)(buffer + (i * PAGE_SIZE)), flags);
		if (ret) {
			pr_err("%s: Error %d in fixup_user_fault\n",
							__func__, ret);
			break;
		}
	}

	up_read(&current->mm->mmap_sem);

	if (ret) {
		if (*is_locked)
			sys_munlock((unsigned long)buffer, size);
		/* drop the references taken by get_user_pages() */
		for (i = 0; i < nr_pages; i++)
			page_cache_release(pages[i]);
		kfree(pages);
		return ret;
	}

	/* Return the number of pages pinned */
	return nr_pages;
}

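/*
 * Pin a single userspace buffer and track it in a te_shmem_desc. Temp
 * buffers go on the session's temp_shmem_list and are released after
 * each SMC; persistent buffers start on inactive_persist_shmem_list and
 * are activated only once the secure world accepts the request.
 */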
static int te_prep_mem_buffer(uint32_t session_id,
		void *buffer, size_t size, uint32_t buf_type,
		struct te_session *session)
{
	unsigned long pages = 0;
	struct te_shmem_desc *shmem_desc = NULL;
	int ret = 0, nr_pages = 0;
	bool is_locked = false;

	/* allocate a new shmem descriptor */
	shmem_desc = kzalloc(sizeof(struct te_shmem_desc), GFP_KERNEL);
	if (!shmem_desc) {
		pr_err("%s: shmem_desc allocation failed\n", __func__);
		ret = OTE_ERROR_OUT_OF_MEMORY;
		goto error;
	}

	/* pin pages */
	nr_pages = te_pin_user_pages(buffer, size, &pages,
					buf_type, &is_locked);
	if (nr_pages <= 0) {
		pr_err("%s: te_pin_user_pages failed (%d)\n", __func__,
			nr_pages);
		ret = OTE_ERROR_OUT_OF_MEMORY;
		kfree(shmem_desc);
		goto error;
	}

	/* initialize the shmem descriptor */
	INIT_LIST_HEAD(&(shmem_desc->list));
	shmem_desc->buffer = buffer;
	shmem_desc->size = size;
	shmem_desc->nr_pages = nr_pages;
	shmem_desc->pages = (struct page **)(uintptr_t)pages;
	shmem_desc->is_locked = is_locked;
	/*
	 * record the type: te_release_mem_buffer() needs it to mark
	 * RW pages dirty before dropping them
	 */
	shmem_desc->type = buf_type;

	/* add the shmem descriptor to the proper list */
	if ((buf_type == TE_PARAM_TYPE_MEM_RO) ||
		(buf_type == TE_PARAM_TYPE_MEM_RW))
		list_add_tail(&shmem_desc->list, &session->temp_shmem_list);
	else {
		list_add_tail(&shmem_desc->list,
			&session->inactive_persist_shmem_list);
	}

	return OTE_SUCCESS;
error:
	return ret;
}

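/*
 * Walk a request's parameter list and pin every memory parameter. On
 * the first failure, stop and report OTE_ERROR_BAD_PARAMETERS; the
 * caller is responsible for releasing any buffers already prepared.
 */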
static int te_prep_mem_buffers(struct te_request *request,
			struct te_session *session)
{
	uint32_t i;
	int ret = OTE_SUCCESS;
	struct te_oper_param *params;

	params = (struct te_oper_param *)(uintptr_t)request->params;
	for (i = 0; i < request->params_size; i++) {
		switch (params[i].type) {
		case TE_PARAM_TYPE_NONE:
		case TE_PARAM_TYPE_INT_RO:
		case TE_PARAM_TYPE_INT_RW:
			break;
		case TE_PARAM_TYPE_MEM_RO:
		case TE_PARAM_TYPE_MEM_RW:
		case TE_PARAM_TYPE_PERSIST_MEM_RO:
		case TE_PARAM_TYPE_PERSIST_MEM_RW:
			ret = te_prep_mem_buffer(request->session_id,
				(void *)(uintptr_t)params[i].u.Mem.base,
				params[i].u.Mem.len,
				params[i].type,
				session);
			if (ret != OTE_SUCCESS) {
				pr_err("%s failed with err (%d)\n",
					__func__, ret);
				ret = OTE_ERROR_BAD_PARAMETERS;
			}
			break;
		default:
			pr_err("%s: OTE_ERROR_BAD_PARAMETERS\n", __func__);
			ret = OTE_ERROR_BAD_PARAMETERS;
			break;
		}
		/* don't let a later success mask an earlier failure */
		if (ret != OTE_SUCCESS)
			break;
	}
	return ret;
}

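/*
 * Undo te_prep_mem_buffer() for one descriptor: mark RW pages dirty,
 * drop the page references, munlock the buffer if we locked it, and
 * free the descriptor.
 */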
static void te_release_mem_buffer(struct te_shmem_desc *shmem_desc)
{
	uint32_t i;
	int status;

	list_del(&shmem_desc->list);
	for (i = 0; i < shmem_desc->nr_pages; i++) {
		if ((shmem_desc->type == TE_PARAM_TYPE_MEM_RW) ||
			(shmem_desc->type == TE_PARAM_TYPE_PERSIST_MEM_RW))
			set_page_dirty_lock(shmem_desc->pages[i]);
		page_cache_release(shmem_desc->pages[i]);
	}
	kfree(shmem_desc->pages);

	if (shmem_desc->is_locked) {
		status = sys_munlock((unsigned long)shmem_desc->buffer,
							shmem_desc->size);
		if (status)
			pr_err("%s: Error %d in munlock\n", __func__, status);
	}

	kfree(shmem_desc);
}

static void te_release_mem_buffers(struct list_head *buflist)
{
	struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;

	list_for_each_entry_safe(shmem_desc, tmp_shmem_desc, buflist, list) {
		te_release_mem_buffer(shmem_desc);
	}
}

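/*
 * Persistent buffers are staged on inactive_persist_shmem_list while an
 * open-session or launch-operation SMC is in flight. Only if the secure
 * world reports success are they moved to persist_shmem_list, where they
 * stay pinned until the session is closed.
 */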
static void te_activate_persist_mem_buffers(struct te_session *session)
{
	struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;

	/* move persist mem buffers from the inactive list to the active list */
	list_for_each_entry_safe(shmem_desc, tmp_shmem_desc,
		&session->inactive_persist_shmem_list, list) {

		list_move_tail(&shmem_desc->list, &session->persist_shmem_list);
	}
}

static struct te_session *te_get_session(struct tlk_context *context,
					uint32_t session_id)
{
	struct te_session *session;

	/* no entries are removed here, so plain iteration suffices */
	list_for_each_entry(session, &context->session_list, list) {
		if (session->session_id == session_id)
			return session;
	}

	return NULL;
}

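/*
 * SMCs must be issued from CPU0, so callers are either migrated onto
 * CPU0 with sched_setaffinity() or, when that is not possible, have
 * their SMC scheduled there via work_on_cpu().
 */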
#ifdef CONFIG_SMP
cpumask_t saved_cpu_mask;
static long switch_cpumask_to_cpu0(void)
{
	long ret;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	cpu_set(0, local_cpu_mask);
	cpumask_copy(&saved_cpu_mask, tsk_cpus_allowed(current));
	ret = sched_setaffinity(0, &local_cpu_mask);
	if (ret)
		pr_err("%s: sched_setaffinity #1 -> 0x%lX\n", __func__, ret);

	return ret;
}

static void restore_cpumask(void)
{
	long ret = sched_setaffinity(0, &saved_cpu_mask);

	if (ret)
		pr_err("%s: sched_setaffinity #2 -> 0x%lX\n", __func__, ret);
}
#else
static inline long switch_cpumask_to_cpu0(void) { return 0; }
static inline void restore_cpumask(void) {}
#endif

struct tlk_smc_work_args {
	uint32_t arg0;
	uintptr_t arg1;
	uint32_t arg2;
};

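/*
 * Issue the SMC and keep restarting it while the secure world reports
 * preemption: on TE_ERROR_PREEMPT_BY_FS, service the pending secure
 * storage request first, then resume with TE_SMC_RESTART.
 */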
static long tlk_generic_smc_on_cpu0(void *args)
{
	struct tlk_smc_work_args *work;
	uint32_t retval;

	work = (struct tlk_smc_work_args *)args;
	retval = _tlk_generic_smc(work->arg0, work->arg1, work->arg2);

	while (retval == TE_ERROR_PREEMPT_BY_IRQ ||
	       retval == TE_ERROR_PREEMPT_BY_FS) {
		if (retval == TE_ERROR_PREEMPT_BY_FS)
			tlk_ss_op();
		retval = _tlk_generic_smc(TE_SMC_RESTART, 0, 0);
	}

	/* Print TLK logs if any */
	ote_print_logs();

	return retval;
}

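/*
 * Dispatch an SMC to CPU0. Workqueue and kernel threads cannot have
 * their affinity changed, so they either run the SMC directly (when
 * already on CPU0) or hand it off via work_on_cpu(); normal threads are
 * temporarily migrated to CPU0 and back.
 */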
uint32_t send_smc(uint32_t arg0, uintptr_t arg1, uintptr_t arg2)
{
	long ret;
	struct tlk_smc_work_args work_args;

	work_args.arg0 = arg0;
	work_args.arg1 = arg1;
	work_args.arg2 = arg2;

	if (current->flags &
	    (PF_WQ_WORKER | PF_NO_SETAFFINITY | PF_KTHREAD)) {
		int cpu = cpu_logical_map(get_cpu());
		put_cpu();

		/*
		 * Workers don't change CPU: depending on the current CPU,
		 * either execute directly or schedule work on CPU0.
		 */
		if (cpu == 0 && (current->flags & PF_WQ_WORKER))
			return tlk_generic_smc_on_cpu0(&work_args);
		else
			return work_on_cpu(0,
					tlk_generic_smc_on_cpu0, &work_args);
	}

	/* switch to CPU0 */
	ret = switch_cpumask_to_cpu0();
	if (ret) {
		/* unable to switch, schedule work on CPU0 instead */
		ret = work_on_cpu(0, tlk_generic_smc_on_cpu0, &work_args);
	} else {
		/* switched to CPU0 */
		ret = tlk_generic_smc_on_cpu0(&work_args);
		restore_cpumask();
	}

	return ret;
}

/*
 * Do an SMC call. The secure world receives the request and its
 * parameters as byte offsets into the shared request/param buffer,
 * not as raw kernel pointers.
 */
static void do_smc(struct te_request *request, struct tlk_device *dev)
{
	uint32_t smc_args;
	uint32_t smc_params = 0;

	smc_args = (char *)request - dev->req_param_buf;
	if (request->params) {
		smc_params =
			(char *)(uintptr_t)request->params - dev->req_param_buf;
	}

	(void)send_smc(request->type, smc_args, smc_params);
}

/*
 * VPR programming SMC
 *
 * This routine is called both from normal threads and worker threads.
 * The worker threads are per-cpu and have PF_NO_SETAFFINITY set, so
 * any calls to sched_setaffinity will fail.
 *
 * If it's a worker thread on CPU0, just invoke the SMC directly. If
 * it's running on a CPU other than CPU0, use work_on_cpu() to schedule
 * the SMC on CPU0.
 */
int te_set_vpr_params(void *vpr_base, size_t vpr_size)
{
	uint32_t retval;

	/* share the same lock used when a request is sent from user side */
	mutex_lock(&smc_lock);

	retval = send_smc(TE_SMC_PROGRAM_VPR, (uintptr_t)vpr_base, vpr_size);

	mutex_unlock(&smc_lock);

	if (retval != OTE_SUCCESS) {
		pr_err("%s: smc failed err (0x%x)\n", __func__, retval);
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(te_set_vpr_params);

/*
 * Open session SMC (supporting client-based te_open_session() calls)
 */
void te_open_session(struct te_opensession *cmd,
		    struct te_request *request,
		    struct tlk_context *context)
{
	struct te_session *session;
	int ret;

	session = kzalloc(sizeof(struct te_session), GFP_KERNEL);
	if (!session) {
		SET_RESULT(request, OTE_ERROR_OUT_OF_MEMORY,
			OTE_RESULT_ORIGIN_API);
		return;
	}

	INIT_LIST_HEAD(&session->list);
	INIT_LIST_HEAD(&session->temp_shmem_list);
	INIT_LIST_HEAD(&session->inactive_persist_shmem_list);
	INIT_LIST_HEAD(&session->persist_shmem_list);

	request->type = TE_SMC_OPEN_SESSION;

	ret = te_prep_mem_buffers(request, session);
	if (ret != OTE_SUCCESS) {
		pr_err("%s: te_prep_mem_buffers failed err (0x%x)\n",
			__func__, ret);
		SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
		/* release anything that was prepared before the failure */
		te_release_mem_buffers(&session->temp_shmem_list);
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
		kfree(session);
		return;
	}

	memcpy(&request->dest_uuid,
	       &cmd->dest_uuid,
	       sizeof(struct te_service_id));

	pr_info("OPEN_CLIENT_SESSION: 0x%x 0x%x 0x%x 0x%x\n",
		request->dest_uuid[0],
		request->dest_uuid[1],
		request->dest_uuid[2],
		request->dest_uuid[3]);

	do_smc(request, context->dev);

	if (request->result) {
		/* release all mem buffers if we failed */
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
		te_release_mem_buffers(&session->temp_shmem_list);
		/* the session was never added to the context, so free it */
		kfree(session);
		return;
	}

	/* otherwise mark any persistent mem buffers active */
	te_activate_persist_mem_buffers(session);

	/* save off the session_id and add to the list */
	session->session_id = request->session_id;
	list_add_tail(&session->list, &context->session_list);

	te_release_mem_buffers(&session->temp_shmem_list);
}

/*
 * Close session SMC (supporting client-based te_close_session() calls)
 */
void te_close_session(struct te_closesession *cmd,
		     struct te_request *request,
		     struct tlk_context *context)
{
	struct te_session *session;

	request->session_id = cmd->session_id;
	request->type = TE_SMC_CLOSE_SESSION;

	do_smc(request, context->dev);
	if (request->result)
		pr_info("%s: error closing session: 0x%08x\n",
			__func__, request->result);

	session = te_get_session(context, cmd->session_id);
	if (!session) {
		pr_info("%s: session_id not found: 0x%x\n",
			__func__, cmd->session_id);
		return;
	}

	/* free session state */
	te_release_mem_buffers(&session->persist_shmem_list);
	list_del(&session->list);
	kfree(session);
}

/*
 * Launch operation SMC (supporting client-based te_launch_operation() calls)
 */
void te_launch_operation(struct te_launchop *cmd,
			struct te_request *request,
			struct tlk_context *context)
{
	struct te_session *session;
	int ret;

	session = te_get_session(context, cmd->session_id);
	if (!session) {
		pr_info("%s: session_id not found: 0x%x\n",
			__func__, cmd->session_id);
		return;
	}

	request->session_id = cmd->session_id;
	request->command_id = cmd->operation.command;
	request->type = TE_SMC_LAUNCH_OPERATION;

	ret = te_prep_mem_buffers(request, session);
	if (ret != OTE_SUCCESS) {
		pr_err("%s: te_prep_mem_buffers failed err (0x%x)\n",
			__func__, ret);
		SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
		/* release anything that was prepared before the failure */
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
		te_release_mem_buffers(&session->temp_shmem_list);
		return;
	}

	do_smc(request, context->dev);

	if (request->result) {
		/* release any inactive persistent mem buffers if we failed */
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
	} else {
		/* otherwise mark any persistent mem buffers active */
		te_activate_persist_mem_buffers(session);
	}

	te_release_mem_buffers(&session->temp_shmem_list);
}