[FOSS_TLK]security: tlk_driver: add FIQ glue
[tegra/ote_partner/tlk_driver.git] security/tlk_driver/ote_comms.c
/*
 * Copyright (c) 2012-2014 NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/printk.h>
#include <linux/ioctl.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <asm/smp_plat.h>

#include "ote_protocol.h"

bool verbose_smc;
core_param(verbose_smc, verbose_smc, bool, 0644);

#define SET_RESULT(req, r, ro)  { req->result = r; req->result_origin = ro; }

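/*
 * Pin the user pages backing 'buffer' so they stay resident while the
 * secure world accesses them. Pages are requested writable only for the
 * RW parameter types. On success the number of pinned pages is returned
 * and the page array is handed back through *pages_ptr.
 */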
static int te_pin_user_pages(void *buffer, size_t size,
                unsigned long *pages_ptr, uint32_t buf_type)
{
        int ret = 0;
        unsigned int nr_pages;
        struct page **pages = NULL;
        bool writable;

        nr_pages = (((uintptr_t)buffer & (PAGE_SIZE - 1)) +
                        (size + PAGE_SIZE - 1)) >> PAGE_SHIFT;

        pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        writable = (buf_type == TE_PARAM_TYPE_MEM_RW ||
                buf_type == TE_PARAM_TYPE_PERSIST_MEM_RW);

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, (unsigned long)buffer,
                        nr_pages, writable,
                        0, pages, NULL);

        up_read(&current->mm->mmap_sem);

        /* if nothing was pinned, don't leak the page array */
        if (ret <= 0) {
                kfree(pages);
                return ret;
        }

        *pages_ptr = (unsigned long) pages;

        return ret;
}

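/*
 * Pin a client buffer and track it with a te_shmem_desc so it can be
 * released later. Temporary buffers go on the context's temp list and
 * are dropped after each command; persistent buffers stay on the
 * persist list until their session releases them.
 */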
static int te_prep_mem_buffer(uint32_t session_id,
                void *buffer, size_t size, uint32_t buf_type,
                struct tlk_context *context)
{
        unsigned long pages = 0;
        struct te_shmem_desc *shmem_desc = NULL;
        int ret = 0, nr_pages = 0;

        /* allocate new shmem descriptor */
        shmem_desc = kzalloc(sizeof(struct te_shmem_desc), GFP_KERNEL);
        if (!shmem_desc) {
                pr_err("%s: te_add_shmem_desc failed\n", __func__);
                ret = OTE_ERROR_OUT_OF_MEMORY;
                goto error;
        }

        /* pin pages */
        nr_pages = te_pin_user_pages(buffer, size, &pages, buf_type);
        if (nr_pages <= 0) {
                pr_err("%s: te_pin_user_pages failed (%d)\n", __func__,
                        nr_pages);
                ret = OTE_ERROR_OUT_OF_MEMORY;
                kfree(shmem_desc);
                goto error;
        }

        /* initialize shmem descriptor */
        INIT_LIST_HEAD(&(shmem_desc->list));
        shmem_desc->active = false;
        shmem_desc->buffer = buffer;
        shmem_desc->size = size;
        /* record the type so release knows whether to mark pages dirty */
        shmem_desc->type = buf_type;
        shmem_desc->nr_pages = nr_pages;
        shmem_desc->pages = (struct page **)(uintptr_t)pages;

        /* add shmem descriptor to proper list */
        if ((buf_type == TE_PARAM_TYPE_MEM_RO) ||
                (buf_type == TE_PARAM_TYPE_MEM_RW))
                list_add_tail(&shmem_desc->list, &context->temp_shmem_list);
        else
                list_add_tail(&shmem_desc->list, &context->persist_shmem_list);

        return OTE_SUCCESS;
error:
        return ret;
}

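/*
 * Walk the request's parameter list and pin every memory parameter.
 * Value parameters (NONE/INT) need no preparation.
 */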
static int te_prep_mem_buffers(struct te_request *request,
                        struct tlk_context *context)
{
        uint32_t i;
        int ret = OTE_SUCCESS;
        struct te_oper_param *params;

        params = (struct te_oper_param *)(uintptr_t)request->params;
        for (i = 0; i < request->params_size; i++) {
                switch (params[i].type) {
                case TE_PARAM_TYPE_NONE:
                case TE_PARAM_TYPE_INT_RO:
                case TE_PARAM_TYPE_INT_RW:
                        break;
                case TE_PARAM_TYPE_MEM_RO:
                case TE_PARAM_TYPE_MEM_RW:
                case TE_PARAM_TYPE_PERSIST_MEM_RO:
                case TE_PARAM_TYPE_PERSIST_MEM_RW:
                        ret = te_prep_mem_buffer(request->session_id,
                                (void *)(uintptr_t)params[i].u.Mem.base,
                                params[i].u.Mem.len,
                                params[i].type,
                                context);
                        if (ret != OTE_SUCCESS) {
                                pr_err("%s failed with err (%d)\n",
                                        __func__, ret);
                                ret = OTE_ERROR_BAD_PARAMETERS;
                                break;
                        }
                        break;
                default:
                        pr_err("%s: OTE_ERROR_BAD_PARAMETERS\n", __func__);
                        ret = OTE_ERROR_BAD_PARAMETERS;
                        break;
                }
        }
        return ret;
}

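/*
 * Unpin a previously prepared buffer: mark RW pages dirty so their
 * contents are written back, drop the page references, and free the
 * descriptor.
 */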
static void te_release_mem_buffer(struct te_shmem_desc *shmem_desc)
{
        uint32_t i;

        list_del(&shmem_desc->list);
        for (i = 0; i < shmem_desc->nr_pages; i++) {
                if ((shmem_desc->type == TE_PARAM_TYPE_MEM_RW) ||
                        (shmem_desc->type == TE_PARAM_TYPE_PERSIST_MEM_RW))
                        set_page_dirty_lock(shmem_desc->pages[i]);
                page_cache_release(shmem_desc->pages[i]);
        }
        kfree(shmem_desc->pages);
        kfree(shmem_desc);
}

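/* Release every temporary buffer pinned for the current command. */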
static void te_release_temp_mem_buffers(struct tlk_context *context)
{
        struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;

        if (list_empty(&context->temp_shmem_list))
                return;

        list_for_each_entry_safe(shmem_desc, tmp_shmem_desc,
                &context->temp_shmem_list, list) {
                te_release_mem_buffer(shmem_desc);
        }
}

static void te_release_persist_mem_buffers(uint32_t session_id,
        struct tlk_context *context)
{
        struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;

        if (list_empty(&context->persist_shmem_list))
                return;

        /*
         * Release any persistent mem buffers that either belong to
         * the specified session_id or are not currently marked active
         * (i.e. because the associated open_session or launch_operation
         * failed).
         */
        list_for_each_entry_safe(shmem_desc, tmp_shmem_desc,
                &context->persist_shmem_list, list) {
                if ((shmem_desc->session_id == session_id) ||
                        (!shmem_desc->active))
                        te_release_mem_buffer(shmem_desc);
        }
}

static void te_update_persist_mem_buffers(uint32_t session_id,
        struct tlk_context *context)
{
        struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;

        /*
         * Assumes any entries that have yet to be marked active belong
         * to the session associated with the session_id that has been
         * passed in.
         */
        list_for_each_entry_safe(shmem_desc, tmp_shmem_desc,
                &context->persist_shmem_list, list) {

                if (!shmem_desc->active) {
                        shmem_desc->session_id = session_id;
                        shmem_desc->active = true;
                }
        }
}

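/*
 * SMCs are issued from CPU0: pin the calling task there for the
 * duration of the call and restore its original affinity afterwards.
 * On !CONFIG_SMP builds these helpers are no-ops.
 */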
#ifdef CONFIG_SMP
cpumask_t saved_cpu_mask;
static void switch_cpumask_to_cpu0(void)
{
        long ret;
        cpumask_t local_cpu_mask = CPU_MASK_NONE;

        cpu_set(0, local_cpu_mask);
        cpumask_copy(&saved_cpu_mask, tsk_cpus_allowed(current));
        ret = sched_setaffinity(0, &local_cpu_mask);
        if (ret)
                pr_err("%s: sched_setaffinity #1 -> 0x%lX", __func__, ret);
}

static void restore_cpumask(void)
{
        long ret = sched_setaffinity(0, &saved_cpu_mask);
        if (ret)
                pr_err("%s: sched_setaffinity #2 -> 0x%lX", __func__, ret);
}
#else
static inline void switch_cpumask_to_cpu0(void) {}
static inline void restore_cpumask(void) {}
#endif

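/*
 * Issue a generic SMC from CPU0 and keep restarting it while the
 * secure side reports preemption: IRQ preemption only needs a
 * TE_SMC_RESTART, file-system preemption is serviced via tlk_ss_op()
 * first. Any buffered TLK log output is printed before returning.
 */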
uint32_t tlk_generic_smc(uint32_t arg0, uintptr_t arg1, uintptr_t arg2)
{
        uint32_t retval;

        switch_cpumask_to_cpu0();

        retval = _tlk_generic_smc(arg0, arg1, arg2);
        while (retval == TE_ERROR_PREEMPT_BY_IRQ ||
               retval == TE_ERROR_PREEMPT_BY_FS) {
                if (retval == TE_ERROR_PREEMPT_BY_FS)
                        tlk_ss_op();
                retval = _tlk_generic_smc(TE_SMC_RESTART, 0, 0);
        }

        restore_cpumask();

        /* Print TLK logs if any */
        ote_print_logs();

        return retval;
}

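/*
 * Issue an extended SMC (arguments passed via the regs array) from
 * CPU0, restarting it whenever the secure side is preempted by an IRQ.
 */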
uint32_t tlk_extended_smc(uintptr_t *regs)
{
        uint32_t retval;

        switch_cpumask_to_cpu0();

        retval = _tlk_extended_smc(regs);
        while (retval == TE_ERROR_PREEMPT_BY_IRQ)
                retval = _tlk_generic_smc(TE_SMC_RESTART, 0, 0);

        restore_cpumask();

        /* Print TLK logs if any */
        ote_print_logs();

        return retval;
}

/*
 * Do an SMC call. The request and its parameter block both live in the
 * shared request/param buffer, so only their byte offsets within that
 * buffer are passed down to the secure world.
 */
static void do_smc(struct te_request *request, struct tlk_device *dev)
{
        uint32_t smc_args;
        uint32_t smc_params = 0;

        smc_args = (char *)request - dev->req_param_buf;
        if (request->params) {
                smc_params =
                        (char *)(uintptr_t)request->params - dev->req_param_buf;
        }

        tlk_generic_smc(request->type, smc_args, smc_params);
}

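/* Arguments for an SMC that has to be carried out on CPU0. */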
struct tlk_smc_work_args {
        uint32_t arg0;
        uintptr_t arg1;
        uint32_t arg2;
};

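/*
 * work_on_cpu() callback: issue the SMC directly, without touching the
 * caller's CPU affinity. Must already be running on CPU0.
 */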
static long tlk_generic_smc_on_cpu0(void *args)
{
        struct tlk_smc_work_args *work;
        int cpu = cpu_logical_map(smp_processor_id());
        uint32_t retval;

        BUG_ON(cpu != 0);

        work = (struct tlk_smc_work_args *)args;
        retval = _tlk_generic_smc(work->arg0, work->arg1, work->arg2);
        while (retval == TE_ERROR_PREEMPT_BY_IRQ)
                retval = _tlk_generic_smc(TE_SMC_RESTART, 0, 0);
        return retval;
}

/*
 * VPR programming SMC
 *
 * This routine is called both from normal threads and worker threads.
 * The worker threads are per-cpu and have PF_NO_SETAFFINITY set, so
 * any calls to sched_setaffinity will fail.
 *
 * If it's a worker thread already running on CPU0, just invoke the SMC
 * directly. If it's running on any other CPU, use work_on_cpu() to
 * schedule the SMC on CPU0.
 */
int te_set_vpr_params(void *vpr_base, size_t vpr_size)
{
        uint32_t retval;

        /* Share the same lock used when a request is sent from the user side */
        mutex_lock(&smc_lock);

        if (current->flags &
            (PF_WQ_WORKER | PF_NO_SETAFFINITY | PF_KTHREAD)) {
                struct tlk_smc_work_args work_args;
                int cpu = cpu_logical_map(smp_processor_id());

                work_args.arg0 = TE_SMC_PROGRAM_VPR;
                work_args.arg1 = (uintptr_t)vpr_base;
                work_args.arg2 = vpr_size;

                /* workers don't change CPU; depending on the CPU, execute
                 * directly or schedule the work on CPU0 */
                if (cpu == 0 && (current->flags & PF_WQ_WORKER))
                        retval = tlk_generic_smc_on_cpu0(&work_args);
                else
                        retval = work_on_cpu(0,
                                        tlk_generic_smc_on_cpu0, &work_args);
        } else {
                retval = tlk_generic_smc(TE_SMC_PROGRAM_VPR,
                                        (uintptr_t)vpr_base, vpr_size);
        }

        mutex_unlock(&smc_lock);

        if (retval != OTE_SUCCESS) {
                pr_err("%s: smc failed err (0x%x)\n", __func__, retval);
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(te_set_vpr_params);

/*
 * Open session SMC (supporting client-based te_open_session() calls)
 */
void te_open_session(struct te_opensession *cmd,
                    struct te_request *request,
                    struct tlk_context *context)
{
        int ret;

        request->type = TE_SMC_OPEN_SESSION;

        ret = te_prep_mem_buffers(request, context);
        if (ret != OTE_SUCCESS) {
                pr_err("%s: te_prep_mem_buffers failed err (0x%x)\n",
                        __func__, ret);
                SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
                return;
        }

        memcpy(&request->dest_uuid,
               &cmd->dest_uuid,
               sizeof(struct te_service_id));

        pr_info("OPEN_CLIENT_SESSION: 0x%x 0x%x 0x%x 0x%x\n",
                request->dest_uuid[0],
                request->dest_uuid[1],
                request->dest_uuid[2],
                request->dest_uuid[3]);

        do_smc(request, context->dev);

        if (request->result) {
                /* release any persistent mem buffers if we failed */
                te_release_persist_mem_buffers(request->session_id, context);
        } else {
                /* mark active any persistent mem buffers */
                te_update_persist_mem_buffers(request->session_id, context);
        }

        te_release_temp_mem_buffers(context);
}

/*
 * Close session SMC (supporting client-based te_close_session() calls)
 */
void te_close_session(struct te_closesession *cmd,
                     struct te_request *request,
                     struct tlk_context *context)
{
        request->session_id = cmd->session_id;
        request->type = TE_SMC_CLOSE_SESSION;

        do_smc(request, context->dev);
        if (request->result)
                pr_info("%s: error closing session: %08x\n",
                        __func__, request->result);

        /* release any persistent mem buffers */
        te_release_persist_mem_buffers(request->session_id, context);
}

/*
 * Launch operation SMC (supporting client-based te_launch_operation() calls)
 */
void te_launch_operation(struct te_launchop *cmd,
                        struct te_request *request,
                        struct tlk_context *context)
{
        int ret;

        request->session_id = cmd->session_id;
        request->command_id = cmd->operation.command;
        request->type = TE_SMC_LAUNCH_OPERATION;

        ret = te_prep_mem_buffers(request, context);
        if (ret != OTE_SUCCESS) {
                pr_err("%s: te_prep_mem_buffers failed err (0x%x)\n",
                        __func__, ret);
                SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);
                return;
        }

        do_smc(request, context->dev);

        if (request->result) {
                /* release any persistent mem buffers if we failed */
                te_release_persist_mem_buffers(request->session_id, context);
        } else {
                /* mark active any persistent mem buffers */
                te_update_persist_mem_buffers(request->session_id, context);
        }

        te_release_temp_mem_buffers(context);
}