2 * Copyright (c) 2012-2014 NVIDIA Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 #include <linux/atomic.h>
20 #include <linux/uaccess.h>
21 #include <linux/module.h>
22 #include <linux/slab.h>
24 #include <linux/printk.h>
25 #include <linux/ioctl.h>
26 #include <linux/sched.h>
28 #include <linux/pagemap.h>
29 #include <asm/smp_plat.h>
31 #include "ote_protocol.h"
/*
 * Module parameter: when true, enables verbose logging of SMC traffic.
 * NOTE(review): the backing file-scope "verbose_smc" variable is not
 * visible in this excerpt -- presumably declared above; confirm.
 */
core_param(verbose_smc, verbose_smc, bool, 0644);
/*
 * Record an operation result and its origin in the request structure
 * returned to the client.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement (safe inside an un-braced if/else -- the original bare
 * { ...; ...; } form breaks there); arguments are parenthesized to
 * avoid operator-precedence surprises.
 */
#define SET_RESULT(req, r, ro)				\
	do {						\
		(req)->result = (r);			\
		(req)->result_origin = (ro);		\
	} while (0)
38 static int te_pin_user_pages(void *buffer, size_t size,
39 unsigned long *pages_ptr, uint32_t buf_type)
42 unsigned int nr_pages;
43 struct page **pages = NULL;
46 nr_pages = (((uintptr_t)buffer & (PAGE_SIZE - 1)) +
47 (size + PAGE_SIZE - 1)) >> PAGE_SHIFT;
49 pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
53 writable = (buf_type == TE_PARAM_TYPE_MEM_RW ||
54 buf_type == TE_PARAM_TYPE_PERSIST_MEM_RW);
56 down_read(¤t->mm->mmap_sem);
57 ret = get_user_pages(current, current->mm, (unsigned long)buffer,
61 up_read(¤t->mm->mmap_sem);
63 *pages_ptr = (unsigned long) pages;
/*
 * Pin @buffer (@size bytes) for session @session_id and track it with a
 * freshly allocated te_shmem_desc hung off @session.
 *
 * Temporary buffers (TE_PARAM_TYPE_MEM_RO/RW) go on temp_shmem_list and
 * are released after the request completes; persistent buffers start on
 * inactive_persist_shmem_list and are moved to the active list only if
 * the subsequent SMC succeeds.
 *
 * Returns OTE_SUCCESS or an OTE_ERROR_* code.
 *
 * NOTE(review): several lines are missing from this excerpt -- the
 * opening brace, the NULL / nr_pages failure checks, the else keyword,
 * error labels and the final return.  Code below is kept exactly as
 * found.
 */
static int te_prep_mem_buffer(uint32_t session_id,
		void *buffer, size_t size, uint32_t buf_type,
		struct te_session *session)
	unsigned long pages = 0;
	struct te_shmem_desc *shmem_desc = NULL;
	int ret = 0, nr_pages = 0;

	/* allocate new shmem descriptor */
	shmem_desc = kzalloc(sizeof(struct te_shmem_desc), GFP_KERNEL);
	/* (elided) allocation-failure branch follows: */
	pr_err("%s: te_add_shmem_desc failed\n", __func__);
	ret = OTE_ERROR_OUT_OF_MEMORY;

	/* pin the user pages backing the buffer */
	nr_pages = te_pin_user_pages(buffer, size, &pages, buf_type);
	/* (elided) pin-failure branch follows (pr_err call is split): */
	pr_err("%s: te_pin_user_pages failed (%d)\n", __func__,
	ret = OTE_ERROR_OUT_OF_MEMORY;

	/* initialize shmem descriptor */
	INIT_LIST_HEAD(&(shmem_desc->list));
	shmem_desc->buffer = buffer;
	shmem_desc->size = size;
	shmem_desc->nr_pages = nr_pages;
	shmem_desc->pages = (struct page **)(uintptr_t)pages;

	/* add shmem descriptor to proper list */
	if ((buf_type == TE_PARAM_TYPE_MEM_RO) ||
		(buf_type == TE_PARAM_TYPE_MEM_RW))
		list_add_tail(&shmem_desc->list, &session->temp_shmem_list);
	/* (elided) else branch: persistent buffers start out inactive */
	list_add_tail(&shmem_desc->list,
		&session->inactive_persist_shmem_list);
/*
 * Walk the request's parameter array and pin every memory-type
 * parameter for @session via te_prep_mem_buffer().
 *
 * INT/NONE parameters need no preparation; unknown types yield
 * OTE_ERROR_BAD_PARAMETERS.  Returns OTE_SUCCESS or the first error.
 *
 * NOTE(review): the loop index declaration, the break statements, the
 * default label, closing braces and final return are elided from this
 * excerpt; code kept exactly as found.
 */
static int te_prep_mem_buffers(struct te_request *request,
		struct te_session *session)
	int ret = OTE_SUCCESS;
	struct te_oper_param *params;

	/* params pointer travels through the request as an integer */
	params = (struct te_oper_param *)(uintptr_t)request->params;
	for (i = 0; i < request->params_size; i++) {
		switch (params[i].type) {
		case TE_PARAM_TYPE_NONE:
		case TE_PARAM_TYPE_INT_RO:
		case TE_PARAM_TYPE_INT_RW:
			/* nothing to pin for scalar/absent params */
		case TE_PARAM_TYPE_MEM_RO:
		case TE_PARAM_TYPE_MEM_RW:
		case TE_PARAM_TYPE_PERSIST_MEM_RO:
		case TE_PARAM_TYPE_PERSIST_MEM_RW:
			ret = te_prep_mem_buffer(request->session_id,
				(void *)(uintptr_t)params[i].u.Mem.base,
			/* (elided) failure branch: */
			pr_err("%s failed with err (%d)\n",
			ret = OTE_ERROR_BAD_PARAMETERS;
			/* (elided) default: unknown parameter type */
			pr_err("%s: OTE_ERROR_BAD_PARAMETERS\n", __func__);
			ret = OTE_ERROR_BAD_PARAMETERS;
/*
 * Undo te_prep_mem_buffer() for one descriptor: unlink it from its
 * list, mark RW-mapped pages dirty (the secure world may have written
 * them), drop each page reference and free the page-pointer array.
 *
 * NOTE(review): the opening brace, the loop index declaration, the
 * loop's closing brace and the final kfree of the descriptor itself are
 * elided from this excerpt; code kept exactly as found.
 */
static void te_release_mem_buffer(struct te_shmem_desc *shmem_desc)
	list_del(&shmem_desc->list);
	for (i = 0; i < shmem_desc->nr_pages; i++) {
		/* only writable mappings can have been dirtied */
		if ((shmem_desc->type == TE_PARAM_TYPE_MEM_RW) ||
			(shmem_desc->type == TE_PARAM_TYPE_PERSIST_MEM_RW))
			set_page_dirty_lock(shmem_desc->pages[i]);
		page_cache_release(shmem_desc->pages[i]);
	kfree(shmem_desc->pages);
/*
 * Release every shared-memory descriptor on @buflist.  The _safe
 * iterator is required because te_release_mem_buffer() unlinks each
 * entry as it goes.
 *
 * NOTE(review): braces elided from this excerpt; code kept as found.
 */
static void te_release_mem_buffers(struct list_head *buflist)
	struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;

	list_for_each_entry_safe(shmem_desc, tmp_shmem_desc, buflist, list) {
		te_release_mem_buffer(shmem_desc);
/*
 * Promote all pending persistent buffers of @session: once the SMC that
 * registered them has succeeded, move them from the inactive list to
 * the active persist_shmem_list (where they live until session close).
 *
 * NOTE(review): braces elided from this excerpt; code kept as found.
 */
static void te_activate_persist_mem_buffers(struct te_session *session)
	struct te_shmem_desc *shmem_desc, *tmp_shmem_desc;

	/* move persist mem buffers from inactive list to active list */
	list_for_each_entry_safe(shmem_desc, tmp_shmem_desc,
		&session->inactive_persist_shmem_list, list) {
		list_move_tail(&shmem_desc->list, &session->persist_shmem_list);
/*
 * Look up the session with @session_id in @context's session list.
 *
 * NOTE(review): the session_id parameter line, the "return session" on
 * match and the trailing "return NULL" are elided from this excerpt;
 * presumably returns the matching session or NULL -- confirm against
 * the full source.  Code kept exactly as found.
 */
static struct te_session *te_get_session(struct tlk_context *context,
	struct te_session *session, *tmp_session;

	list_for_each_entry_safe(session, tmp_session,
		&context->session_list, list) {
		if (session->session_id == session_id)
/*
 * The TLK secure monitor is entered only from CPU0, so a calling task
 * must first be migrated there.  saved_cpu_mask remembers the caller's
 * original affinity so restore_cpumask() can put it back afterwards.
 *
 * NOTE(review): the #ifdef CONFIG_SMP / #else / #endif lines, the
 * functions' braces and error checks are elided from this excerpt.
 * The two trailing inline stubs are the uniprocessor (!CONFIG_SMP)
 * fallbacks, where everything already runs on CPU0.  Code kept as
 * found.
 */
cpumask_t saved_cpu_mask;
static long switch_cpumask_to_cpu0(void)
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	/* pin the current task to CPU0 only */
	cpu_set(0, local_cpu_mask);
	cpumask_copy(&saved_cpu_mask, tsk_cpus_allowed(current));
	ret = sched_setaffinity(0, &local_cpu_mask);
	/* (elided) error branch: */
	pr_err("%s: sched_setaffinity #1 -> 0x%lX", __func__, ret);

static void restore_cpumask(void)
	long ret = sched_setaffinity(0, &saved_cpu_mask);
	/* (elided) error branch: */
	pr_err("%s: sched_setaffinity #2 -> 0x%lX", __func__, ret);

/* uniprocessor stubs: no migration needed */
static inline long switch_cpumask_to_cpu0(void) { return 0; };
static inline void restore_cpumask(void) {};
/*
 * Argument bundle for running an SMC on CPU0 via work_on_cpu().
 * NOTE(review): the member declarations (arg0/arg1/arg2, per usage
 * below) and closing brace are elided from this excerpt.
 */
struct tlk_smc_work_args {

/*
 * Issue the SMC described by @args (a struct tlk_smc_work_args *) and
 * keep re-entering the secure world with TE_SMC_RESTART while it
 * reports preemption by an IRQ or a filesystem operation.
 *
 * NOTE(review): the opening brace, retval declaration, the FS-preempt
 * handling statement, loop brace and return are elided; code kept as
 * found.
 */
static long tlk_generic_smc_on_cpu0(void *args)
	struct tlk_smc_work_args *work;

	work = (struct tlk_smc_work_args *)args;
	retval = _tlk_generic_smc(work->arg0, work->arg1, work->arg2);

	/* resume the secure world until it completes the request */
	while (retval == TE_ERROR_PREEMPT_BY_IRQ ||
		retval == TE_ERROR_PREEMPT_BY_FS) {
		if (retval == TE_ERROR_PREEMPT_BY_FS)
		retval = _tlk_generic_smc(TE_SMC_RESTART, 0, 0);

	/* Print TLK logs if any */
/*
 * Issue a generic SMC, guaranteeing it executes on CPU0.
 *
 * Worker threads (PF_WQ_WORKER / PF_NO_SETAFFINITY / PF_KTHREAD) cannot
 * be migrated with sched_setaffinity, so for those the call is either
 * made directly (already on CPU0) or scheduled there via work_on_cpu().
 * Normal threads are temporarily pinned to CPU0 with
 * switch_cpumask_to_cpu0(), falling back to work_on_cpu() if pinning
 * fails.
 *
 * NOTE(review): the opening brace, ret declaration, the start of the
 * current->flags condition, put_cpu()/restore_cpumask() calls and the
 * final return are elided from this excerpt; code kept as found.
 */
uint32_t send_smc(uint32_t arg0, uintptr_t arg1, uintptr_t arg2)
	struct tlk_smc_work_args work_args;

	work_args.arg0 = arg0;
	work_args.arg1 = arg1;
	work_args.arg2 = arg2;

	/* (elided) if (current->flags & ... ) continues here: */
	(PF_WQ_WORKER | PF_NO_SETAFFINITY | PF_KTHREAD)) {
	int cpu = cpu_logical_map(get_cpu());

	/* workers don't change CPU. depending on the CPU, execute
	 * directly or sched work */
	if (cpu == 0 && (current->flags & PF_WQ_WORKER))
		return tlk_generic_smc_on_cpu0(&work_args);
	return work_on_cpu(0,
		tlk_generic_smc_on_cpu0, &work_args);

	ret = switch_cpumask_to_cpu0();
	/* (elided) if (ret): */
	/* not able to switch, schedule work on CPU0 */
	ret = work_on_cpu(0, tlk_generic_smc_on_cpu0, &work_args);
	/* switched to CPU0 */
	ret = tlk_generic_smc_on_cpu0(&work_args);
/*
 * Dispatch @request to the secure world.  The request and its params
 * live inside dev->req_param_buf, so only their offsets within that
 * shared buffer are passed through the SMC registers.
 *
 * NOTE(review): the opening brace, smc_args declaration, the params
 * offset assignment line's start and the closing brace are elided from
 * this excerpt; code kept as found.
 */
static void do_smc(struct te_request *request, struct tlk_device *dev)
	uint32_t smc_params = 0;

	/* offset of the request within the shared param buffer */
	smc_args = (char *)request - dev->req_param_buf;
	if (request->params) {
		/* (elided) smc_params = ... offset of params: */
		(char *)(uintptr_t)request->params - dev->req_param_buf;

	(void)send_smc(request->type, smc_args, smc_params);
/*
 * VPR (Video Protection Region) programming SMC.
 *
 * This routine is called both from normal threads and worker threads.
 * The worker threads are per-cpu and have PF_NO_SETAFFINITY set, so
 * any calls to sched_setaffinity will fail.
 *
 * If it's a worker thread on CPU0, just invoke the SMC directly.  If
 * it's running on a non-CPU0, use work_on_cpu() to schedule the SMC
 * on CPU0 (send_smc() handles all of these cases).
 *
 * Returns 0 on success; NOTE(review): the opening brace, retval
 * declaration, error-path return and closing brace are elided from
 * this excerpt; code kept as found.
 */
int te_set_vpr_params(void *vpr_base, size_t vpr_size)
	/* Share the same lock used when request is send from user side */
	mutex_lock(&smc_lock);
	retval = send_smc(TE_SMC_PROGRAM_VPR, (uintptr_t)vpr_base, vpr_size);
	mutex_unlock(&smc_lock);

	if (retval != OTE_SUCCESS) {
		pr_err("%s: smc failed err (0x%x)\n", __func__, retval);
EXPORT_SYMBOL(te_set_vpr_params);
/*
 * Open session SMC (supporting client-based te_open_session() calls).
 *
 * Allocates session state, pins the request's memory parameters,
 * issues the OPEN_SESSION SMC, and on success records the session_id
 * returned by the secure world and activates any persistent buffers.
 * On failure the pending persistent buffers are released.  Temporary
 * buffers are always released at the end.  Results are reported to the
 * client through request->result / result_origin.
 *
 * NOTE(review): the opening brace, ret declaration, NULL check on
 * kzalloc, early returns / error labels, the memcpy source argument
 * line and else keywords are elided from this excerpt; code kept as
 * found.
 */
void te_open_session(struct te_opensession *cmd,
		struct te_request *request,
		struct tlk_context *context)
	struct te_session *session;

	session = kzalloc(sizeof(struct te_session), GFP_KERNEL);
	/* (elided) allocation-failure branch: */
	SET_RESULT(request, OTE_ERROR_OUT_OF_MEMORY,
		OTE_RESULT_ORIGIN_API);

	INIT_LIST_HEAD(&session->list);
	INIT_LIST_HEAD(&session->temp_shmem_list);
	INIT_LIST_HEAD(&session->inactive_persist_shmem_list);
	INIT_LIST_HEAD(&session->persist_shmem_list);

	request->type = TE_SMC_OPEN_SESSION;

	ret = te_prep_mem_buffers(request, session);
	if (ret != OTE_SUCCESS) {
		pr_err("%s: te_prep_mem_buffers failed err (0x%x)\n",
		SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);

	/* (elided) memcpy source: &cmd->dest_uuid presumably */
	memcpy(&request->dest_uuid,
		sizeof(struct te_service_id));

	pr_info("OPEN_CLIENT_SESSION: 0x%x 0x%x 0x%x 0x%x\n",
		request->dest_uuid[0],
		request->dest_uuid[1],
		request->dest_uuid[2],
		request->dest_uuid[3]);

	do_smc(request, context->dev);

	if (request->result) {
		/* release any persistent mem buffers if we failed */
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
	/* otherwise mark active any persistent mem buffers */
	te_activate_persist_mem_buffers(session);
	/* save off session_id and add to list */
	session->session_id = request->session_id;
	list_add_tail(&session->list, &context->session_list);

	te_release_mem_buffers(&session->temp_shmem_list);
/*
 * Close session SMC (supporting client-based te_close_session() calls).
 *
 * Tells the secure world to close the session, then tears down local
 * state: releases the session's persistent shared-memory buffers,
 * unlinks it from the context's session list and (in elided code,
 * presumably) frees it.
 *
 * NOTE(review): the opening brace, the if conditions guarding the two
 * pr_info calls, the kfree of the session and closing brace are elided
 * from this excerpt; code kept as found.
 */
void te_close_session(struct te_closesession *cmd,
		struct te_request *request,
		struct tlk_context *context)
	struct te_session *session;

	request->session_id = cmd->session_id;
	request->type = TE_SMC_CLOSE_SESSION;

	do_smc(request, context->dev);
	/* (elided) if (request->result): */
	pr_info("%s: error closing session: 0x%08x\n",
		__func__, request->result);

	session = te_get_session(context, cmd->session_id);
	/* (elided) if (!session): */
	pr_info("%s: session_id not found: 0x%x\n",
		__func__, cmd->session_id);

	/* free session state */
	te_release_mem_buffers(&session->persist_shmem_list);
	list_del(&session->list);
/*
 * Launch operation SMC (supporting client-based te_launch_operation()
 * calls).
 *
 * Looks up the target session, pins the operation's memory parameters,
 * issues the LAUNCH_OPERATION SMC, then either activates the pending
 * persistent buffers (success) or releases them (failure).  Temporary
 * buffers are always released afterwards.  Errors are reported through
 * request->result / result_origin.
 *
 * NOTE(review): the opening brace, ret declaration, the NULL-session
 * check, early returns / error labels and else keyword are elided from
 * this excerpt; code kept as found.
 */
void te_launch_operation(struct te_launchop *cmd,
		struct te_request *request,
		struct tlk_context *context)
	struct te_session *session;

	session = te_get_session(context, cmd->session_id);
	/* (elided) if (!session): */
	pr_info("%s: session_id not found: 0x%x\n",
		__func__, cmd->session_id);

	request->session_id = cmd->session_id;
	request->command_id = cmd->operation.command;
	request->type = TE_SMC_LAUNCH_OPERATION;

	ret = te_prep_mem_buffers(request, session);
	if (ret != OTE_SUCCESS) {
		pr_err("%s: te_prep_mem_buffers failed err (0x%x)\n",
		SET_RESULT(request, ret, OTE_RESULT_ORIGIN_API);

	do_smc(request, context->dev);

	if (request->result) {
		/* release any inactive persistent mem buffers if we failed */
		te_release_mem_buffers(&session->inactive_persist_shmem_list);
	/* otherwise mark active any persistent mem buffers */
	te_activate_persist_mem_buffers(session);

	te_release_mem_buffers(&session->temp_shmem_list);