security: tf_driver: integrate latest TL release
[linux-3.10.git] / security / tf_driver / tf_conn.c
1 /**
2  * Copyright (c) 2011 Trusted Logic S.A.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License
7  * version 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17  * MA 02111-1307 USA
18  */
19
20 #include <linux/atomic.h>
21 #include <linux/uaccess.h>
22 #include <linux/delay.h>
23 #include <linux/errno.h>
24 #include <linux/list.h>
25 #include <linux/mm.h>
26 #include <linux/pagemap.h>
27 #include <linux/stddef.h>
28 #include <linux/types.h>
29
30 #include "s_version.h"
31
32 #include "tf_protocol.h"
33 #include "tf_defs.h"
34 #include "tf_util.h"
35 #include "tf_comm.h"
36 #include "tf_conn.h"
37
38 #ifdef CONFIG_TF_ZEBRA
39 #include "tf_zebra.h"
40 #include "tf_crypto.h"
41 #endif
42
43 #ifdef CONFIG_ANDROID
44 #define TF_PRIVILEGED_UID_GID 1000 /* Android system AID */
45 #else
46 #define TF_PRIVILEGED_UID_GID 0
47 #endif
48
49 /*----------------------------------------------------------------------------
50  * Management of the shared memory blocks.
51  *
52  * Shared memory blocks are the blocks registered through
53  * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
54  *----------------------------------------------------------------------------*/
55
/**
 * Unmaps a shared memory block and returns its descriptor to the
 * connection's free list, or frees it, depending on its type.
 *
 * @connection:   connection owning the shared memory
 * @shmem_desc:   descriptor of the block to unmap; NULL is a no-op
 * @full_cleanup: when non-zero the descriptor is always freed, even if
 *                it is a preallocated one
 *
 * Busy-waits (unlock / schedule / retry) until this caller holds the
 * last reference on the descriptor before tearing it down.
 **/
void tf_unmap_shmem(
		struct tf_connection *connection,
		struct tf_shmem_desc *shmem_desc,
		u32 full_cleanup)
{
	/* check shmem_desc contains a descriptor */
	if (shmem_desc == NULL)
		return;

	dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc);

retry:
	mutex_lock(&(connection->shmem_mutex));
	if (atomic_read(&shmem_desc->ref_count) > 1) {
		/*
		 * Shared mem still in use, wait for other operations completion
		 * before actually unmapping it.
		 * The mutex is dropped before schedule() so that the other
		 * users can make progress and release their reference.
		 */
		dprintk(KERN_INFO "Descriptor in use\n");
		mutex_unlock(&(connection->shmem_mutex));
		schedule();
		goto retry;
	}

	/* Release the pages/descriptors backing this block. */
	tf_cleanup_shared_memory(
			&(connection->cpt_alloc_context),
			shmem_desc,
			full_cleanup);

	/* Remove the descriptor from the connection's used list. */
	list_del(&(shmem_desc->list));

	if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
			(full_cleanup != 0)) {
		internal_kfree(shmem_desc);

		atomic_dec(&(connection->shmem_count));
	} else {
		/*
		 * This is a preallocated shared memory, add to free list
		 * Since the device context is unmapped last, it is
		 * always the first element of the free list if no
		 * device context has been created
		 */
		shmem_desc->block_identifier = 0;
		list_add(&(shmem_desc->list), &(connection->free_shmem_list));
	}

	mutex_unlock(&(connection->shmem_mutex));
}
108
109
110 /**
111  * Find the first available slot for a new block of shared memory
112  * and map the user buffer.
113  * Update the descriptors to L1 descriptors
114  * Update the buffer_start_offset and buffer_size fields
115  * shmem_desc is updated to the mapped shared memory descriptor
116  **/
117 int tf_map_shmem(
118                 struct tf_connection *connection,
119                 u32 buffer,
120                 /* flags for read-write access rights on the memory */
121                 u32 flags,
122                 bool in_user_space,
123                 u32 descriptors[TF_MAX_COARSE_PAGES],
124                 u32 *buffer_start_offset,
125                 u32 buffer_size,
126                 struct tf_shmem_desc **shmem_desc,
127                 u32 *descriptor_count)
128 {
129         struct tf_shmem_desc *desc = NULL;
130         int error;
131
132         dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
133                                         connection,
134                                         (void *) buffer,
135                                         flags);
136
137         /*
138          * Added temporary to avoid kernel stack buffer
139          */
140         if (!in_user_space) {
141                 if (object_is_on_stack((void *)buffer) != 0) {
142                         dprintk(KERN_ERR
143                                 "tf_map_shmem: "
144                                 "kernel stack buffers "
145                                 "(addr=0x%08X) "
146                                 "are not supported",
147                                 buffer);
148                         error = -ENOSYS;
149                         goto error;
150                 }
151         }
152
153         mutex_lock(&(connection->shmem_mutex));
154
155         /*
156          * Check the list of free shared memory
157          * is not empty
158          */
159         if (list_empty(&(connection->free_shmem_list))) {
160                 if (atomic_read(&(connection->shmem_count)) ==
161                                 TF_SHMEM_MAX_COUNT) {
162                         printk(KERN_ERR "tf_map_shmem(%p):"
163                                 " maximum shared memories already registered\n",
164                                 connection);
165                         error = -ENOMEM;
166                         goto error;
167                 }
168
169                 /* no descriptor available, allocate a new one */
170
171                 desc = (struct tf_shmem_desc *) internal_kmalloc(
172                         sizeof(*desc), GFP_KERNEL);
173                 if (desc == NULL) {
174                         printk(KERN_ERR "tf_map_shmem(%p):"
175                                 " failed to allocate descriptor\n",
176                                 connection);
177                         error = -ENOMEM;
178                         goto error;
179                 }
180
181                 /* Initialize the structure */
182                 desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
183                 atomic_set(&desc->ref_count, 1);
184                 INIT_LIST_HEAD(&(desc->list));
185
186                 atomic_inc(&(connection->shmem_count));
187         } else {
188                 /* take the first free shared memory descriptor */
189                 desc = list_first_entry(&(connection->free_shmem_list),
190                         struct tf_shmem_desc, list);
191                 list_del(&(desc->list));
192         }
193
194         /* Add the descriptor to the used list */
195         list_add(&(desc->list), &(connection->used_shmem_list));
196
197         error = tf_fill_descriptor_table(
198                         &(connection->cpt_alloc_context),
199                         desc,
200                         buffer,
201                         connection->vmas,
202                         descriptors,
203                         buffer_size,
204                         buffer_start_offset,
205                         in_user_space,
206                         flags,
207                         descriptor_count);
208
209         if (error != 0) {
210                 dprintk(KERN_ERR "tf_map_shmem(%p):"
211                         " tf_fill_descriptor_table failed with error "
212                         "code %d!\n",
213                         connection,
214                         error);
215                 goto error;
216         }
217         desc->client_buffer = (u8 *) buffer;
218
219         /*
220          * Successful completion.
221          */
222         *shmem_desc = desc;
223         mutex_unlock(&(connection->shmem_mutex));
224         dprintk(KERN_DEBUG "tf_map_shmem: success\n");
225         return 0;
226
227
228         /*
229          * Error handling.
230          */
231 error:
232         mutex_unlock(&(connection->shmem_mutex));
233         dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
234                 error);
235
236         tf_unmap_shmem(
237                         connection,
238                         desc,
239                         0);
240
241         return error;
242 }
243
244
245
246 /* This function is a copy of the find_vma() function
247 in linux kernel 2.6.15 version with some fixes :
248         - memory block may end on vm_end
249         - check the full memory block is in the memory area
250         - guarantee NULL is returned if no memory area is found */
251 struct vm_area_struct *tf_find_vma(struct mm_struct *mm,
252         unsigned long addr, unsigned long size)
253 {
254         struct vm_area_struct *vma = NULL;
255
256         dprintk(KERN_INFO
257                 "tf_find_vma addr=0x%lX size=0x%lX\n", addr, size);
258
259         if (mm) {
260                 /* Check the cache first. */
261                 /* (Cache hit rate is typically around 35%.) */
262                 vma = mm->mmap_cache;
263                 if (!(vma && vma->vm_end >= (addr+size) &&
264                                 vma->vm_start <= addr)) {
265                         struct rb_node *rb_node;
266
267                         rb_node = mm->mm_rb.rb_node;
268                         vma = NULL;
269
270                         while (rb_node) {
271                                 struct vm_area_struct *vma_tmp;
272
273                                 vma_tmp = rb_entry(rb_node,
274                                         struct vm_area_struct, vm_rb);
275
276                                 dprintk(KERN_INFO
277                                         "vma_tmp->vm_start=0x%lX"
278                                         "vma_tmp->vm_end=0x%lX\n",
279                                         vma_tmp->vm_start,
280                                         vma_tmp->vm_end);
281
282                                 if (vma_tmp->vm_end >= (addr+size)) {
283                                         vma = vma_tmp;
284                                         if (vma_tmp->vm_start <= addr)
285                                                 break;
286
287                                         rb_node = rb_node->rb_left;
288                                 } else {
289                                         rb_node = rb_node->rb_right;
290                                 }
291                         }
292
293                         if (vma)
294                                 mm->mmap_cache = vma;
295                         if (rb_node == NULL)
296                                 vma = NULL;
297                 }
298         }
299         return vma;
300 }
301
302 int tf_validate_shmem_and_flags(
303         u32 shmem,
304         u32 shmem_size,
305         u32 flags)
306 {
307         struct vm_area_struct *vma;
308         u32 chunk;
309
310         if (shmem_size == 0)
311                 /* This is always valid */
312                 return 0;
313
314         if ((shmem + shmem_size) < shmem)
315                 /* Overflow */
316                 return -EINVAL;
317
318         down_read(&current->mm->mmap_sem);
319
320         /*
321          *  When looking for a memory address, split buffer into chunks of
322          *  size=PAGE_SIZE.
323          */
324         chunk = PAGE_SIZE - (shmem & (PAGE_SIZE-1));
325         if (chunk > shmem_size)
326                 chunk = shmem_size;
327
328         do {
329                 vma = tf_find_vma(current->mm, shmem, chunk);
330
331                 if (vma == NULL) {
332                         dprintk(KERN_ERR "%s: area not found\n", __func__);
333                         goto error;
334                 }
335
336                 if (flags & TF_SHMEM_TYPE_READ)
337                         if (!(vma->vm_flags & VM_READ)) {
338                                 dprintk(KERN_ERR "%s: no read permission\n",
339                                         __func__);
340                                 goto error;
341                         }
342                 if (flags & TF_SHMEM_TYPE_WRITE)
343                         if (!(vma->vm_flags & VM_WRITE)) {
344                                 dprintk(KERN_ERR "%s: no write permission\n",
345                                         __func__);
346                                 goto error;
347                         }
348
349                 shmem_size -= chunk;
350                 shmem += chunk;
351                 chunk = (shmem_size <= PAGE_SIZE ?
352                                 shmem_size : PAGE_SIZE);
353         } while (shmem_size != 0);
354
355         up_read(&current->mm->mmap_sem);
356         return 0;
357
358 error:
359         up_read(&current->mm->mmap_sem);
360         return -EFAULT;
361 }
362
363
364 static int tf_map_temp_shmem(struct tf_connection *connection,
365          struct tf_command_param_temp_memref *temp_memref,
366          u32 param_type,
367          struct tf_shmem_desc **shmem_desc)
368 {
369         u32 flags;
370         u32 error = S_SUCCESS;
371         bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
372
373         dprintk(KERN_INFO "tf_map_temp_shmem(%p, "
374                 "0x%08x[size=0x%08x], offset=0x%08x)\n",
375                 connection,
376                 temp_memref->descriptor,
377                 temp_memref->size,
378                 temp_memref->offset);
379
380         switch (param_type) {
381         case TF_PARAM_TYPE_MEMREF_TEMP_INPUT:
382                 flags = TF_SHMEM_TYPE_READ;
383                 break;
384         case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
385                 flags = TF_SHMEM_TYPE_WRITE;
386                 break;
387         case TF_PARAM_TYPE_MEMREF_TEMP_INOUT:
388                 flags = TF_SHMEM_TYPE_WRITE | TF_SHMEM_TYPE_READ;
389                 break;
390         default:
391                 error = -EINVAL;
392                 goto error;
393         }
394
395         if (temp_memref->descriptor == 0) {
396                 /* NULL tmpref */
397                 temp_memref->offset = 0;
398                 *shmem_desc = NULL;
399         } else if ((temp_memref->descriptor != 0) &&
400                         (temp_memref->size == 0)) {
401                 /* Empty tmpref */
402                 temp_memref->offset = temp_memref->descriptor;
403                 temp_memref->descriptor = 0;
404                 temp_memref->size = 0;
405                 *shmem_desc = NULL;
406         } else {
407                 /* Map the temp shmem block */
408
409                 u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
410                 u32 descriptor_count;
411
412                 if (in_user_space) {
413                         error = tf_validate_shmem_and_flags(
414                                 temp_memref->descriptor,
415                                 temp_memref->size,
416                                 flags);
417                         if (error != 0)
418                                 goto error;
419                 }
420
421                 error = tf_map_shmem(
422                                 connection,
423                                 temp_memref->descriptor,
424                                 flags,
425                                 in_user_space,
426                                 shared_mem_descriptors,
427                                 &(temp_memref->offset),
428                                 temp_memref->size,
429                                 shmem_desc,
430                                 &descriptor_count);
431                 temp_memref->descriptor = shared_mem_descriptors[0];
432          }
433
434 error:
435          return error;
436 }
437
438 /*
439  * Clean up a list of shared memory descriptors.
440  */
441 static void tf_shared_memory_cleanup_list(
442                 struct tf_connection *connection,
443                 struct list_head *shmem_desc_list)
444 {
445         while (!list_empty(shmem_desc_list)) {
446                 struct tf_shmem_desc *shmem_desc;
447
448                 shmem_desc = list_first_entry(shmem_desc_list,
449                         struct tf_shmem_desc, list);
450
451                 tf_unmap_shmem(connection, shmem_desc, 1);
452         }
453 }
454
455
/*
 * Clean up the shared memory information in the connection.
 * Releases all allocated pages: both descriptor lists, the vmas page
 * and the coarse page table allocator context.
 */
static void tf_cleanup_shared_memories(struct tf_connection *connection)
{
	/* clean up the list of used and free descriptors.
	 * done outside the mutex, because tf_unmap_shmem already
	 * mutex()ed
	 */
	tf_shared_memory_cleanup_list(connection,
		&connection->used_shmem_list);
	tf_shared_memory_cleanup_list(connection,
		&connection->free_shmem_list);

	mutex_lock(&(connection->shmem_mutex));

	/* Free the Vmas page */
	if (connection->vmas) {
		internal_free_page((unsigned long) connection->vmas);
		connection->vmas = NULL;
	}

	/* Releases the allocator context and the pages it still owns. */
	tf_release_coarse_page_table_allocator(
		&(connection->cpt_alloc_context));

	mutex_unlock(&(connection->shmem_mutex));
}
484
485
486 /*
487  * Initialize the shared memory in a connection.
488  * Allocates the minimum memory to be provided
489  * for shared memory management
490  */
491 int tf_init_shared_memory(struct tf_connection *connection)
492 {
493         int error;
494         int i;
495         int coarse_page_index;
496
497         /*
498          * We only need to initialize special elements and attempt to allocate
499          * the minimum shared memory descriptors we want to support
500          */
501
502         mutex_init(&(connection->shmem_mutex));
503         INIT_LIST_HEAD(&(connection->free_shmem_list));
504         INIT_LIST_HEAD(&(connection->used_shmem_list));
505         atomic_set(&(connection->shmem_count), 0);
506
507         tf_init_coarse_page_table_allocator(
508                 &(connection->cpt_alloc_context));
509
510
511         /*
512          * Preallocate 3 pages to increase the chances that a connection
513          * succeeds in allocating shared mem
514          */
515         for (i = 0;
516              i < 3;
517              i++) {
518                 struct tf_shmem_desc *shmem_desc =
519                         (struct tf_shmem_desc *) internal_kmalloc(
520                                 sizeof(*shmem_desc), GFP_KERNEL);
521
522                 if (shmem_desc == NULL) {
523                         printk(KERN_ERR "tf_init_shared_memory(%p):"
524                                 " failed to pre allocate descriptor %d\n",
525                                 connection,
526                                 i);
527                         error = -ENOMEM;
528                         goto error;
529                 }
530
531                 for (coarse_page_index = 0;
532                      coarse_page_index < TF_MAX_COARSE_PAGES;
533                      coarse_page_index++) {
534                         struct tf_coarse_page_table *coarse_pg_table;
535
536                         coarse_pg_table = tf_alloc_coarse_page_table(
537                                 &(connection->cpt_alloc_context),
538                                 TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
539
540                         if (coarse_pg_table == NULL) {
541                                 printk(KERN_ERR "tf_init_shared_memory(%p)"
542                                         ": descriptor %d coarse page %d - "
543                                         "tf_alloc_coarse_page_table() "
544                                         "failed\n",
545                                         connection,
546                                         i,
547                                         coarse_page_index);
548                                 error = -ENOMEM;
549                                 goto error;
550                         }
551
552                         shmem_desc->coarse_pg_table[coarse_page_index] =
553                                 coarse_pg_table;
554                 }
555                 shmem_desc->coarse_pg_table_count = 0;
556
557                 shmem_desc->type = TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
558                 atomic_set(&shmem_desc->ref_count, 1);
559
560                 /*
561                  * add this preallocated descriptor to the list of free
562                  * descriptors Keep the device context specific one at the
563                  * beginning of the list
564                  */
565                 INIT_LIST_HEAD(&(shmem_desc->list));
566                 list_add_tail(&(shmem_desc->list),
567                         &(connection->free_shmem_list));
568         }
569
570         /* allocate memory for the vmas structure */
571         connection->vmas =
572                 (struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
573         if (connection->vmas == NULL) {
574                 printk(KERN_ERR "tf_init_shared_memory(%p):"
575                         " vmas - failed to get_zeroed_page\n",
576                         connection);
577                 error = -ENOMEM;
578                 goto error;
579         }
580
581         return 0;
582
583 error:
584         tf_cleanup_shared_memories(connection);
585         return error;
586 }
587
588 /*----------------------------------------------------------------------------
589  * Connection operations to the Secure World
590  *----------------------------------------------------------------------------*/
591
/**
 * Sends a CREATE_DEVICE_CONTEXT message to the Secure World for this
 * connection.  On success, stores the returned device context handle
 * in the connection and moves its state to
 * TF_CONN_STATE_VALID_DEVICE_CONTEXT.
 *
 * Returns 0 on success; -ENOMEM when the Secure World reports
 * S_ERROR_OUT_OF_MEMORY; another negative error code otherwise.
 *
 * NOTE(review): operation_id and device_context_id carry kernel
 * pointers cast to u32 — this assumes a 32-bit kernel; confirm before
 * reusing on 64-bit.
 */
int tf_create_device_context(
	struct tf_connection *connection)
{
	union tf_command command;
	union tf_answer  answer;
	int error = 0;

	dprintk(KERN_INFO "tf_create_device_context(%p)\n",
			connection);

	command.create_device_context.message_type =
		TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
	/* Payload size in 32-bit words, excluding the common header. */
	command.create_device_context.message_size =
		(sizeof(struct tf_command_create_device_context)
			- sizeof(struct tf_command_header))/sizeof(u32);
	/* The local answer buffer's address identifies this operation;
	 * the connection pointer identifies the device context. */
	command.create_device_context.operation_id = (u32) &answer;
	command.create_device_context.device_context_id = (u32) connection;

	error = tf_send_receive(
		&connection->dev->sm,
		&command,
		&answer,
		connection,
		true);

	if ((error != 0) ||
		(answer.create_device_context.error_code != S_SUCCESS))
		goto error;

	/*
	 * CREATE_DEVICE_CONTEXT succeeded,
	 * store device context handler and update connection status
	 */
	connection->device_context =
		answer.create_device_context.device_context;
	spin_lock(&(connection->state_lock));
	connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
	spin_unlock(&(connection->state_lock));

	/* successful completion */
	dprintk(KERN_INFO "tf_create_device_context(%p):"
		" device_context=0x%08x\n",
		connection,
		answer.create_device_context.device_context);
	return 0;

error:
	if (error != 0) {
		/* Transport-level failure: propagate the errno as-is. */
		dprintk(KERN_ERR "tf_create_device_context failed with "
			"error %d\n", error);
	} else {
		/*
		 * We sent a DeviceCreateContext. The state is now
		 * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT It has to be
		 * reset if we ever want to send a DeviceCreateContext again
		 */
		spin_lock(&(connection->state_lock));
		connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
		spin_unlock(&(connection->state_lock));
		dprintk(KERN_ERR "tf_create_device_context failed with "
			"error_code 0x%08X\n",
			answer.create_device_context.error_code);
		/* Map the Secure World status to a kernel errno. */
		if (answer.create_device_context.error_code ==
			S_ERROR_OUT_OF_MEMORY)
			error = -ENOMEM;
		else
			error = -EFAULT;
	}

	return error;
}
663
664 /* Check that the current application belongs to the
665  * requested GID */
666 static bool tf_check_gid(gid_t requested_gid)
667 {
668         if (requested_gid == current_egid()) {
669                 return true;
670         } else {
671                 u32    size;
672                 u32    i;
673                 /* Look in the supplementary GIDs */
674                 get_group_info(GROUP_INFO);
675                 size = GROUP_INFO->ngroups;
676                 for (i = 0; i < size; i++)
677                         if (requested_gid == GROUP_AT(GROUP_INFO , i))
678                                 return true;
679         }
680         return false;
681 }
682
683 /*
684  * Opens a client session to the Secure World
685  */
686 int tf_open_client_session(
687         struct tf_connection *connection,
688         union tf_command *command,
689         union tf_answer *answer)
690 {
691         int error = 0;
692         struct tf_shmem_desc *shmem_desc[4] = {NULL};
693         u32 i;
694
695         dprintk(KERN_INFO "tf_open_client_session(%p)\n", connection);
696
697         /*
698          * Initialize the message size with no login data. This will be later
699          * adjusted in the cases below
700          */
701         command->open_client_session.message_size =
702                 (sizeof(struct tf_command_open_client_session) - 20
703                         - sizeof(struct tf_command_header))/4;
704
705         switch (command->open_client_session.login_type) {
706         case TF_LOGIN_PUBLIC:
707                  /* Nothing to do */
708                  break;
709
710         case TF_LOGIN_USER:
711                 /*
712                  * Send the EUID of the calling application in the login data.
713                  * Update message size.
714                  */
715                 *(u32 *) &command->open_client_session.login_data =
716                         current_euid();
717 #ifndef CONFIG_ANDROID
718                 command->open_client_session.login_type =
719                         (u32) TF_LOGIN_USER_LINUX_EUID;
720 #else
721                 command->open_client_session.login_type =
722                         (u32) TF_LOGIN_USER_ANDROID_EUID;
723 #endif
724
725                 /* Added one word */
726                 command->open_client_session.message_size += 1;
727                 break;
728
729         case TF_LOGIN_GROUP: {
730                 /* Check requested GID */
731                 gid_t  requested_gid =
732                         *(u32 *) command->open_client_session.login_data;
733
734                 if (!tf_check_gid(requested_gid)) {
735                         dprintk(KERN_ERR "tf_open_client_session(%p) "
736                                 "TF_LOGIN_GROUP: requested GID (0x%x) does "
737                                 "not match real eGID (0x%x)"
738                                 "or any of the supplementary GIDs\n",
739                                 connection, requested_gid, current_egid());
740                         error = -EACCES;
741                         goto error;
742                 }
743 #ifndef CONFIG_ANDROID
744                 command->open_client_session.login_type =
745                         TF_LOGIN_GROUP_LINUX_GID;
746 #else
747                 command->open_client_session.login_type =
748                         TF_LOGIN_GROUP_ANDROID_GID;
749 #endif
750
751                 command->open_client_session.message_size += 1; /* GID */
752                 break;
753         }
754
755 #ifndef CONFIG_ANDROID
756         case TF_LOGIN_APPLICATION: {
757                 /*
758                  * Compute SHA-1 hash of the application fully-qualified path
759                  * name.  Truncate the hash to 16 bytes and send it as login
760                  * data.  Update message size.
761                  */
762                 u8 pSHA1Hash[SHA1_DIGEST_SIZE];
763
764                 error = tf_hash_application_path_and_data(pSHA1Hash,
765                         NULL, 0);
766                 if (error != 0) {
767                         dprintk(KERN_ERR "tf_open_client_session: "
768                                 "error in tf_hash_application_path_and_data\n");
769                         goto error;
770                 }
771                 memcpy(&command->open_client_session.login_data,
772                         pSHA1Hash, 16);
773                 command->open_client_session.login_type =
774                         TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
775                 /* 16 bytes */
776                 command->open_client_session.message_size += 4;
777                 break;
778         }
779 #else
780         case TF_LOGIN_APPLICATION:
781                 /*
782                  * Send the real UID of the calling application in the login
783                  * data. Update message size.
784                  */
785                 *(u32 *) &command->open_client_session.login_data =
786                         current_uid();
787
788                 command->open_client_session.login_type =
789                         (u32) TF_LOGIN_APPLICATION_ANDROID_UID;
790
791                 /* Added one word */
792                 command->open_client_session.message_size += 1;
793                 break;
794 #endif
795
796 #ifndef CONFIG_ANDROID
797         case TF_LOGIN_APPLICATION_USER: {
798                 /*
799                  * Compute SHA-1 hash of the concatenation of the application
800                  * fully-qualified path name and the EUID of the calling
801                  * application.  Truncate the hash to 16 bytes and send it as
802                  * login data.  Update message size.
803                  */
804                 u8 pSHA1Hash[SHA1_DIGEST_SIZE];
805
806                 error = tf_hash_application_path_and_data(pSHA1Hash,
807                         (u8 *) &(current_euid()), sizeof(current_euid()));
808                 if (error != 0) {
809                         dprintk(KERN_ERR "tf_open_client_session: "
810                                 "error in tf_hash_application_path_and_data\n");
811                         goto error;
812                 }
813                 memcpy(&command->open_client_session.login_data,
814                         pSHA1Hash, 16);
815                 command->open_client_session.login_type =
816                         TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
817
818                 /* 16 bytes */
819                 command->open_client_session.message_size += 4;
820
821                 break;
822         }
823 #else
824         case TF_LOGIN_APPLICATION_USER:
825                 /*
826                  * Send the real UID and the EUID of the calling application in
827                  * the login data. Update message size.
828                  */
829                 *(u32 *) &command->open_client_session.login_data =
830                         current_uid();
831                 *(u32 *) &command->open_client_session.login_data[4] =
832                         current_euid();
833
834                 command->open_client_session.login_type =
835                         TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
836
837                 /* Added two words */
838                 command->open_client_session.message_size += 2;
839                 break;
840 #endif
841
842 #ifndef CONFIG_ANDROID
843         case TF_LOGIN_APPLICATION_GROUP: {
844                 /*
845                  * Check requested GID.  Compute SHA-1 hash of the concatenation
846                  * of the application fully-qualified path name and the
847                  * requested GID.  Update message size
848                  */
849                 gid_t  requested_gid;
850                 u8     pSHA1Hash[SHA1_DIGEST_SIZE];
851
852                 requested_gid = *(u32 *) &command->open_client_session.
853                         login_data;
854
855                 if (!tf_check_gid(requested_gid)) {
856                         dprintk(KERN_ERR "tf_open_client_session(%p) "
857                         "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
858                         "does not match real eGID (0x%x)"
859                         "or any of the supplementary GIDs\n",
860                         connection, requested_gid, current_egid());
861                         error = -EACCES;
862                         goto error;
863                 }
864
865                 error = tf_hash_application_path_and_data(pSHA1Hash,
866                         &requested_gid, sizeof(u32));
867                 if (error != 0) {
868                         dprintk(KERN_ERR "tf_open_client_session: "
869                                 "error in tf_hash_application_path_and_data\n");
870                         goto error;
871                 }
872
873                 memcpy(&command->open_client_session.login_data,
874                         pSHA1Hash, 16);
875                 command->open_client_session.login_type =
876                         TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
877
878                 /* 16 bytes */
879                 command->open_client_session.message_size += 4;
880                 break;
881         }
882 #else
883         case TF_LOGIN_APPLICATION_GROUP: {
884                 /*
885                  * Check requested GID. Send the real UID and the requested GID
886                  * in the login data. Update message size.
887                  */
888                 gid_t requested_gid;
889
890                 requested_gid = *(u32 *) &command->open_client_session.
891                         login_data;
892
893                 if (!tf_check_gid(requested_gid)) {
894                         dprintk(KERN_ERR "tf_open_client_session(%p) "
895                         "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
896                         "does not match real eGID (0x%x)"
897                         "or any of the supplementary GIDs\n",
898                         connection, requested_gid, current_egid());
899                         error = -EACCES;
900                         goto error;
901                 }
902
903                 *(u32 *) &command->open_client_session.login_data =
904                         current_uid();
905                 *(u32 *) &command->open_client_session.login_data[4] =
906                         requested_gid;
907
908                 command->open_client_session.login_type =
909                         TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
910
911                 /* Added two words */
912                 command->open_client_session.message_size += 2;
913
914                 break;
915         }
916 #endif
917
918         case TF_LOGIN_PRIVILEGED:
919                 /* A privileged login may be performed only on behalf of the
920                    kernel itself or on behalf of a process with euid=0 or
921                    egid=0 or euid=system or egid=system. */
922                 if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
923                         dprintk(KERN_DEBUG "tf_open_client_session: "
924                                 "TF_LOGIN_PRIVILEGED for kernel API\n");
925                 } else if ((current_euid() != TF_PRIVILEGED_UID_GID) &&
926                            (current_egid() != TF_PRIVILEGED_UID_GID) &&
927                            (current_euid() != 0) && (current_egid() != 0)) {
928                         dprintk(KERN_ERR "tf_open_client_session: "
929                                 " user %d, group %d not allowed to open "
930                                 "session with TF_LOGIN_PRIVILEGED\n",
931                                 current_euid(), current_egid());
932                         error = -EACCES;
933                         goto error;
934                 } else {
935                         dprintk(KERN_DEBUG "tf_open_client_session: "
936                                 "TF_LOGIN_PRIVILEGED for %u:%u\n",
937                                 current_euid(), current_egid());
938                 }
939                 command->open_client_session.login_type =
940                         TF_LOGIN_PRIVILEGED;
941                 break;
942
943         case TF_LOGIN_AUTHENTICATION: {
944                 /*
945                  * Compute SHA-1 hash of the application binary
946                  * Send this hash as the login data (20 bytes)
947                  */
948
949                 u8 *hash;
950                 hash = &(command->open_client_session.login_data[0]);
951
952                 error = tf_get_current_process_hash(hash);
953                 if (error != 0) {
954                         dprintk(KERN_ERR "tf_open_client_session: "
955                                 "error in tf_get_current_process_hash\n");
956                         goto error;
957                 }
958                 command->open_client_session.login_type =
959                         TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
960
961                 /* 20 bytes */
962                 command->open_client_session.message_size += 5;
963                 break;
964         }
965
966         case TF_LOGIN_PRIVILEGED_KERNEL:
967                 /* A kernel login may be performed only on behalf of the
968                    kernel itself. */
969                 if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
970                         dprintk(KERN_DEBUG "tf_open_client_session: "
971                                 "TF_LOGIN_PRIVILEGED_KERNEL for kernel API\n");
972                         command->open_client_session.login_type =
973                                 TF_LOGIN_PRIVILEGED_KERNEL;
974                 } else {
975                         dprintk(KERN_ERR "tf_open_client_session: "
976                                 " user %d, group %d not allowed to open "
977                                 "session with TF_LOGIN_PRIVILEGED_KERNEL\n",
978                                 current_euid(), current_egid());
979                         error = -EACCES;
980                         goto error;
981                 }
982                 command->open_client_session.login_type =
983                         TF_LOGIN_PRIVILEGED_KERNEL;
984                 break;
985
986         default:
987                  dprintk(KERN_ERR "tf_open_client_session: "
988                         "unknown login_type(%08X)\n",
989                         command->open_client_session.login_type);
990                  error = -EOPNOTSUPP;
991                  goto error;
992         }
993
994         /* Map the temporary memory references */
995         for (i = 0; i < 4; i++) {
996                 int param_type;
997                 param_type = TF_GET_PARAM_TYPE(
998                         command->open_client_session.param_types, i);
999                 if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
1000                                    TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
1001                                 == TF_PARAM_TYPE_MEMREF_FLAG) {
1002                         /* Map temp mem ref */
1003                         error = tf_map_temp_shmem(connection,
1004                                 &command->open_client_session.
1005                                         params[i].temp_memref,
1006                                 param_type,
1007                                 &shmem_desc[i]);
1008                         if (error != 0) {
1009                                 dprintk(KERN_ERR "tf_open_client_session: "
1010                                         "unable to map temporary memory block "
1011                                         "(%08X)\n", error);
1012                                 goto error;
1013                         }
1014                 }
1015         }
1016
1017         /* Fill the handle of the Device Context */
1018         command->open_client_session.device_context =
1019                 connection->device_context;
1020
1021         error = tf_send_receive(
1022                 &connection->dev->sm,
1023                 command,
1024                 answer,
1025                 connection,
1026                 true);
1027
1028 error:
1029         /* Unmap the temporary memory references */
1030         for (i = 0; i < 4; i++)
1031                 if (shmem_desc[i] != NULL)
1032                         tf_unmap_shmem(connection, shmem_desc[i], 0);
1033
1034         if (error != 0)
1035                 dprintk(KERN_ERR "tf_open_client_session returns %d\n",
1036                         error);
1037         else
1038                 dprintk(KERN_ERR "tf_open_client_session returns "
1039                         "error_code 0x%08X\n",
1040                         answer->open_client_session.error_code);
1041
1042         return error;
1043 }
1044
1045
1046 /*
1047  * Closes a client session from the Secure World
1048  */
1049 int tf_close_client_session(
1050         struct tf_connection *connection,
1051         union tf_command *command,
1052         union tf_answer *answer)
1053 {
1054         int error = 0;
1055
1056         dprintk(KERN_DEBUG "tf_close_client_session(%p)\n", connection);
1057
1058         command->close_client_session.message_size =
1059                 (sizeof(struct tf_command_close_client_session) -
1060                         sizeof(struct tf_command_header)) / 4;
1061         command->close_client_session.device_context =
1062                 connection->device_context;
1063
1064         error = tf_send_receive(
1065                 &connection->dev->sm,
1066                 command,
1067                 answer,
1068                 connection,
1069                 true);
1070
1071         if (error != 0)
1072                 dprintk(KERN_ERR "tf_close_client_session returns %d\n",
1073                         error);
1074         else
1075                 dprintk(KERN_ERR "tf_close_client_session returns "
1076                         "error 0x%08X\n",
1077                         answer->close_client_session.error_code);
1078
1079         return error;
1080 }
1081
1082
1083 /*
1084  * Registers a shared memory to the Secure World
1085  */
int tf_register_shared_memory(
	struct tf_connection *connection,
	union tf_command *command,
	union tf_answer *answer)
{
	int error = 0;
	/* Descriptor for the mapped block; stays NULL for empty blocks */
	struct tf_shmem_desc *shmem_desc = NULL;
	/* Kernel-owned connections provide kernel pointers that need no
	 * user-space validation */
	bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
	struct tf_command_register_shared_memory *msg =
		&command->register_shared_memory;

	dprintk(KERN_INFO "tf_register_shared_memory(%p) "
		"%p[0x%08X][0x%08x]\n",
		connection,
		(void *)msg->shared_mem_descriptors[0],
		msg->shared_mem_size,
		(u32)msg->memory_flags);

	/* Reject invalid user buffers/flags before touching any page */
	if (in_user_space) {
		error = tf_validate_shmem_and_flags(
			msg->shared_mem_descriptors[0],
			msg->shared_mem_size,
			(u32)msg->memory_flags);
		if (error != 0)
			goto error;
	}

	/* Initialize message_size with no descriptors */
	/* (size is in 32-bit words; descriptors are appended below) */
	msg->message_size
		= (offsetof(struct tf_command_register_shared_memory,
						shared_mem_descriptors) -
			sizeof(struct tf_command_header)) / 4;

	/* Map the shmem block and update the message */
	if (msg->shared_mem_size == 0) {
		/* Empty shared mem */
		msg->shared_mem_start_offset = msg->shared_mem_descriptors[0];
	} else {
		u32 descriptor_count;
		/* Pins the pages and fills shared_mem_descriptors[] plus the
		 * start offset; descriptor_count is the number of words added
		 * to the message */
		error = tf_map_shmem(
			connection,
			msg->shared_mem_descriptors[0],
			msg->memory_flags,
			in_user_space,
			msg->shared_mem_descriptors,
			&(msg->shared_mem_start_offset),
			msg->shared_mem_size,
			&shmem_desc,
			&descriptor_count);
		if (error != 0) {
			dprintk(KERN_ERR "tf_register_shared_memory: "
				"unable to map shared memory block\n");
			goto error;
		}
		msg->message_size += descriptor_count;
	}

	/*
	 * write the correct device context handle and the address of the shared
	 * memory descriptor in the message
	 */
	msg->device_context = connection->device_context;
	/* NOTE(review): the descriptor pointer is smuggled through a u32
	 * handle — assumes a 32-bit kernel; verify on 64-bit targets */
	msg->block_id = (u32)shmem_desc;

	/* Send the updated message */
	error = tf_send_receive(
		&connection->dev->sm,
		command,
		answer,
		connection,
		true);

	if ((error != 0) ||
		(answer->register_shared_memory.error_code
			!= S_SUCCESS)) {
		dprintk(KERN_ERR "tf_register_shared_memory: "
			"operation failed. Unmap block\n");
		goto error;
	}

	/* Saves the block handle returned by the secure world */
	if (shmem_desc != NULL)
		shmem_desc->block_identifier =
			answer->register_shared_memory.block;

	/* successful completion */
	dprintk(KERN_INFO "tf_register_shared_memory(%p):"
		" block_id=0x%08x block=0x%08x\n",
		connection, msg->block_id,
		answer->register_shared_memory.block);
	return 0;

	/* error completion */
error:
	/* shmem_desc may still be NULL here (validation failure or empty
	 * block) — presumably tolerated by tf_unmap_shmem; verify */
	tf_unmap_shmem(
		connection,
		shmem_desc,
		0);

	if (error != 0)
		dprintk(KERN_ERR "tf_register_shared_memory returns %d\n",
			error);
	else
		dprintk(KERN_ERR "tf_register_shared_memory returns "
			"error_code 0x%08X\n",
			answer->register_shared_memory.error_code);

	return error;
}
1195
1196
1197 /*
1198  * Releases a shared memory from the Secure World
1199  */
1200 int tf_release_shared_memory(
1201         struct tf_connection *connection,
1202         union tf_command *command,
1203         union tf_answer *answer)
1204 {
1205         int error = 0;
1206
1207         dprintk(KERN_DEBUG "tf_release_shared_memory(%p)\n", connection);
1208
1209         command->release_shared_memory.message_size =
1210                 (sizeof(struct tf_command_release_shared_memory) -
1211                         sizeof(struct tf_command_header)) / 4;
1212         command->release_shared_memory.device_context =
1213                 connection->device_context;
1214
1215         error = tf_send_receive(
1216                 &connection->dev->sm,
1217                 command,
1218                 answer,
1219                 connection,
1220                 true);
1221
1222         if ((error != 0) ||
1223                 (answer->release_shared_memory.error_code != S_SUCCESS))
1224                 goto error;
1225
1226         /* Use block_id to get back the pointer to shmem_desc */
1227         tf_unmap_shmem(
1228                 connection,
1229                 (struct tf_shmem_desc *)
1230                         answer->release_shared_memory.block_id,
1231                 0);
1232
1233         /* successful completion */
1234         dprintk(KERN_INFO "tf_release_shared_memory(%p):"
1235                 " block_id=0x%08x block=0x%08x\n",
1236                 connection, answer->release_shared_memory.block_id,
1237                 command->release_shared_memory.block);
1238         return 0;
1239
1240
1241 error:
1242         if (error != 0)
1243                 dprintk(KERN_ERR "tf_release_shared_memory returns %d\n",
1244                         error);
1245         else
1246                 dprintk(KERN_ERR "tf_release_shared_memory returns "
1247                         "nChannelStatus 0x%08X\n",
1248                         answer->release_shared_memory.error_code);
1249
1250         return error;
1251
1252 }
1253
1254 /*
1255  * Invokes a client command to the Secure World
1256  */
1257 int tf_invoke_client_command(
1258         struct tf_connection *connection,
1259         union tf_command *command,
1260         union tf_answer *answer)
1261 {
1262         int error = 0;
1263         struct tf_shmem_desc *shmem_desc[4] = {NULL};
1264         int i;
1265 #ifdef CONFIG_TF_ION
1266         struct ion_handle *new_handle = NULL;
1267 #endif /* CONFIG_TF_ION */
1268
1269         dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);
1270
1271         command->release_shared_memory.message_size =
1272                 (sizeof(struct tf_command_invoke_client_command) -
1273                         sizeof(struct tf_command_header)) / 4;
1274
1275 #ifdef CONFIG_TF_ZEBRA
1276         error = tf_crypto_try_shortcuted_update(connection,
1277                 (struct tf_command_invoke_client_command *) command,
1278                 (struct tf_answer_invoke_client_command *) answer);
1279         if (error == 0)
1280                 return error;
1281 #endif
1282
1283         /* Map the tmprefs */
1284         for (i = 0; i < 4; i++) {
1285                 int param_type = TF_GET_PARAM_TYPE(
1286                         command->invoke_client_command.param_types, i);
1287
1288                 if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
1289                                         TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
1290                                 == TF_PARAM_TYPE_MEMREF_FLAG) {
1291                         /* A temporary memref: map it */
1292                         error = tf_map_temp_shmem(connection,
1293                                         &command->invoke_client_command.
1294                                                 params[i].temp_memref,
1295                                         param_type, &shmem_desc[i]);
1296                         if (error != 0) {
1297                                 dprintk(KERN_ERR
1298                                         "tf_invoke_client_command: "
1299                                         "unable to map temporary memory "
1300                                         "block\n (%08X)", error);
1301                                 goto error;
1302                         }
1303                 }
1304 #ifdef CONFIG_TF_ION
1305                 else if (param_type == TF_PARAM_TYPE_MEMREF_ION_HANDLE) {
1306                         struct tf_command_invoke_client_command *invoke;
1307                         ion_phys_addr_t ion_addr;
1308                         size_t ion_len;
1309                         struct ion_buffer *buffer;
1310
1311                         if (connection->ion_client == NULL) {
1312                                 connection->ion_client = ion_client_create(
1313                                         zebra_ion_device,
1314                                         (1 << ION_HEAP_TYPE_CARVEOUT),
1315                                         "tf");
1316                         }
1317                         if (connection->ion_client == NULL) {
1318                                 dprintk(KERN_ERR "%s(%p): "
1319                                         "unable to create ion client\n",
1320                                         __func__, connection);
1321                                 error = -EFAULT;
1322                                 goto error;
1323                         }
1324
1325                         invoke = &command->invoke_client_command;
1326
1327                         dprintk(KERN_INFO "ion_handle %x",
1328                                 invoke->params[i].value.a);
1329                         buffer = ion_share(connection->ion_client,
1330                                 (struct ion_handle *)invoke->params[i].value.a);
1331                         if (buffer == NULL) {
1332                                 dprintk(KERN_ERR "%s(%p): "
1333                                         "unable to share ion handle\n",
1334                                         __func__, connection);
1335                                 error = -EFAULT;
1336                                 goto error;
1337                         }
1338
1339                         dprintk(KERN_INFO "ion_buffer %p", buffer);
1340                         new_handle = ion_import(connection->ion_client, buffer);
1341                         if (new_handle == NULL) {
1342                                 dprintk(KERN_ERR "%s(%p): "
1343                                         "unable to import ion buffer\n",
1344                                         __func__, connection);
1345                                 error = -EFAULT;
1346                                 goto error;
1347                         }
1348
1349                         dprintk(KERN_INFO "new_handle %x", new_handle);
1350                         error = ion_phys(connection->ion_client,
1351                                         new_handle,
1352                                         &ion_addr,
1353                                         &ion_len);
1354                         if (error) {
1355                                 dprintk(KERN_ERR
1356                                 "%s: unable to convert ion handle "
1357                                 "0x%08X (error code 0x%08X)\n",
1358                                 __func__,
1359                                 new_handle,
1360                                 error);
1361                                 error = -EINVAL;
1362                                 goto error;
1363                         }
1364                         dprintk(KERN_INFO
1365                         "%s: handle=0x%08x phys_add=0x%08x length=0x%08x\n",
1366                         __func__, invoke->params[i].value.a, ion_addr, ion_len);
1367
1368                         invoke->params[i].value.a = (u32) ion_addr;
1369                         invoke->params[i].value.b = (u32) ion_len;
1370
1371                         invoke->param_types &= ~((0xF) << (4*i));
1372                         invoke->param_types |=
1373                                 TF_PARAM_TYPE_VALUE_INPUT << (4*i);
1374                 }
1375 #endif /* CONFIG_TF_ION */
1376         }
1377
1378         command->invoke_client_command.device_context =
1379                 connection->device_context;
1380
1381         error = tf_send_receive(&connection->dev->sm, command,
1382                 answer, connection, true);
1383
1384 error:
1385 #ifdef CONFIG_TF_ION
1386         if (new_handle != NULL)
1387                 ion_free(connection->ion_client, new_handle);
1388 #endif /* CONFIG_TF_ION */
1389         /* Unmap de temp mem refs */
1390         for (i = 0; i < 4; i++) {
1391                 if (shmem_desc[i] != NULL) {
1392                         dprintk(KERN_INFO "tf_invoke_client_command: "
1393                                 "UnMatemp_memref %d\n ", i);
1394
1395                         tf_unmap_shmem(connection, shmem_desc[i], 0);
1396                 }
1397         }
1398
1399         if (error != 0)
1400                 dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
1401                         error);
1402         else
1403                 dprintk(KERN_ERR "tf_invoke_client_command returns "
1404                         "error_code 0x%08X\n",
1405                         answer->invoke_client_command.error_code);
1406
1407         return error;
1408 }
1409
1410
1411 /*
1412  * Cancels a client command from the Secure World
1413  */
1414 int tf_cancel_client_command(
1415         struct tf_connection *connection,
1416         union tf_command *command,
1417         union tf_answer *answer)
1418 {
1419         int error = 0;
1420
1421         dprintk(KERN_DEBUG "tf_cancel_client_command(%p)\n", connection);
1422
1423         command->cancel_client_operation.device_context =
1424                 connection->device_context;
1425         command->cancel_client_operation.message_size =
1426                 (sizeof(struct tf_command_cancel_client_operation) -
1427                         sizeof(struct tf_command_header)) / 4;
1428
1429         error = tf_send_receive(
1430                 &connection->dev->sm,
1431                 command,
1432                 answer,
1433                 connection,
1434                 true);
1435
1436         if ((error != 0) ||
1437                 (answer->cancel_client_operation.error_code != S_SUCCESS))
1438                 goto error;
1439
1440
1441         /* successful completion */
1442         return 0;
1443
1444 error:
1445         if (error != 0)
1446                 dprintk(KERN_ERR "tf_cancel_client_command returns %d\n",
1447                         error);
1448         else
1449                 dprintk(KERN_ERR "tf_cancel_client_command returns "
1450                         "nChannelStatus 0x%08X\n",
1451                         answer->cancel_client_operation.error_code);
1452
1453         return error;
1454 }
1455
1456
1457
1458 /*
1459  * Destroys a device context from the Secure World
1460  */
1461 int tf_destroy_device_context(
1462         struct tf_connection *connection)
1463 {
1464         int error;
1465         /*
1466          * AFY: better use the specialized tf_command_destroy_device_context
1467          * structure: this will save stack
1468          */
1469         union tf_command command;
1470         union tf_answer answer;
1471
1472         dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", connection);
1473
1474         BUG_ON(connection == NULL);
1475
1476         command.header.message_type = TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
1477         command.header.message_size =
1478                 (sizeof(struct tf_command_destroy_device_context) -
1479                         sizeof(struct tf_command_header))/sizeof(u32);
1480
1481         /*
1482          * fill in the device context handler
1483          * it is guarantied that the first shared memory descriptor describes
1484          * the device context
1485          */
1486         command.destroy_device_context.device_context =
1487                 connection->device_context;
1488
1489         error = tf_send_receive(
1490                 &connection->dev->sm,
1491                 &command,
1492                 &answer,
1493                 connection,
1494                 false);
1495
1496         if ((error != 0) ||
1497                 (answer.destroy_device_context.error_code != S_SUCCESS))
1498                 goto error;
1499
1500         spin_lock(&(connection->state_lock));
1501         connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1502         spin_unlock(&(connection->state_lock));
1503
1504         /* successful completion */
1505         dprintk(KERN_INFO "tf_destroy_device_context(%p)\n",
1506                 connection);
1507         return 0;
1508
1509 error:
1510         if (error != 0) {
1511                 dprintk(KERN_ERR "tf_destroy_device_context failed with "
1512                         "error %d\n", error);
1513         } else {
1514                 dprintk(KERN_ERR "tf_destroy_device_context failed with "
1515                         "error_code 0x%08X\n",
1516                         answer.destroy_device_context.error_code);
1517                 if (answer.destroy_device_context.error_code ==
1518                         S_ERROR_OUT_OF_MEMORY)
1519                         error = -ENOMEM;
1520                 else
1521                         error = -EFAULT;
1522         }
1523
1524         return error;
1525 }
1526
1527
1528 /*----------------------------------------------------------------------------
1529  * Connection initialization and cleanup operations
1530  *----------------------------------------------------------------------------*/
1531
1532 /*
1533  * Opens a connection to the specified device.
1534  *
1535  * The placeholder referenced by connection is set to the address of the
1536  * new connection; it is set to NULL upon failure.
1537  *
1538  * Returns zero upon successful completion, or an appropriate error code upon
1539  * failure.
1540  */
1541 int tf_open(struct tf_device *dev,
1542         struct file *file,
1543         struct tf_connection **connection)
1544 {
1545         int error;
1546         struct tf_connection *conn = NULL;
1547
1548         dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection);
1549
1550         /*
1551          * Allocate and initialize the conn.
1552          * kmalloc only allocates sizeof(*conn) virtual memory
1553          */
1554         conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn),
1555                 GFP_KERNEL);
1556         if (conn == NULL) {
1557                 printk(KERN_ERR "tf_open(): "
1558                         "Out of memory for conn!\n");
1559                 error = -ENOMEM;
1560                 goto error;
1561         }
1562
1563         memset(conn, 0, sizeof(*conn));
1564
1565         conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1566         conn->dev = dev;
1567         spin_lock_init(&(conn->state_lock));
1568         atomic_set(&(conn->pending_op_count), 0);
1569         INIT_LIST_HEAD(&(conn->list));
1570
1571         /*
1572          * Initialize the shared memory
1573          */
1574         error = tf_init_shared_memory(conn);
1575         if (error != 0)
1576                 goto error;
1577
1578 #ifdef CONFIG_TF_ZEBRA
1579         /*
1580          * Initialize CUS specifics
1581          */
1582         tf_crypto_init_cus(conn);
1583 #endif
1584
1585         /*
1586          * Attach the conn to the device.
1587          */
1588         spin_lock(&(dev->connection_list_lock));
1589         list_add(&(conn->list), &(dev->connection_list));
1590         spin_unlock(&(dev->connection_list_lock));
1591
1592         /*
1593          * Successful completion.
1594          */
1595
1596         *connection = conn;
1597
1598         dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn);
1599         return 0;
1600
1601         /*
1602          * Error handling.
1603          */
1604
1605 error:
1606         dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error);
1607         /* Deallocate the descriptor pages if necessary */
1608         internal_kfree(conn);
1609         *connection = NULL;
1610         return error;
1611 }
1612
1613
/*
 * Closes the specified connection.
 *
 * Upon return, the connection has been destroyed and cannot be used anymore.
 *
 * This function does nothing if connection is set to NULL.
 *
 * Note: if the Destroy Device Context exchange with the secure side fails,
 * the connection is deliberately NOT torn down (it is leaked, still linked
 * on the device list) — see the error path below.
 */
void tf_close(struct tf_connection *connection)
{
	int error;
	enum TF_CONN_STATE state;

	dprintk(KERN_DEBUG "tf_close(%p)\n", connection);

	if (connection == NULL)
		return;

	/*
	 * Assumption: Linux guarantees that no other operation is in progress
	 * and that no other operation will be started when close is called
	 */
	BUG_ON(atomic_read(&(connection->pending_op_count)) != 0);

	/*
	 * Exchange a Destroy Device Context message if needed.
	 * Snapshot the state under the spinlock; no new operations can
	 * start at this point (see BUG_ON above), so the snapshot stays valid.
	 */
	spin_lock(&(connection->state_lock));
	state = connection->state;
	spin_unlock(&(connection->state_lock));
	if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) {
		/*
		 * A DestroyDeviceContext operation was not performed. Do it
		 * now.
		 */
		error = tf_destroy_device_context(connection);
		if (error != 0)
			/* avoid cleanup if destroy device context fails */
			goto error;
	}

	/*
	 * Clean up the shared memory
	 */
	tf_cleanup_shared_memories(connection);

#ifdef CONFIG_TF_ION
	/* Release the per-connection ION client, if one was created */
	if (connection->ion_client != NULL)
		ion_client_destroy(connection->ion_client);
#endif

	/* Unlink from the device's connection list before freeing */
	spin_lock(&(connection->dev->connection_list_lock));
	list_del(&(connection->list));
	spin_unlock(&(connection->dev->connection_list_lock));

	internal_kfree(connection);

	return;

error:
	/* Intentional leak: connection stays allocated and on the list */
	dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n",
		connection, error);
}