1 /**
2  * Copyright (c) 2011 Trusted Logic S.A.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License
7  * version 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17  * MA 02111-1307 USA
18  */
19
20 #include <asm/div64.h>
21 #include <asm/system.h>
22 #include <linux/version.h>
23 #include <asm/cputype.h>
24 #include <linux/interrupt.h>
25 #include <linux/page-flags.h>
26 #include <linux/pagemap.h>
27 #include <linux/vmalloc.h>
28 #include <linux/jiffies.h>
29 #include <linux/freezer.h>
30
31 #include "tf_defs.h"
32 #include "tf_comm.h"
33 #include "tf_protocol.h"
34 #include "tf_util.h"
35 #include "tf_conn.h"
36
37 #ifdef CONFIG_TF_ZEBRA
38 #include "tf_zebra.h"
39 #endif
40
41 /*---------------------------------------------------------------------------
42  * Internal Constants
43  *---------------------------------------------------------------------------*/
44
45 /*
46  * Shared memory descriptor constants
47  */
48 #define DESCRIPTOR_B_MASK           (1 << 2)
49 #define DESCRIPTOR_C_MASK           (1 << 3)
50 #define DESCRIPTOR_S_MASK           (1 << 10)
51
52 #define L1_COARSE_DESCRIPTOR_BASE         (0x00000001)
53 #define L1_COARSE_DESCRIPTOR_ADDR_MASK    (0xFFFFFC00)
54 #define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
55
56 #define L2_PAGE_DESCRIPTOR_BASE              (0x00000003)
57 #define L2_PAGE_DESCRIPTOR_AP_APX_READ       (0x220)
58 #define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
59
60 #define L2_INIT_DESCRIPTOR_BASE           (0x00000003)
61 #define L2_INIT_DESCRIPTOR_V13_12_SHIFT   (4)
62
63 /*
64  * Reject attempts to share strongly-ordered or device memory:
65  * Strongly-Ordered:  TEX=0b000, C=0, B=0
66  * Shared Device:     TEX=0b000, C=0, B=1
67  * Non-Shared Device: TEX=0b010, C=0, B=0
68  */
69 #define L2_TEX_C_B_MASK \
70         ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
71 #define L2_TEX_C_B_STRONGLY_ORDERED \
72         ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
73 #define L2_TEX_C_B_SHARED_DEVICE \
74         ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
75 #define L2_TEX_C_B_NON_SHARED_DEVICE \
76         ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
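/*
 * In an ARM L2 small-page descriptor, TEX[2:0] occupies bits [8:6],
 * C is bit 3 and B is bit 2, which is what the masks above select.
 */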
77
78 #define CACHE_S(x)      ((x) & (1 << 24))
79 #define CACHE_DSIZE(x)  (((x) >> 12) & 4095)
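/*
 * CACHE_S and CACHE_DSIZE decode the ARM cache type register as read
 * below via read_cpuid(CPUID_CACHETYPE): bit 24 is the separate-caches
 * flag and bits [23:12] describe the data cache.
 */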
80
81 #define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
82 #define TIME_INFINITE  ((u64) 0xFFFFFFFFFFFFFFFFULL)
83
84 /*---------------------------------------------------------------------------
85  * atomic operation definitions
86  *---------------------------------------------------------------------------*/
87
88 /*
89  * Atomically updates the sync_serial_n and time_n fields of the L1
90  * shared buffer; updates are serialized by comm->lock.
91  */
92 void tf_set_current_time(struct tf_comm *comm)
93 {
94         u32 new_sync_serial;
95         struct timeval now;
96         u64 time64;
97
98         /*
99          * lock the structure while updating the L1 shared memory fields
100          */
101         spin_lock(&comm->lock);
102
103         /* read sync_serial_n and change the TimeSlot bit field */
104         new_sync_serial =
105                 tf_read_reg32(&comm->l1_buffer->sync_serial_n) + 1;
106
107         do_gettimeofday(&now);
108         time64 = now.tv_sec;
109         time64 = (time64 * 1000) + (now.tv_usec / 1000);
110
111         /* Write the new time64 and new_sync_serial into shared memory */
112         tf_write_reg64(&comm->l1_buffer->time_n[new_sync_serial &
113                 TF_SYNC_SERIAL_TIMESLOT_N], time64);
114         tf_write_reg32(&comm->l1_buffer->sync_serial_n,
115                 new_sync_serial);
116
117         spin_unlock(&comm->lock);
118 }
119
120 /*
121  * Reads the Secure World timeout from the L1 shared buffer.
122  * The difficulty is to read the two u32 halves of the 64-bit value
123  * atomically; this is guaranteed by reading the timeslot published by
124  * the Secure World before and after the read, and retrying until both
125  * reads of the timeslot return the same value.
126  */
127 static inline void tf_read_timeout(struct tf_comm *comm, u64 *time)
128 {
129         u32 sync_serial_s_initial = 0;
130         u32 sync_serial_s_final = 1;
131         u64 time64;
132
133         spin_lock(&comm->lock);
134
135         while (sync_serial_s_initial != sync_serial_s_final) {
136                 sync_serial_s_initial = tf_read_reg32(
137                         &comm->l1_buffer->sync_serial_s);
138                 time64 = tf_read_reg64(
139                         &comm->l1_buffer->timeout_s[sync_serial_s_initial&1]);
140
141                 sync_serial_s_final = tf_read_reg32(
142                         &comm->l1_buffer->sync_serial_s);
143         }
144
145         spin_unlock(&comm->lock);
146
147         *time = time64;
148 }
149
150 /*----------------------------------------------------------------------------
151  * SIGKILL signal handling
152  *----------------------------------------------------------------------------*/
153
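/*
 * A SIGKILL may be queued either on the thread's private pending set or
 * on the process-wide shared_pending set, so both are checked here.
 */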
154 static bool sigkill_pending(void)
155 {
156         if (signal_pending(current)) {
157                 dprintk(KERN_INFO "A signal is pending\n");
158                 if (sigismember(&current->pending.signal, SIGKILL)) {
159                         dprintk(KERN_INFO "A SIGKILL is pending\n");
160                         return true;
161                 } else if (sigismember(
162                         &current->signal->shared_pending.signal, SIGKILL)) {
163                         dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
164                         return true;
165                 }
166         }
167         return false;
168 }
169
170 /*----------------------------------------------------------------------------
171  * Shared memory related operations
172  *----------------------------------------------------------------------------*/
173
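/*
 * Layout note (inferred from the allocator below): each coarse page
 * table is 1KB (256 32-bit descriptors), so one zeroed kernel page
 * yields an array of four tables; a struct tf_coarse_page_table_array
 * tracks that page and counts how many of its tables are handed out.
 */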
174 struct tf_coarse_page_table *tf_alloc_coarse_page_table(
175         struct tf_coarse_page_table_allocation_context *alloc_context,
176         u32 type)
177 {
178         struct tf_coarse_page_table *coarse_pg_table = NULL;
179
180         spin_lock(&(alloc_context->lock));
181
182         if (!(list_empty(&(alloc_context->free_coarse_page_tables)))) {
183                 /*
184                  * The free list can provide us a coarse page table
185                  * descriptor
186                  */
187                 coarse_pg_table = list_first_entry(
188                                 &alloc_context->free_coarse_page_tables,
189                                 struct tf_coarse_page_table, list);
190                 list_del(&(coarse_pg_table->list));
191
192                 coarse_pg_table->parent->ref_count++;
193         } else {
194                 /* the free list is empty, create a new array of coarse page tables */
195                 struct tf_coarse_page_table_array *array;
196                 void *page;
197                 int i;
198
199                 spin_unlock(&(alloc_context->lock));
200
201                 /* first allocate a new page descriptor */
202                 array = internal_kmalloc(sizeof(*array), GFP_KERNEL);
203                 if (array == NULL) {
204                         dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
205                                         " failed to allocate a table array\n",
206                                         alloc_context);
207                         return NULL;
208                 }
209
210                 array->type = type;
211                 array->ref_count = 0;
212                 INIT_LIST_HEAD(&(array->list));
213
214                 /* now allocate the actual page the page descriptor describes */
215                 page = (void *) internal_get_zeroed_page(GFP_KERNEL);
216                 if (page == NULL) {
217                         dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
218                                         " failed to allocate a page\n",
219                                         alloc_context);
220                         internal_kfree(array);
221                         return NULL;
222                 }
223
224                 spin_lock(&(alloc_context->lock));
225
226                 /* initialize the coarse page table descriptors */
227                 for (i = 0; i < 4; i++) {
228                         INIT_LIST_HEAD(&(array->coarse_page_tables[i].list));
229                         array->coarse_page_tables[i].descriptors =
230                                 page + (i * SIZE_1KB);
231                         array->coarse_page_tables[i].parent = array;
232
233                         if (i == 0) {
234                                 /*
235                                  * the first element is kept for the current
236                                  * coarse page table allocation
237                                  */
238                                 coarse_pg_table =
239                                         &(array->coarse_page_tables[i]);
240                                 array->ref_count++;
241                         } else {
242                                 /*
243                                  * The other elements are added to the free list
244                                  */
245                                 list_add(&(array->coarse_page_tables[i].list),
246                                         &(alloc_context->
247                                                 free_coarse_page_tables));
248                         }
249                 }
250
251                 list_add(&(array->list),
252                         &(alloc_context->coarse_page_table_arrays));
253         }
254         spin_unlock(&(alloc_context->lock));
255
256         return coarse_pg_table;
257 }
258
259
260 void tf_free_coarse_page_table(
261         struct tf_coarse_page_table_allocation_context *alloc_context,
262         struct tf_coarse_page_table *coarse_pg_table,
263         int force)
264 {
265         struct tf_coarse_page_table_array *array;
266
267         spin_lock(&(alloc_context->lock));
268
269         array = coarse_pg_table->parent;
270
271         (array->ref_count)--;
272
273         if (array->ref_count == 0) {
274                 /*
275                  * no coarse page table descriptor is used
276                  * check if we should free the whole page
277                  */
278
279                 if ((array->type == TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
280                         && (force == 0))
281                         /*
282                          * This is a preallocated page,
283                          * add the page back to the free list
284                          */
285                         list_add(&(coarse_pg_table->list),
286                                 &(alloc_context->free_coarse_page_tables));
287                 else {
288                         /*
289                          * None of the page's coarse page table descriptors
290                          * are in use, free the whole page
291                          */
292                         int i;
293                         u32 *descriptors;
294
295                         /*
296                          * remove the page's associated coarse page table
297                          * descriptors from the free list
298                          */
299                         for (i = 0; i < 4; i++)
300                                 if (&(array->coarse_page_tables[i]) !=
301                                                 coarse_pg_table)
302                                         list_del(&(array->
303                                                 coarse_page_tables[i].list));
304
305                         descriptors =
306                                 array->coarse_page_tables[0].descriptors;
307                         array->coarse_page_tables[0].descriptors = NULL;
308
309                         /* remove the array from the list of arrays */
310                         list_del(&(array->list));
311
312                         spin_unlock(&(alloc_context->lock));
313                         /*
314                          * Free the page.
315                          * The address of the page is contained in the first
316                          * element
317                          */
318                         internal_free_page((unsigned long) descriptors);
319                         /* finally free the array */
320                         internal_kfree(array);
321
322                         spin_lock(&(alloc_context->lock));
323                 }
324         } else {
325                 /*
326                  * Some coarse page table descriptors are in use.
327                  * Add the descriptor to the free list
328                  */
329                 list_add(&(coarse_pg_table->list),
330                         &(alloc_context->free_coarse_page_tables));
331         }
332
333         spin_unlock(&(alloc_context->lock));
334 }
335
336
337 void tf_init_coarse_page_table_allocator(
338         struct tf_coarse_page_table_allocation_context *alloc_context)
339 {
340         spin_lock_init(&(alloc_context->lock));
341         INIT_LIST_HEAD(&(alloc_context->coarse_page_table_arrays));
342         INIT_LIST_HEAD(&(alloc_context->free_coarse_page_tables));
343 }
344
345 void tf_release_coarse_page_table_allocator(
346         struct tf_coarse_page_table_allocation_context *alloc_context)
347 {
348         spin_lock(&(alloc_context->lock));
349
350         /* now clean up the list of page descriptors */
351         while (!list_empty(&(alloc_context->coarse_page_table_arrays))) {
352                 struct tf_coarse_page_table_array *page_desc;
353                 u32 *descriptors;
354
355                 page_desc = list_first_entry(
356                         &alloc_context->coarse_page_table_arrays,
357                         struct tf_coarse_page_table_array, list);
358
359                 descriptors = page_desc->coarse_page_tables[0].descriptors;
360                 list_del(&(page_desc->list));
361
362                 spin_unlock(&(alloc_context->lock));
363
364                 if (descriptors != NULL)
365                         internal_free_page((unsigned long)descriptors);
366
367                 internal_kfree(page_desc);
368
369                 spin_lock(&(alloc_context->lock));
370         }
371
372         spin_unlock(&(alloc_context->lock));
373 }
374
375 /*
376  * Returns the L1 coarse page descriptor for
377  * a coarse page table located at address coarse_pg_table_descriptors
378  */
379 u32 tf_get_l1_coarse_descriptor(
380         u32 coarse_pg_table_descriptors[256])
381 {
382         u32 descriptor = L1_COARSE_DESCRIPTOR_BASE;
383         unsigned int info = read_cpuid(CPUID_CACHETYPE);
384
385         descriptor |= (virt_to_phys((void *) coarse_pg_table_descriptors)
386                 & L1_COARSE_DESCRIPTOR_ADDR_MASK);
387
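        /*
         * CACHE_DSIZE(info) & (1 << 11) tests bit 23 of the cache type
         * register (the data cache "P" bit): together with separate
         * caches it signals a page-coloring restriction, so bits 13:12
         * of the virtual address must be folded into the descriptor.
         */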
388         if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
389                 dprintk(KERN_DEBUG "tf_get_l1_coarse_descriptor "
390                         "V13-12 added to descriptor\n");
391                 /* the 16k alignment restriction applies */
392                 descriptor |= (DESCRIPTOR_V13_12_GET(
393                         (u32)coarse_pg_table_descriptors) <<
394                                 L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
395         }
396
397         return descriptor;
398 }
399
400
401 #define dprintk_desc(...)
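/* Per-descriptor tracing is compiled out (presumably mapped to dprintk when debugging). */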
402 /*
403  * Returns the L2 descriptor for the specified user page.
404  */
405 u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
406 {
407         pgd_t *pgd;
408         pud_t *pud;
409         pmd_t *pmd;
410         pte_t *ptep;
411         u32  *hwpte;
412         u32   tex = 0;
413         u32 descriptor = 0;
414
415         dprintk_desc(KERN_INFO "VirtAddr = %x\n", vaddr);
416         pgd = pgd_offset(mm, vaddr);
417         dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
418                 (unsigned int) *pgd);
419         if (pgd_none(*pgd))
420                 goto error;
421         pud = pud_offset(pgd, vaddr);
422         dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
423                 (unsigned int) *pud);
424         if (pud_none(*pud))
425                 goto error;
426         pmd = pmd_offset(pud, vaddr);
427         dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
428                 (unsigned int) *pmd);
429         if (pmd_none(*pmd))
430                 goto error;
431
432         if (PMD_TYPE_SECT&(*pmd)) {
433                 /* We have a section */
434                 dprintk_desc(KERN_INFO "Section descr=%x\n",
435                         (unsigned int)*pmd);
436                 if ((*pmd) & PMD_SECT_BUFFERABLE)
437                         descriptor |= DESCRIPTOR_B_MASK;
438                 if ((*pmd) & PMD_SECT_CACHEABLE)
439                         descriptor |= DESCRIPTOR_C_MASK;
440                 if ((*pmd) & PMD_SECT_S)
441                         descriptor |= DESCRIPTOR_S_MASK;
442                 tex = ((*pmd) >> 12) & 7;
443         } else {
444                 /* We have a table */
445                 ptep = pte_offset_map(pmd, vaddr);
446                 if (pte_present(*ptep)) {
447                         dprintk_desc(KERN_INFO "L2 descr=%x\n",
448                                 (unsigned int) *ptep);
449                         if ((*ptep) & L_PTE_MT_BUFFERABLE)
450                                 descriptor |= DESCRIPTOR_B_MASK;
451                         if ((*ptep) & L_PTE_MT_WRITETHROUGH)
452                                 descriptor |= DESCRIPTOR_C_MASK;
453                         if ((*ptep) & L_PTE_MT_DEV_SHARED)
454                                 descriptor |= DESCRIPTOR_S_MASK;
455
456                         /*
457                          * Linux's pte doesn't keep track of the TEX value.
458                          * We have to jump to the hardware pte; see include/asm/pgtable.h
459                          */
460 #ifdef PTE_HWTABLE_SIZE
461                         hwpte = (u32 *) (ptep + PTE_HWTABLE_PTRS);
462 #else
463                         hwpte = (u32 *) (ptep - PTRS_PER_PTE);
464 #endif
465                         if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
466                                         ((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
467                                 goto error;
468                         dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
469                         tex = ((*hwpte) >> 6) & 7;
470                         pte_unmap(ptep);
471                 } else {
472                         pte_unmap(ptep);
473                         goto error;
474                 }
475         }
476
477         descriptor |= (tex << 6);
478
479         return descriptor;
480
481 error:
482         dprintk(KERN_ERR "Error occurred in %s\n", __func__);
483         return 0;
484 }
485
486
487 /*
488  * Changes an L2 page descriptor back to a pointer to a physical page
489  */
490 inline struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor)
491 {
492         return pte_page(l2_page_descriptor & L2_DESCRIPTOR_ADDR_MASK);
493 }
494
495
496 /*
497  * Converts *l2_page_descriptor, which holds a (struct page *) on entry,
498  * into the L2 page descriptor for that page (L2_DESCRIPTOR_FAULT on error).
499  */
500 static void tf_get_l2_page_descriptor(
501         u32 *l2_page_descriptor,
502         u32 flags, struct mm_struct *mm)
503 {
504         unsigned long page_vaddr;
505         u32 descriptor;
506         struct page *page;
507         bool unmap_page = false;
508
509 #if 0
510         dprintk(KERN_INFO
511                 "tf_get_l2_page_descriptor():"
512                 "*l2_page_descriptor=%x\n",
513                 *l2_page_descriptor);
514 #endif
515
516         if (*l2_page_descriptor == L2_DESCRIPTOR_FAULT)
517                 return;
518
519         page = (struct page *) (*l2_page_descriptor);
520
521         page_vaddr = (unsigned long) page_address(page);
522         if (page_vaddr == 0) {
523                 dprintk(KERN_INFO "page_address returned 0\n");
524                 /* Should we use kmap_atomic(page, KM_USER0) instead ? */
525                 page_vaddr = (unsigned long) kmap(page);
526                 if (page_vaddr == 0) {
527                         *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
528                         dprintk(KERN_ERR "kmap returned 0\n");
529                         return;
530                 }
531                 unmap_page = true;
532         }
533
534         descriptor = tf_get_l2_descriptor_common(page_vaddr, mm);
535         if (descriptor == 0) {
536                 *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
537                 return;
538         }
539         descriptor |= L2_PAGE_DESCRIPTOR_BASE;
540
541         descriptor |= (page_to_phys(page) & L2_DESCRIPTOR_ADDR_MASK);
542
543         if (!(flags & TF_SHMEM_TYPE_WRITE))
544                 /* only read access */
545                 descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
546         else
547                 /* read and write access */
548                 descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
549
550         if (unmap_page)
551                 kunmap(page);
552
553         *l2_page_descriptor = descriptor;
554 }
555
556
557 /*
558  * Unlocks the physical memory pages and frees the coarse page tables
559  * that are no longer needed.
560  */
561 void tf_cleanup_shared_memory(
562         struct tf_coarse_page_table_allocation_context *alloc_context,
563         struct tf_shmem_desc *shmem_desc,
564         u32 full_cleanup)
565 {
566         u32 coarse_page_index;
567
568         dprintk(KERN_INFO "tf_cleanup_shared_memory(%p)\n",
569                         shmem_desc);
570
571 #ifdef DEBUG_COARSE_TABLES
572         printk(KERN_DEBUG "tf_cleanup_shared_memory "
573                 "- number of coarse page tables=%d\n",
574                 shmem_desc->coarse_pg_table_count);
575
576         for (coarse_page_index = 0;
577              coarse_page_index < shmem_desc->coarse_pg_table_count;
578              coarse_page_index++) {
579                 u32 j;
580
581                 printk(KERN_DEBUG "  Descriptor=%p address=%p index=%d\n",
582                         shmem_desc->coarse_pg_table[coarse_page_index],
583                         shmem_desc->coarse_pg_table[coarse_page_index]->
584                                 descriptors,
585                         coarse_page_index);
586                 if (shmem_desc->coarse_pg_table[coarse_page_index] != NULL) {
587                         for (j = 0;
588                              j < TF_DESCRIPTOR_TABLE_CAPACITY;
589                              j += 8) {
590                                 int k;
591                                 printk(KERN_DEBUG "    ");
592                                 for (k = j; k < j + 8; k++)
593                                         printk(KERN_DEBUG "%p ",
594                                                 shmem_desc->coarse_pg_table[
595                                                         coarse_page_index]->
596                                                                 descriptors);
597                                 printk(KERN_DEBUG "\n");
598                         }
599                 }
600         }
601         printk(KERN_DEBUG "tf_cleanup_shared_memory() - done\n\n");
602 #endif
603
604         /* Parse the coarse page descriptors */
605         for (coarse_page_index = 0;
606              coarse_page_index < shmem_desc->coarse_pg_table_count;
607              coarse_page_index++) {
608                 u32 j;
609                 u32 found = 0;
610
611                 /* parse the page descriptors of the coarse page */
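                /*
                 * Mapped entries are contiguous within a table, which is why
                 * the loop below stops at the first fault entry that follows
                 * a mapped one.
                 */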
612                 for (j = 0; j < TF_DESCRIPTOR_TABLE_CAPACITY; j++) {
613                         u32 l2_page_descriptor = (u32) (shmem_desc->
614                                 coarse_pg_table[coarse_page_index]->
615                                         descriptors[j]);
616
617                         if (l2_page_descriptor != L2_DESCRIPTOR_FAULT) {
618                                 struct page *page =
619                                         tf_l2_page_descriptor_to_page(
620                                                 l2_page_descriptor);
621
622                                 if (!PageReserved(page))
623                                         SetPageDirty(page);
624                                 internal_page_cache_release(page);
625
626                                 found = 1;
627                         } else if (found == 1) {
628                                 break;
629                         }
630                 }
631
632                 /*
633                  * Only free coarse page tables that were not preallocated, or on full cleanup
634                  */
635                 if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
636                         (full_cleanup != 0))
637                         tf_free_coarse_page_table(alloc_context,
638                                 shmem_desc->coarse_pg_table[coarse_page_index],
639                                 0);
640         }
641
642         shmem_desc->coarse_pg_table_count = 0;
643         dprintk(KERN_INFO "tf_cleanup_shared_memory(%p) done\n",
644                         shmem_desc);
645 }
646
647 /*
648  * Make sure the coarse pages are allocated. If not allocated, do it.
649  * Locks down the physical memory pages.
650  * Verifies the memory attributes depending on flags.
651  */
652 int tf_fill_descriptor_table(
653         struct tf_coarse_page_table_allocation_context *alloc_context,
654         struct tf_shmem_desc *shmem_desc,
655         u32 buffer,
656         struct vm_area_struct **vmas,
657         u32 descriptors[TF_MAX_COARSE_PAGES],
658         u32 buffer_size,
659         u32 *buffer_start_offset,
660         bool in_user_space,
661         u32 flags,
662         u32 *descriptor_count)
663 {
664         u32 coarse_page_index;
665         u32 coarse_page_count;
666         u32 page_count;
667         u32 page_shift = 0;
668         int ret = 0;
669         unsigned int info = read_cpuid(CPUID_CACHETYPE);
670
671         dprintk(KERN_INFO "tf_fill_descriptor_table"
672                 "(%p, buffer=0x%08X, size=0x%08X, user=%01x "
673                 "flags = 0x%08x)\n",
674                 shmem_desc,
675                 buffer,
676                 buffer_size,
677                 in_user_space,
678                 flags);
679
680         /*
681          * Compute the number of pages
682          * Compute the number of coarse pages
683          * Compute the page offset
684          */
685         page_count = ((buffer & ~PAGE_MASK) +
686                 buffer_size + ~PAGE_MASK) >> PAGE_SHIFT;
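        /*
         * For example, with 4KB pages a buffer starting 0x100 bytes into a
         * page with buffer_size 0x2000 needs
         * (0x100 + 0x2000 + 0xFFF) >> 12 = 3 pages.
         */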
687
688         /* check whether the 16k alignment restriction applies */
689         if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
690                 /*
691                  * The 16k alignment restriction applies.
692                  * Shift the data so that it is 16k-aligned
693                  */
694                 page_shift = DESCRIPTOR_V13_12_GET(buffer);
695         page_count += page_shift;
696
697
698         /*
699          * Check that the number of pages fits in the coarse page tables
700          */
701         if (page_count > (TF_DESCRIPTOR_TABLE_CAPACITY *
702                         TF_MAX_COARSE_PAGES)) {
703                 dprintk(KERN_ERR "tf_fill_descriptor_table(%p): "
704                         "%u pages required to map shared memory!\n",
705                         shmem_desc, page_count);
706                 ret = -ENOMEM;
707                 goto error;
708         }
709
710         /* each coarse page table describes 256 pages */
711         coarse_page_count = ((page_count +
712                 TF_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
713                         TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
714
715         /*
716          * Compute the buffer offset
717          */
718         *buffer_start_offset = (buffer & ~PAGE_MASK) |
719                 (page_shift << PAGE_SHIFT);
720
721         /* map each coarse page */
722         for (coarse_page_index = 0;
723              coarse_page_index < coarse_page_count;
724              coarse_page_index++) {
725                 u32 j;
726                 struct tf_coarse_page_table *coarse_pg_table;
727
728                 /* compute a virtual address with appropriate offset */
729                 u32 buffer_offset_vaddr = buffer +
730                         (coarse_page_index * TF_MAX_COARSE_PAGE_MAPPED_SIZE);
731                 u32 pages_to_get;
732
733                 /*
734                  * Compute the number of pages mapped by this coarse page
735                  * table, i.e. min(page_count, 256), then decrement page_count.
736                  */
737                 pages_to_get = (page_count >>
738                         TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
739                                 TF_DESCRIPTOR_TABLE_CAPACITY : page_count;
740                 page_count -= pages_to_get;
741
742                 /*
743                  * Check if the coarse page has already been allocated
744                  * If not, do it now
745                  */
746                 if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM)
747                         || (shmem_desc->type ==
748                                 TF_SHMEM_TYPE_PM_HIBERNATE)) {
749                         coarse_pg_table = tf_alloc_coarse_page_table(
750                                 alloc_context,
751                                 TF_PAGE_DESCRIPTOR_TYPE_NORMAL);
752
753                         if (coarse_pg_table == NULL) {
754                                 dprintk(KERN_ERR
755                                         "tf_fill_descriptor_table(%p): "
756                                         "tf_alloc_coarse_page_table "
757                                         "failed for coarse page %d\n",
758                                         shmem_desc, coarse_page_index);
759                                 ret = -ENOMEM;
760                                 goto error;
761                         }
762
763                         shmem_desc->coarse_pg_table[coarse_page_index] =
764                                 coarse_pg_table;
765                 } else {
766                         coarse_pg_table =
767                                 shmem_desc->coarse_pg_table[coarse_page_index];
768                 }
769
770                 /*
771                  * The page is not necessarily filled with zeroes.
772                  * Set the fault descriptors (each descriptor is 4 bytes long)
773                  */
774                 memset(coarse_pg_table->descriptors, 0x00,
775                         TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
776
777                 if (in_user_space) {
778                         int pages;
779
780                         /*
781                          * TRICK: use coarse_pg_table->descriptors to
782                          * hold the (struct page*) items before getting their
783                          * physical address
784                          */
785                         down_read(&(current->mm->mmap_sem));
786                         pages = internal_get_user_pages(
787                                 current,
788                                 current->mm,
789                                 buffer_offset_vaddr,
790                                 /*
791                                  * page_shift is cleared after retrieving first
792                                  * coarse page
793                                  */
794                                 (pages_to_get - page_shift),
795                                 (flags & TF_SHMEM_TYPE_WRITE) ? 1 : 0,
796                                 0,
797                                 (struct page **) (coarse_pg_table->descriptors
798                                         + page_shift),
799                                 vmas);
800                         up_read(&(current->mm->mmap_sem));
801
802                         if ((pages <= 0) ||
803                                 (pages != (pages_to_get - page_shift))) {
804                                 dprintk(KERN_ERR "tf_fill_descriptor_table:"
805                                         " get_user_pages got %d pages while "
806                                         "trying to get %d pages!\n",
807                                         pages, pages_to_get - page_shift);
808                                 ret = -EFAULT;
809                                 goto error;
810                         }
811
812                         for (j = page_shift;
813                                   j < page_shift + pages;
814                                   j++) {
815                                 /* Get the actual L2 descriptors */
816                                 tf_get_l2_page_descriptor(
817                                         &coarse_pg_table->descriptors[j],
818                                         flags,
819                                         current->mm);
820                                 /*
821                                  * Reject Strongly-Ordered or Device Memory
822                                  */
823 #define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
824         ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
825          (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
826          (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
827
828                                 if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
829                                         coarse_pg_table->
830                                                 descriptors[j])) {
831                                         dprintk(KERN_ERR
832                                                 "tf_fill_descriptor_table:"
833                                                 " descriptor 0x%08X use "
834                                                 "strongly-ordered or device "
835                                                 "memory. Rejecting!\n",
836                                                 coarse_pg_table->
837                                                         descriptors[j]);
838                                         ret = -EFAULT;
839                                         goto error;
840                                 }
841                         }
842                 } else {
843                         /* Kernel-space memory */
844                         dprintk(KERN_INFO
845                                 "tf_fill_descriptor_table: "
846                                 "buffer starting at %p\n",
847                                (void *)buffer_offset_vaddr);
848                         for (j = page_shift; j < pages_to_get; j++) {
849                                 struct page *page;
850                                 void *addr =
851                                         (void *)(buffer_offset_vaddr +
852                                                 (j - page_shift) * PAGE_SIZE);
853
854                                 if (is_vmalloc_addr(
855                                                 (void *) buffer_offset_vaddr))
856                                         page = vmalloc_to_page(addr);
857                                 else
858                                         page = virt_to_page(addr);
859
860                                 if (page == NULL) {
861                                         dprintk(KERN_ERR
862                                                 "tf_fill_descriptor_table: "
863                                                 "cannot map %p (vmalloc) "
864                                                 "to page\n",
865                                                 addr);
866                                         ret = -EFAULT;
867                                         goto error;
868                                 }
869                                 coarse_pg_table->descriptors[j] = (u32)page;
870                                 get_page(page);
871
872                                 /* convert the page pointer into an L2 page descriptor */
873                                 tf_get_l2_page_descriptor(
874                                         &coarse_pg_table->descriptors[j],
875                                         flags,
876                                         &init_mm);
877                         }
878                 }
879
880                 dmac_flush_range((void *)coarse_pg_table->descriptors,
881                    (void *)(((u32)(coarse_pg_table->descriptors)) +
882                    TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
883
884                 outer_clean_range(
885                         __pa(coarse_pg_table->descriptors),
886                         __pa(coarse_pg_table->descriptors) +
887                         TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
888                 wmb();
889
890                 /* Update the coarse page table address */
891                 descriptors[coarse_page_index] =
892                         tf_get_l1_coarse_descriptor(
893                                 coarse_pg_table->descriptors);
894
895                 /*
896                  * The next coarse page has no page shift, reset the
897                  * page_shift
898                  */
899                 page_shift = 0;
900         }
901
902         *descriptor_count = coarse_page_count;
903         shmem_desc->coarse_pg_table_count = coarse_page_count;
904
905 #ifdef DEBUG_COARSE_TABLES
906         printk(KERN_DEBUG "tf_fill_descriptor_table - size=0x%08X "
907                 "numberOfCoarsePages=%d\n", buffer_size,
908                 shmem_desc->coarse_pg_table_count);
909         for (coarse_page_index = 0;
910              coarse_page_index < shmem_desc->coarse_pg_table_count;
911              coarse_page_index++) {
912                 u32 j;
913                 struct tf_coarse_page_table *coarse_page_table =
914                         shmem_desc->coarse_pg_table[coarse_page_index];
915
916                 printk(KERN_DEBUG "  Descriptor=%p address=%p index=%d\n",
917                         coarse_page_table,
918                         coarse_page_table->descriptors,
919                         coarse_page_index);
920                 for (j = 0;
921                      j < TF_DESCRIPTOR_TABLE_CAPACITY;
922                      j += 8) {
923                         int k;
924                         printk(KERN_DEBUG "    ");
925                         for (k = j; k < j + 8; k++)
926                                 printk(KERN_DEBUG "0x%08X ",
927                                         coarse_page_table->descriptors[k]);
928                         printk(KERN_DEBUG "\n");
929                 }
930         }
931         printk(KERN_DEBUG "tf_fill_descriptor_table() - done\n\n");
932 #endif
933
934         return 0;
935
936 error:
937         tf_cleanup_shared_memory(
938                         alloc_context,
939                         shmem_desc,
940                         0);
941
942         return ret;
943 }
944
945
946 /*----------------------------------------------------------------------------
947  * Standard communication operations
948  *----------------------------------------------------------------------------*/
949
950 u8 *tf_get_description(struct tf_comm *comm)
951 {
952         if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
953                 return comm->l1_buffer->version_description;
954
955         return NULL;
956 }
957
958 /*
959  * Returns a non-zero value if the specified S-timeout has expired, zero
960  * otherwise.
961  *
962  * The placeholder referenced by relative_timeout_jiffies gives the relative
963  * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
964  * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
965  */
966 static int tf_test_s_timeout(
967                 u64 timeout,
968                 signed long *relative_timeout_jiffies)
969 {
970         struct timeval now;
971         u64 time64;
972
973         *relative_timeout_jiffies = 0;
974
975         /* immediate timeout */
976         if (timeout == TIME_IMMEDIATE)
977                 return 1;
978
979         /* infinite timeout */
980         if (timeout == TIME_INFINITE) {
981                 dprintk(KERN_DEBUG "tf_test_s_timeout: "
982                         "timeout is infinite\n");
983                 *relative_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
984                 return 0;
985         }
986
987         do_gettimeofday(&now);
988         time64 = now.tv_sec;
989         /* will not overflow as operations are done on 64bit values */
990         time64 = (time64 * 1000) + (now.tv_usec / 1000);
991
992         /* timeout expired */
993         if (time64 >= timeout) {
994                 dprintk(KERN_DEBUG "tf_test_s_timeout: timeout expired\n");
995                 return 1;
996         }
997
998         /*
999          * finite timeout, compute relative_timeout_jiffies
1000          */
1001         /* will not overflow as time64 < timeout */
1002         timeout -= time64;
1003
1004         /* guarantee *relative_timeout_jiffies is a valid timeout */
1005         if ((timeout >> 32) != 0)
1006                 *relative_timeout_jiffies = MAX_JIFFY_OFFSET;
1007         else
1008                 *relative_timeout_jiffies =
1009                         msecs_to_jiffies((unsigned int) timeout);
1010
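        /*
         * For example, with HZ=100 a timeout 250 ms in the future yields
         * msecs_to_jiffies(250) = 25 jiffies here.
         */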
1011         dprintk(KERN_DEBUG "tf_test_s_timeout: timeout is 0x%lx\n",
1012                 *relative_timeout_jiffies);
1013         return 0;
1014 }
1015
1016 static void tf_copy_answers(struct tf_comm *comm)
1017 {
1018         u32 first_answer;
1019         u32 first_free_answer;
1020         struct tf_answer_struct *answerStructureTemp;
1021
1022         if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
1023                 spin_lock(&comm->lock);
1024                 first_free_answer = tf_read_reg32(
1025                         &comm->l1_buffer->first_free_answer);
1026                 first_answer = tf_read_reg32(
1027                         &comm->l1_buffer->first_answer);
1028
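                /*
                 * answer_queue is a circular buffer of 32-bit words:
                 * first_answer is the read index, advanced below by the
                 * driver, while first_free_answer is the write index,
                 * advanced by the Secure World.
                 */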
1029                 while (first_answer != first_free_answer) {
1030                         /* answer queue not empty */
1031                         union tf_answer sComAnswer;
1032                         struct tf_answer_header  header;
1033
1034                         /*
1035                          * size of the message in the queue, in 32-bit
1036                          * words (not in bytes)
1037                          */
1038                         u32 command_size;
1039                         u32 i;
1040                         u32 *temp = (uint32_t *) &header;
1041
1042                         dprintk(KERN_INFO
1043                                 "[pid=%d] tf_copy_answers(%p): "
1044                                 "Read answers from L1\n",
1045                                 current->pid, comm);
1046
1047                         /* Read the answer header */
1048                         for (i = 0;
1049                              i < sizeof(struct tf_answer_header)/sizeof(u32);
1050                                i++)
1051                                 temp[i] = comm->l1_buffer->answer_queue[
1052                                         (first_answer + i) %
1053                                                 TF_S_ANSWER_QUEUE_CAPACITY];
1054
1055                         /* Read the answer from the L1 buffer */
1056                         command_size = header.message_size +
1057                                 sizeof(struct tf_answer_header)/sizeof(u32);
1058                         temp = (uint32_t *) &sComAnswer;
1059                         for (i = 0; i < command_size; i++)
1060                                 temp[i] = comm->l1_buffer->answer_queue[
1061                                         (first_answer + i) %
1062                                                 TF_S_ANSWER_QUEUE_CAPACITY];
1063
1064                         answerStructureTemp = (struct tf_answer_struct *)
1065                                 sComAnswer.header.operation_id;
1066
1067                         tf_dump_answer(&sComAnswer);
1068
1069                         memcpy(answerStructureTemp->answer, &sComAnswer,
1070                                 command_size * sizeof(u32));
1071                         answerStructureTemp->answer_copied = true;
1072
1073                         first_answer += command_size;
1074                         tf_write_reg32(&comm->l1_buffer->first_answer,
1075                                 first_answer);
1076                 }
1077                 spin_unlock(&(comm->lock));
1078         }
1079 }
1080
1081 static void tf_copy_command(
1082         struct tf_comm *comm,
1083         union tf_command *command,
1084         struct tf_connection *connection,
1085         enum TF_COMMAND_STATE *command_status)
1086 {
1087         if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
1088                 && (command != NULL)) {
1089                 /*
1090                  * Write the message in the message queue.
1091                  */
1092
1093                 if (*command_status == TF_COMMAND_STATE_PENDING) {
1094                         u32 command_size;
1095                         u32 queue_words_count;
1096                         u32 i;
1097                         u32 first_free_command;
1098                         u32 first_command;
1099
1100                         spin_lock(&comm->lock);
1101
1102                         first_command = tf_read_reg32(
1103                                 &comm->l1_buffer->first_command);
1104                         first_free_command = tf_read_reg32(
1105                                 &comm->l1_buffer->first_free_command);
1106
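                        /*
                         * command_queue mirrors the answer queue: the driver
                         * advances first_free_command as it writes commands;
                         * the Secure World advances first_command as it
                         * consumes them.
                         */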
1107                         queue_words_count = first_free_command - first_command;
1108                         command_size     = command->header.message_size +
1109                                 sizeof(struct tf_command_header)/sizeof(u32);
1110                         if ((queue_words_count + command_size) <
1111                                 TF_N_MESSAGE_QUEUE_CAPACITY) {
1112                                 /*
1113                                 * Command queue is not full.
1114                                 * If the Command queue is full,
1115                                 * the command will be copied at
1116                                 * another iteration
1117                                 * of the current function.
1118                                 */
1119
1120                                 /*
1121                                 * Change the conn state
1122                                 */
1123                                 if (connection == NULL)
1124                                         goto copy;
1125
1126                                 spin_lock(&(connection->state_lock));
1127
1128                                 if ((connection->state ==
1129                                 TF_CONN_STATE_NO_DEVICE_CONTEXT)
1130                                 &&
1131                                 (command->header.message_type ==
1132                                 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
1133
1134                                         dprintk(KERN_INFO
1135                                 "tf_copy_command(%p):"
1136                                 "Conn state is DEVICE_CONTEXT_SENT\n",
1137                                  connection);
1138                                         connection->state =
1139                         TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
1140                                 } else if ((connection->state !=
1141                                 TF_CONN_STATE_VALID_DEVICE_CONTEXT)
1142                                 &&
1143                                 (command->header.message_type !=
1144                                 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
1145                                         /* The connection
1146                                         * is no longer valid.
1147                                         * We may not send any command on it,
1148                                         * not even another
1149                                         * DESTROY_DEVICE_CONTEXT.
1150                                         */
1151                                         dprintk(KERN_INFO
1152                                                 "[pid=%d] tf_copy_command(%p): "
1153                                                 "Connection no longer valid. "
1154                                                 "ABORT\n",
1155                                                 current->pid, connection);
1156                                         *command_status =
1157                                                 TF_COMMAND_STATE_ABORTED;
1158                                         spin_unlock(
1159                                                 &(connection->state_lock));
1160                                         spin_unlock(
1161                                                 &comm->lock);
1162                                         return;
1163                                 } else if (
1164                                         (command->header.message_type ==
1165                                 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
1166                                 (connection->state ==
1167                                 TF_CONN_STATE_VALID_DEVICE_CONTEXT)
1168                                                 ) {
1169                                         dprintk(KERN_INFO
1170                                         "[pid=%d] tf_copy_command(%p): "
1171                                         "Conn state is "
1172                                         "DESTROY_DEVICE_CONTEXT_SENT\n",
1173                                         current->pid, connection);
1174                                         connection->state =
1175                         TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
1176                                         }
1177                                         spin_unlock(&(connection->state_lock));
1178 copy:
1179                                         /*
1180                                         * Copy the command to L1 Buffer
1181                                         */
1182                                         dprintk(KERN_INFO
1183                                 "[pid=%d] tf_copy_command(%p): "
1184                                 "Write Message in the queue\n",
1185                                 current->pid, command);
1186                                         tf_dump_command(command);
1187
1188                                         for (i = 0; i < command_size; i++)
1189                                                 comm->l1_buffer->command_queue[
1190                                                 (first_free_command + i) %
1191                                                 TF_N_MESSAGE_QUEUE_CAPACITY] =
1192                                                 ((uint32_t *) command)[i];
1193
1194                                         *command_status =
1195                                                 TF_COMMAND_STATE_SENT;
1196                                         first_free_command += command_size;
1197
1198                                         tf_write_reg32(
1199                                                 &comm->
1200                                                 l1_buffer->first_free_command,
1201                                                 first_free_command);
1202                         }
1203                         spin_unlock(&comm->lock);
1204                 }
1205         }
1206 }
1207
1208 /*
1209  * Sends the specified message through the specified communication channel.
1210  *
1211  * This function sends the command and waits for the answer
1212  *
1213  * Returns zero upon successful completion, or an appropriate error code upon
1214  * failure.
1215  */
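/*
 * The body is a loop: copy any available answers, try to queue the
 * command, then either yield to the Secure World or sleep on the wait
 * queue until the matching answer (flagged by answer_copied) arrives.
 */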
1216 static int tf_send_recv(struct tf_comm *comm,
1217         union tf_command *command,
1218         struct tf_answer_struct *answerStruct,
1219         struct tf_connection *connection,
1220         int bKillable
1221         )
1222 {
1223         int result;
1224         u64 timeout;
1225         signed long nRelativeTimeoutJiffies;
1226         bool wait_prepared = false;
1227         enum TF_COMMAND_STATE command_status = TF_COMMAND_STATE_PENDING;
1228         DEFINE_WAIT(wait);
1229 #ifdef CONFIG_FREEZER
1230         unsigned long saved_flags;
1231 #endif
1232         dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
1233                  current->pid, command);
1234
1235 #ifdef CONFIG_FREEZER
1236         saved_flags = current->flags;
1237         current->flags |= PF_KTHREAD;
1238 #endif
1239
1240         /*
1241          * Read all answers from the answer queue
1242          */
1243 copy_answers:
1244         tf_copy_answers(comm);
1245
1246         tf_copy_command(comm, command, connection, &command_status);
1247
1248         /*
1249          * Notify all waiting threads
1250          */
1251         wake_up(&(comm->wait_queue));
1252
1253 #ifdef CONFIG_FREEZER
1254         if (unlikely(freezing(current))) {
1255
1256                 dprintk(KERN_INFO
1257                         "Entering refrigerator.\n");
1258                 try_to_freeze();
1259                 dprintk(KERN_INFO
1260                         "Left refrigerator.\n");
1261                 goto copy_answers;
1262         }
1263 #endif
1264
1265 #ifndef CONFIG_PREEMPT
1266         if (need_resched())
1267                 schedule();
1268 #endif
1269
1270 #ifdef CONFIG_TF_ZEBRA
1271         /*
1272          * Handle RPC (if any)
1273          */
1274         if (tf_rpc_execute(comm) == RPC_NON_YIELD)
1275                 goto schedule_secure_world;
1276 #endif
1277
1278         /*
1279          * Join wait queue
1280          */
1281         /*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
1282                 current->pid, command);*/
1283         prepare_to_wait(&comm->wait_queue, &wait,
1284                         bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1285         wait_prepared = true;
1286
1287         /*
1288          * Check if our answer is available
1289          */
1290         if (command_status == TF_COMMAND_STATE_ABORTED) {
1291                 /* Not waiting for an answer, return error code */
1292                 result = -EINTR;
1293                 dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
1294                         "Command status is ABORTED. "
1295                         "Exit with 0x%x\n",
1296                         current->pid, result);
1297                 goto exit;
1298         }
1299         if (answerStruct->answer_copied) {
1300                 dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
1301                         "Received answer (type 0x%02X)\n",
1302                         current->pid,
1303                         answerStruct->answer->header.message_type);
1304                 result = 0;
1305                 goto exit;
1306         }
1307
1308         /*
1309          * Check if a signal is pending
1310          */
1311         if (bKillable && (sigkill_pending())) {
1312                 if (command_status == TF_COMMAND_STATE_PENDING)
1313                         /*Command was not sent. */
1314                         result = -EINTR;
1315                 else
1316                         /* Command was sent but no answer was received yet. */
1317                         result = -EIO;
1318
1319                 dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
1320                         "Signal Pending. Return error %d\n",
1321                         current->pid, result);
1322                 goto exit;
1323         }
1324
1325         /*
1326          * Check if secure world is schedulable. It is schedulable if at
1327          * least one of the following conditions holds:
1328          * + it is still initializing (TF_COMM_FLAG_L1_SHARED_ALLOCATED
1329          *   is not set);
1330          * + there is a command in the queue;
1331          * + the secure world timeout is zero.
1332          */
1333         if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
1334                 u32 first_free_command;
1335                 u32 first_command;
1336                 spin_lock(&comm->lock);
1337                 first_command = tf_read_reg32(
1338                         &comm->l1_buffer->first_command);
1339                 first_free_command = tf_read_reg32(
1340                         &comm->l1_buffer->first_free_command);
1341                 spin_unlock(&comm->lock);
1342                 tf_read_timeout(comm, &timeout);
1343                 if ((first_free_command == first_command) &&
1344                          (tf_test_s_timeout(timeout,
1345                         &nRelativeTimeoutJiffies) == 0))
1346                         /*
1347                          * If the command queue is empty and the timeout has
1348                          * not expired, the Secure World is not schedulable
1349                          */
1350                         goto wait;
1351         }
1352
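        /*
         * The Secure World is schedulable (it is still initializing, a
         * command is queued, or its timeout is zero): leave the wait queue
         * and yield to it.
         */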
1353         finish_wait(&comm->wait_queue, &wait);
1354         wait_prepared = false;
1355
1356         /*
1357          * Yield to the Secure World
1358          */
1359 #ifdef CONFIG_TF_ZEBRA
1360 schedule_secure_world:
1361 #endif
1362
1363         result = tf_schedule_secure_world(comm);
1364         if (result < 0)
1365                 goto exit;
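        /* The Secure World has run; go back and drain its answer queue. */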
1366         goto copy_answers;
1367
1368 wait:
1369         if (bKillable && (sigkill_pending())) {
1370                 if (command_status == TF_COMMAND_STATE_PENDING)
1371                         result = -EINTR; /* Command was not sent. */
1372                 else
1373                         /* Command was sent but no answer was received yet. */
1374                         result = -EIO;
1375
1376                 dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
1377                         "Signal Pending while waiting. Return error %d\n",
1378                         current->pid, result);
1379                 goto exit;
1380         }
1381
1382         if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
1383                 dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
1384                         "prepare to sleep infinitely\n", current->pid);
1385         else
1386                 dprintk(KERN_INFO "tf_send_recv: "
1387                         "prepare to sleep 0x%lx jiffies\n",
1388                         nRelativeTimeoutJiffies);
1389
1390         /* go to sleep */
1391         if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
1392                 dprintk(KERN_INFO
1393                         "tf_send_recv: timeout expired\n");
1394         else
1395                 dprintk(KERN_INFO
1396                         "tf_send_recv: signal delivered\n");
1397
1398         finish_wait(&comm->wait_queue, &wait);
1399         wait_prepared = false;
1400         goto copy_answers;
1401
1402 exit:
1403         if (wait_prepared) {
1404                 finish_wait(&comm->wait_queue, &wait);
1405                 wait_prepared = false;
1406         }
1407
1408         return result;
1409 }
1410
1411 /*
1412  * Sends the specified message through the specified communication channel.
1413  *
1414  * This function sends the message and waits for the corresponding answer.
1415  * It may return before the answer arrives if a signal needs to be delivered.
1416  *
1417  * Returns zero upon successful completion, or an appropriate error code upon
1418  * failure.
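 *
 * Minimal caller sketch (illustrative only; it reuses the
 * DESTROY_DEVICE_CONTEXT layout that is also built further down in this
 * function):
 *
 *        union tf_command command;
 *        union tf_answer answer;
 *        int error;
 *
 *        command.header.message_type =
 *                TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
 *        command.header.message_size =
 *                (sizeof(struct tf_command_destroy_device_context) -
 *                 sizeof(struct tf_command_header)) / sizeof(u32);
 *        command.destroy_device_context.device_context =
 *                connection->device_context;
 *        error = tf_send_receive(comm, &command, &answer, connection, true);
 *
 * On success (error == 0) the answer union has been filled in and
 * answer.header.message_type identifies which answer was received.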
1419  */
1420 int tf_send_receive(struct tf_comm *comm,
1421           union tf_command *command,
1422           union tf_answer *answer,
1423           struct tf_connection *connection,
1424           bool bKillable)
1425 {
1426         int error;
1427         struct tf_answer_struct answerStructure;
1428 #ifdef CONFIG_SMP
1429         long ret_affinity;
1430         cpumask_t saved_cpu_mask;
1431         cpumask_t local_cpu_mask = CPU_MASK_NONE;
1432 #endif
1433
1434         answerStructure.answer = answer;
1435         answerStructure.answer_copied = false;
1436
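        /*
         * The Secure World echoes operation_id back in its answer; storing
         * the address of answerStructure there lets the answer path (see
         * tf_copy_answers()) find this caller's tf_answer_struct and mark
         * answer_copied once the answer arrives.
         */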
1437         if (command != NULL)
1438                 command->header.operation_id = (u32) &answerStructure;
1439
1440         dprintk(KERN_INFO "tf_send_receive\n");
1441
1442 #ifdef CONFIG_TF_ZEBRA
1443         if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
1444                 dprintk(KERN_ERR "tf_send_receive(%p): "
1445                         "Secure world not started\n", comm);
1446
1447                 return -EFAULT;
1448         }
1449 #endif
1450
1451         if (test_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags)) != 0) {
1452                 dprintk(KERN_DEBUG
1453                         "tf_send_receive: Flag Terminating is set\n");
1454                 return 0;
1455         }
1456
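        /*
         * Pin the calling thread on CPU #0 for the duration of the exchange,
         * presumably because the Secure World monitor only runs on that
         * core; the original affinity is restored at the exit label.
         */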
1457 #ifdef CONFIG_SMP
1458         cpu_set(0, local_cpu_mask);
1459         sched_getaffinity(0, &saved_cpu_mask);
1460         ret_affinity = sched_setaffinity(0, &local_cpu_mask);
1461         if (ret_affinity != 0)
1462                 dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX\n", ret_affinity);
1463 #endif
1464
1465
1466         /*
1467          * Send the command
1468          */
1469         error = tf_send_recv(comm,
1470                 command, &answerStructure, connection, bKillable);
1471
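        /*
         * The exchange above was not killable, so it ran to completion even
         * if a SIGKILL arrived meanwhile.  If the Secure World just created
         * a device context for a process that is now dying, destroy that
         * context again so it is not leaked.
         */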
1472         if (!bKillable && sigkill_pending()) {
1473                 if ((command->header.message_type ==
1474                         TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
1475                         (answer->create_device_context.error_code ==
1476                                 S_SUCCESS)) {
1477
1478                         /*
1479                          * CREATE_DEVICE_CONTEXT was interrupted.
1480                          */
1481                         dprintk(KERN_INFO "tf_send_receive: "
1482                                 "sending DESTROY_DEVICE_CONTEXT\n");
1483                         answerStructure.answer = answer;
1484                         answerStructure.answer_copied = false;
1485
1486                         command->header.message_type =
1487                                 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
1488                         command->header.message_size =
1489                                 (sizeof(struct
1490                                         tf_command_destroy_device_context) -
1491                                  sizeof(struct tf_command_header))/sizeof(u32);
1492                         command->header.operation_id =
1493                                 (u32) &answerStructure;
1494                         command->destroy_device_context.device_context =
1495                                 answer->create_device_context.
1496                                         device_context;
1497
1498                         goto destroy_context;
1499                 }
1500         }
1501
1502         if (error == 0) {
1503                 /*
1504                  * tf_send_recv returned Success.
1505                  */
1506                 if (command->header.message_type ==
1507                 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
1508                         spin_lock(&(connection->state_lock));
1509                         connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
1510                         spin_unlock(&(connection->state_lock));
1511                 } else if (command->header.message_type ==
1512                 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
1513                         spin_lock(&(connection->state_lock));
1514                         connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1515                         spin_unlock(&(connection->state_lock));
1516                 }
1517         } else if (error  == -EINTR) {
1518                 /*
1519                 * No command was sent, return failure.
1520                 */
1521                 dprintk(KERN_ERR
1522                         "tf_send_receive: "
1523                         "tf_send_recv failed (error %d)!\n",
1524                         error);
1525         } else if (error  == -EIO) {
1526                 /*
1527                 * A command was sent but its answer is still pending.
1528                 */
1529
1530                 /* means bKillable is true */
1531                 dprintk(KERN_ERR
1532                         "tf_send_receive: "
1533                         "tf_send_recv interrupted (error %d). "
1534                         "Send DESTROY_DEVICE_CONTEXT.\n", error);
1535
1536                 /* Send the DESTROY_DEVICE_CONTEXT. */
1537                 answerStructure.answer = answer;
1538                 answerStructure.answer_copied = false;
1539
1540                 command->header.message_type =
1541                         TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
1542                 command->header.message_size =
1543                         (sizeof(struct tf_command_destroy_device_context) -
1544                                 sizeof(struct tf_command_header))/sizeof(u32);
1545                 command->header.operation_id =
1546                         (u32) &answerStructure;
1547                 command->destroy_device_context.device_context =
1548                         connection->device_context;
1549
1550                 error = tf_send_recv(comm,
1551                         command, &answerStructure, connection, false);
1552                 if (error == -EINTR) {
1553                         /*
1554                         * Another thread already sent
1555                         * DESTROY_DEVICE_CONTEXT.
1556                         * We must still wait for the answer
1557                         * to the original command.
1558                         */
1559                         command = NULL;
1560                         goto destroy_context;
1561                 } else {
1562                          /* An answer was received.
1563                          * Check if it is the answer
1564                          * to the DESTROY_DEVICE_CONTEXT.
1565                          */
1566                          spin_lock(&comm->lock);
1567                          if (answer->header.message_type !=
1568                          TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
1569                                 answerStructure.answer_copied = false;
1570                          }
1571                          spin_unlock(&comm->lock);
1572                          if (!answerStructure.answer_copied) {
1573                                 /* Answer to DESTROY_DEVICE_CONTEXT
1574                                 * was not yet received.
1575                                 * Wait for the answer.
1576                                 */
1577                                 dprintk(KERN_INFO
1578                                         "[pid=%d] tf_send_receive: "
1579                                         "Answer to DESTROY_DEVICE_CONTEXT "
1580                                         "not yet received. Retry\n",
1581                                         current->pid);
1582                                 command = NULL;
1583                                 goto destroy_context;
1584                          }
1585                 }
1586         }
1587
1588         dprintk(KERN_INFO "tf_send_receive(): Message answer ready\n");
1589         goto exit;
1590
1591 destroy_context:
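        /*
         * command may be NULL here; in that case tf_send_recv() only waits
         * for the answer that is still pending for this caller.
         */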
1592         error = tf_send_recv(comm,
1593                 command, &answerStructure, connection, false);
1594
1595         /*
1596          * tf_send_recv cannot return an error because
1597          * it's not killable and not within a connection
1598          */
1599         BUG_ON(error != 0);
1600
1601         /* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
1602         spin_lock(&(connection->state_lock));
1603         connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1604         spin_unlock(&(connection->state_lock));
1605
1606 exit:
1607
1608 #ifdef CONFIG_SMP
1609         ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
1610         if (ret_affinity != 0)
1611                 dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX\n", ret_affinity);
1612 #endif
1613         return error;
1614 }
1615
1616 /*----------------------------------------------------------------------------
1617  * Power management
1618  *----------------------------------------------------------------------------*/
1619
1620
1621 /*
1622  * Handles all the power management calls.
1623  * The operation is the type of power management
1624  * operation to be performed.
1625  *
1626  * This routine will only return if a failure occurred or if
1627  * the requested power management operation is "resume".
1628  * "Hibernate" and "Shutdown" are expected to block in the
1629  * corresponding SMC to the Secure World.
1630  */
1631 int tf_power_management(struct tf_comm *comm,
1632         enum TF_POWER_OPERATION operation)
1633 {
1634         u32 status;
1635         int error = 0;
1636
1637         dprintk(KERN_INFO "tf_power_management(%d)\n", operation);
1638
1639 #ifdef CONFIG_TF_ZEBRA
1640         if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
1641                 dprintk(KERN_INFO "tf_power_management(%p): "
1642                         "succeeded (not started)\n", comm);
1643
1644                 return 0;
1645         }
1646 #endif
1647
1648         status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
1649                 & TF_STATUS_POWER_STATE_MASK)
1650                 >> TF_STATUS_POWER_STATE_SHIFT);
1651
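        /*
         * Shutdown and hibernate requests are only honoured while the Secure
         * World reports the ACTIVE power state; resume is always attempted.
         */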
1652         switch (operation) {
1653         case TF_POWER_OPERATION_SHUTDOWN:
1654                 switch (status) {
1655                 case TF_POWER_MODE_ACTIVE:
1656                         error = tf_pm_shutdown(comm);
1657
1658                         if (error) {
1659                                 dprintk(KERN_ERR "tf_power_management(): "
1660                                         "Failed with error code 0x%08x\n",
1661                                         error);
1662                                 goto error;
1663                         }
1664                         break;
1665
1666                 default:
1667                         goto not_allowed;
1668                 }
1669                 break;
1670
1671         case TF_POWER_OPERATION_HIBERNATE:
1672                 switch (status) {
1673                 case TF_POWER_MODE_ACTIVE:
1674                         error = tf_pm_hibernate(comm);
1675
1676                         if (error) {
1677                                 dprintk(KERN_ERR "tf_power_management(): "
1678                                         "Failed with error code 0x%08x\n",
1679                                         error);
1680                                 goto error;
1681                         }
1682                         break;
1683
1684                 default:
1685                         goto not_allowed;
1686                 }
1687                 break;
1688
1689         case TF_POWER_OPERATION_RESUME:
1690                 error = tf_pm_resume(comm);
1691
1692                 if (error != 0) {
1693                         dprintk(KERN_ERR "tf_power_management(): "
1694                                 "Failed with error code 0x%08x\n",
1695                                 error);
1696                         goto error;
1697                 }
1698                 break;
1699         }
1700
1701         dprintk(KERN_INFO "tf_power_management(): succeeded\n");
1702         return 0;
1703
1704 not_allowed:
1705         dprintk(KERN_ERR "tf_power_management(): "
1706                 "Power command not allowed in current "
1707                 "Secure World state %d\n", status);
1708         error = -ENOTTY;
1709 error:
1710         return error;
1711 }
1712