arm: tegra: add Trusted Foundations hooks and driver
Chris Johnson [Sat, 19 Nov 2011 00:14:07 +0000 (16:14 -0800)]
Add the CONFIG_TRUSTED_FOUNDATIONS build option and calls to issue
SMCs to the TL secure monitor (used when state that is not writable
by non-secure code needs to be updated).

Make security/tf_driver, the part of the TL framework used to
interact with secure services, an optional part of the build.

Bug 883391

Change-Id: I9c6c14ff457fb3a0c612d558fe731a17c2480750
Signed-off-by: Chris Johnson <cwj@nvidia.com>
Reviewed-on: http://git-master/r/65616
Reviewed-by: Varun Colbert <vcolbert@nvidia.com>
Tested-by: Varun Colbert <vcolbert@nvidia.com>

26 files changed:
arch/arm/mach-tegra/common.c
arch/arm/mach-tegra/headsmp.S
arch/arm/mach-tegra/pm.c
arch/arm/mach-tegra/pm.h
arch/arm/mach-tegra/reset.c
arch/arm/mach-tegra/reset.h
arch/arm/mach-tegra/sleep-t2.S
arch/arm/mach-tegra/sleep-t3.S
arch/arm/mach-tegra/sleep.S
arch/arm/mach-tegra/sleep.h
arch/arm/mm/proc-v7.S
security/Kconfig
security/Makefile
security/tf_driver/Kconfig [new file with mode: 0644]
security/tf_driver/Makefile [new file with mode: 0644]
security/tf_driver/s_version.h [new file with mode: 0644]
security/tf_driver/tf_comm.c [new file with mode: 0644]
security/tf_driver/tf_comm.h [new file with mode: 0644]
security/tf_driver/tf_comm_tz.c [new file with mode: 0644]
security/tf_driver/tf_conn.c [new file with mode: 0644]
security/tf_driver/tf_conn.h [new file with mode: 0644]
security/tf_driver/tf_defs.h [new file with mode: 0644]
security/tf_driver/tf_device.c [new file with mode: 0644]
security/tf_driver/tf_protocol.h [new file with mode: 0644]
security/tf_driver/tf_util.c [new file with mode: 0644]
security/tf_driver/tf_util.h [new file with mode: 0644]

index 7fcf131..642d0ae 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 #include <linux/bitops.h>
+#include <linux/sched.h>
 
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/system.h>
@@ -185,12 +186,95 @@ static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
        { NULL,         NULL,           0,              0},
 };
 
+#if defined(CONFIG_TRUSTED_FOUNDATIONS) && defined(CONFIG_CACHE_L2X0)
+static void tegra_cache_smc(bool enable, u32 arg)
+{
+       void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+       bool need_affinity_switch;
+       bool can_switch_affinity;
+       bool l2x0_enabled;
+       cpumask_t local_cpu_mask;
+       cpumask_t saved_cpu_mask;
+       unsigned long flags;
+       long ret;
+
+       /*
+        * ISSUE: Some PL310 controller registers must be written
+        *              from the Secure context (and from CPU0)!
+        *
+        * When called from the Normal world, we either get an abort
+        * or the write does nothing. Accesses that must be made in
+        * Secure:
+        *      - Write to the Control register (L2X0_CTRL==0x100)
+        *      - Write to the Auxiliary Control register
+        *              (L2X0_AUX_CTRL==0x104)
+        *      - Invalidate all entries (L2X0_INV_WAY==0x77C),
+        *              mandatory at boot time.
+        *      - The Tag and Data RAM Latency Control registers
+        *              (0x108 & 0x10C) must be written in Secure.
+        */
+       need_affinity_switch = (smp_processor_id() != 0);
+       can_switch_affinity = !irqs_disabled();
+
+       WARN_ON(need_affinity_switch && !can_switch_affinity);
+       if (need_affinity_switch && can_switch_affinity) {
+               cpu_set(0, local_cpu_mask);
+               sched_getaffinity(0, &saved_cpu_mask);
+               ret = sched_setaffinity(0, &local_cpu_mask);
+               WARN_ON(ret != 0);
+       }
+
+       local_irq_save(flags);
+       l2x0_enabled = readl_relaxed(p + L2X0_CTRL) & 1;
+       if (enable && !l2x0_enabled)
+               tegra_generic_smc(0xFFFFF100, 0x00000001, arg);
+       else if (!enable && l2x0_enabled)
+               tegra_generic_smc(0xFFFFF100, 0x00000002, arg);
+       local_irq_restore(flags);
+
+       if (need_affinity_switch && can_switch_affinity) {
+               ret = sched_setaffinity(0, &saved_cpu_mask);
+               WARN_ON(ret != 0);
+       }
+}
+
+static void tegra_l2x0_disable(void)
+{
+       unsigned long flags;
+       static u32 l2x0_way_mask;
+
+       if (!l2x0_way_mask) {
+               void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+               u32 aux_ctrl;
+               u32 ways;
+
+               aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
+               ways = (aux_ctrl & (1 << 16)) ? 16 : 8;
+               l2x0_way_mask = (1 << ways) - 1;
+       }
+
+       local_irq_save(flags);
+       tegra_cache_smc(false, l2x0_way_mask);
+       local_irq_restore(flags);
+}
+#endif /* CONFIG_TRUSTED_FOUNDATIONS && defined(CONFIG_CACHE_L2X0) */
+
 void tegra_init_cache(bool init)
 {
 #ifdef CONFIG_CACHE_L2X0
        void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
        u32 aux_ctrl;
 
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+       /* issue the SMC to enable the L2 */
+       aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
+       tegra_cache_smc(true, aux_ctrl);
+
+       /* after init, reread aux_ctrl and register handlers */
+       aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
+       l2x0_init(p, aux_ctrl, 0xFFFFFFFF);
+
+       /* override outer_disable() with our disable */
+       outer_cache.disable = tegra_l2x0_disable;
+#else
 #if defined(CONFIG_ARCH_TEGRA_2x_SOC)
        writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
        writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);
@@ -219,6 +303,8 @@ void tegra_init_cache(bool init)
                aux_ctrl |= 0x7C000001;
                l2x0_init(p, aux_ctrl, 0x8200c3fe);
        }
+       l2x0_enable();
+#endif
 #endif
 }
 
index faea025..4763528 100644 (file)
@@ -86,6 +86,11 @@ ENTRY(tegra_resume)
        orr     r1, r1, #1
        str     r1, [r0]
 
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+       /* wake up (FIXME: should this specify SMC arguments?) */
+       bl      tegra_generic_smc
+#endif
+
        b       tegra_cpu_resume_phys
 ENDPROC(tegra_resume)
 #endif
index 8028c78..ca3baad 100644 (file)
@@ -66,7 +66,6 @@
 #include "reset.h"
 #include "sleep.h"
 #include "timer.h"
-#include "reset.h"
 
 struct suspend_context {
        /*
@@ -525,6 +524,36 @@ bool tegra_set_cpu_in_lp2(int cpu)
        return last_cpu;
 }
 
+static void tegra_sleep_core(enum tegra_suspend_mode mode,
+                            unsigned long v2p)
+{
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+       if (mode == TEGRA_SUSPEND_LP0) {
+               tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE3,
+                                 virt_to_phys(tegra_resume));
+       } else {
+               tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE6,
+                                 (TEGRA_RESET_HANDLER_BASE +
+                                  tegra_cpu_reset_handler_offset));
+       }
+#endif
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+       tegra2_sleep_core(v2p);
+#else
+       tegra3_sleep_core(v2p);
+#endif
+}
+
+static inline void tegra_sleep_cpu(unsigned long v2p)
+{
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+       tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE4,
+                         (TEGRA_RESET_HANDLER_BASE +
+                          tegra_cpu_reset_handler_offset));
+#endif
+       tegra_sleep_cpu_save(v2p);
+}
+
 unsigned int tegra_idle_lp2_last(unsigned int sleep_time, unsigned int flags)
 {
        u32 mode;       /* hardware + software power mode flags */
@@ -569,7 +598,6 @@ unsigned int tegra_idle_lp2_last(unsigned int sleep_time, unsigned int flags)
 
 #ifdef CONFIG_CACHE_L2X0
        tegra_init_cache(false);
-       l2x0_enable();
 #endif
        tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_switch);
        restore_cpu_complex(mode);
@@ -810,7 +838,7 @@ int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags)
        if (mode == TEGRA_SUSPEND_LP2)
                tegra_sleep_cpu(PLAT_PHYS_OFFSET - PAGE_OFFSET);
        else
-               tegra_sleep_core(PLAT_PHYS_OFFSET - PAGE_OFFSET);
+               tegra_sleep_core(mode, PLAT_PHYS_OFFSET - PAGE_OFFSET);
 
        tegra_init_cache(true);
 
index 33b56d5..32b504f 100644 (file)
@@ -194,6 +194,10 @@ extern bool tegra_all_cpus_booted __read_mostly;
 #define tegra_all_cpus_booted (true)
 #endif
 
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+void tegra_generic_smc(u32 type, u32 subtype, u32 arg);
+#endif
+
 /* The debug channel uart base physical address */
 extern unsigned long  debug_uart_port_base;
 
index 8c07651..c44a3de 100644 (file)
 
 #include "reset.h"
 #include "sleep.h"
+#include "pm.h"
 
 static bool is_enabled;
 
-void tegra_cpu_reset_handler_enable(void)
+static void tegra_cpu_reset_handler_enable(void)
 {
-       void __tegra_cpu_reset_handler(void);
-       void __tegra_cpu_reset_handler_start(void);
-       void __tegra_cpu_reset_handler_end(void);
+       void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_BASE);
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
        void __iomem *evp_cpu_reset =
                IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE + 0x100);
-       void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_BASE);
        void __iomem *sb_ctrl = IO_ADDRESS(TEGRA_SB_BASE);
-       unsigned long cpu_reset_handler_size =
-               __tegra_cpu_reset_handler_end - __tegra_cpu_reset_handler_start;
-       unsigned long cpu_reset_handler_offset =
-               __tegra_cpu_reset_handler - __tegra_cpu_reset_handler_start;
        unsigned long reg;
-
+#endif
        BUG_ON(is_enabled);
-       BUG_ON(cpu_reset_handler_size > TEGRA_RESET_HANDLER_SIZE);
+       BUG_ON(tegra_cpu_reset_handler_size > TEGRA_RESET_HANDLER_SIZE);
 
        memcpy(iram_base, (void *)__tegra_cpu_reset_handler_start,
-              cpu_reset_handler_size);
+               tegra_cpu_reset_handler_size);
 
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+       tegra_generic_smc(0xFFFFF200,
+               TEGRA_RESET_HANDLER_BASE + tegra_cpu_reset_handler_offset, 0);
+#else
        /* NOTE: This must be the one and only write to the EVP CPU reset
                 vector in the entire system. */
-       writel(TEGRA_RESET_HANDLER_BASE + cpu_reset_handler_offset,
+       writel(TEGRA_RESET_HANDLER_BASE + tegra_cpu_reset_handler_offset,
                evp_cpu_reset);
        wmb();
        reg = readl(evp_cpu_reset);
@@ -62,8 +61,9 @@ void tegra_cpu_reset_handler_enable(void)
        reg = readl(sb_ctrl);
        reg |= 2;
        writel(reg, sb_ctrl);
-       is_enabled = true;
        wmb();
+#endif
+       is_enabled = true;
 }
 
 #ifdef CONFIG_PM_SLEEP
index 08a4480..2af17c3 100644 (file)
@@ -34,6 +34,8 @@
 extern unsigned long __tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE];
 
 void __tegra_cpu_reset_handler_start(void);
+void __tegra_cpu_reset_handler(void);
+void __tegra_cpu_reset_handler_end(void);
 void tegra_secondary_startup(void);
 
 #ifdef CONFIG_PM_SLEEP
@@ -45,12 +47,19 @@ void tegra_secondary_startup(void);
                ((u32)__tegra_cpu_reset_handler_data - \
                 (u32)__tegra_cpu_reset_handler_start))))
 
+#define tegra_cpu_reset_handler_offset \
+               ((u32)__tegra_cpu_reset_handler - \
+                (u32)__tegra_cpu_reset_handler_start)
+
+#define tegra_cpu_reset_handler_size \
+               (__tegra_cpu_reset_handler_end - \
+                __tegra_cpu_reset_handler_start)
+
 #define tegra_cpu_lp2_mask ((cpumask_t *)(IO_ADDRESS(TEGRA_RESET_HANDLER_BASE + \
                ((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_LP2] - \
                 (u32)__tegra_cpu_reset_handler_start))))
 #endif
 
-void tegra_cpu_reset_handler_enable(void);
 void __init tegra_cpu_reset_handler_init(void);
 
 #ifdef CONFIG_PM_SLEEP
index 298f0a1..d350a17 100644 (file)
@@ -307,13 +307,15 @@ ENTRY(tegra2_lp1_reset)
         * enable PLLP.
         */
        mov32   r0, TEGRA_CLK_RESET_BASE
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+       /* secure code handles 32KHz to CLKM/OSC clock switch */
        mov     r1, #(1 << 28)
        str     r1, [r0, #CLK_RESET_SCLK_BURST]
        str     r1, [r0, #CLK_RESET_CCLK_BURST]
        mov     r1, #0
        str     r1, [r0, #CLK_RESET_SCLK_DIVIDER]
        str     r1, [r0, #CLK_RESET_CCLK_DIVIDER]
-
+#endif
        ldr     r1, [r0, #CLK_RESET_PLLM_BASE]
        tst     r1, #(1 << 30)
        orreq   r1, r1, #(1 << 30)
index af5b6ff..b0960be 100644 (file)
@@ -289,14 +289,15 @@ ENTRY(tegra3_lp1_reset)
         * IRAM when this code is executed; immediately switch to CLKM and
         * enable PLLP, PLLM, PLLC, PLLA and PLLX. */
        mov32   r0, TEGRA_CLK_RESET_BASE
-
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+       /* secure code handles 32KHz to CLKM/OSC clock switch */
        mov     r1, #(1<<28)
        str     r1, [r0, #CLK_RESET_SCLK_BURST]
        str     r1, [r0, #CLK_RESET_CCLK_BURST]
        mov     r1, #0
        str     r1, [r0, #CLK_RESET_SCLK_DIVIDER]
        str     r1, [r0, #CLK_RESET_CCLK_DIVIDER]
-
+#endif
        /* enable PLLM via PMC */
        mov32   r2, TEGRA_PMC_BASE
        ldr     r1, [r2, #PMC_PLLP_WB0_OVERRIDE]
index e041f2d..5ae951f 100644 (file)
@@ -365,12 +365,12 @@ ENTRY(tegra_cpu_save)
 ENDPROC(tegra_cpu_save)
 
 /*
- * tegra_sleep_cpu(unsigned long v2p)
+ * tegra_sleep_cpu_save(unsigned long v2p)
  *
  * enters suspend in LP2 by turning off the mmu and jumping to
  * tegra?_tear_down_cpu
  */
-ENTRY(tegra_sleep_cpu)
+ENTRY(tegra_sleep_cpu_save)
        mov     r12, pc                 @ return here is via r12
        b       tegra_cpu_save
 
@@ -381,7 +381,7 @@ ENTRY(tegra_sleep_cpu)
 #endif
        add     r1, r1, r0
        b       tegra_turn_off_mmu
-ENDPROC(tegra_sleep_cpu)
+ENDPROC(tegra_sleep_cpu_save)
 
 /*
  * tegra_cpu_resume
@@ -461,3 +461,30 @@ ENTRY(tegra_cpu_pllp)
        mov     pc, lr
 ENDPROC(tegra_cpu_pllp)
 #endif
+
+#ifdef CONFIG_TRUSTED_FOUNDATIONS
+/*
+ * tegra_generic_smc
+ *
+ * r0 = smc type
+ * r1 = smc subtype
+ * r2 = argument passed to smc
+ *
+ * issues an SMC (secure monitor call) instruction with
+ * the specified parameters.
+ */
+ENTRY(tegra_generic_smc)
+       adr     r3, __tegra_smc_stack
+       stmia   r3, {r4-r12, lr}
+       mov     r3, #0
+       mov     r4, #0
+       dsb
+       smc     #0
+       adr     r3, __tegra_smc_stack
+       ldmia   r3, {r4-r12, pc}
+ENDPROC(tegra_generic_smc)
+       .type   __tegra_smc_stack, %object
+__tegra_smc_stack:
+       .long   0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+       .size   __tegra_smc_stack, . - __tegra_smc_stack
+#endif
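
The helper above is the single entry point for the secure monitor calls
added by this patch; its C prototype is declared in pm.h. A minimal
caller sketch (illustrative only: the constants reuse the cache-enable
operation already issued from common.c, and example_enable_l2 is a
hypothetical name, not part of this patch):

    void tegra_generic_smc(u32 type, u32 subtype, u32 arg);

    /* sketch: enable the L2 through the secure monitor, as common.c does */
    static void example_enable_l2(u32 aux_ctrl)
    {
            tegra_generic_smc(0xFFFFF100, 0x00000001, aux_ctrl);
    }
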
index cd982d3..7b8f84d 100644 (file)
 #else
 #define USE_TEGRA_CPU_SUSPEND  0
 #endif
-
-#define TEGRA_PL310_VIRT       (TEGRA_ARM_PL310_BASE - IO_CPU_PHYS + IO_CPU_VIRT)
-
-/* FIXME: The core associated with this should be removed if our change to
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
+/* FIXME: The code associated with this should be removed if our change to
    save the diagnostic register in the CPU context is accepted. */
 #define USE_TEGRA_DIAG_REG_SAVE        1
+#else
+#define USE_TEGRA_DIAG_REG_SAVE        0
+#endif
 
 #define TEGRA_POWER_SDRAM_SELFREFRESH  (1 << 26) /* SDRAM is in self-refresh */
 #define TEGRA_POWER_HOTPLUG_SHUTDOWN   (1 << 27) /* Hotplug shutdown */
@@ -85,6 +86,7 @@
 #define FLOW_CTRL_CSR_WFI_BITMAP       (0xF << 8)
 #endif
 
+#define TEGRA_PL310_VIRT (TEGRA_ARM_PL310_BASE - IO_CPU_PHYS + IO_CPU_VIRT)
 #define TEGRA_FLOW_CTRL_VIRT (TEGRA_FLOW_CTRL_BASE - IO_PPSB_PHYS + IO_PPSB_VIRT)
 #define TEGRA_ARM_PERIF_VIRT (TEGRA_ARM_PERIF_BASE - IO_CPU_PHYS + IO_CPU_VIRT)
 
        pop_stack_token \tmp1, \tmp2    @ debug stack debug token
 .endm
 
-#else
+#else  /* !defined(__ASSEMBLY__) */
 
 #define FLOW_CTRL_HALT_CPU(cpu)        (IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) +     \
        ((cpu) ? (FLOW_CTRL_HALT_CPU1_EVENTS + 8 * ((cpu) - 1)) :       \
@@ -202,6 +204,8 @@ static inline void flowctrl_writel(unsigned long val, void __iomem *addr)
 void tegra_pen_lock(void);
 void tegra_pen_unlock(void);
 void tegra_cpu_wfi(void);
+void tegra_sleep_cpu_save(unsigned long v2p);
+void tegra_resume(void);
 
 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
 extern void tegra2_iram_start;
@@ -238,19 +242,5 @@ static inline void *tegra_iram_end(void)
        return &tegra3_iram_end;
 #endif
 }
-
-static inline void tegra_sleep_core(unsigned long v2p)
-{
-#ifdef CONFIG_ARCH_TEGRA_2x_SOC
-       tegra2_sleep_core(v2p);
-#else
-       tegra3_sleep_core(v2p);
-#endif
-}
-
-void tegra_sleep_cpu(unsigned long v2p);
-void tegra_resume(void);
-
 #endif
-
 #endif
index d63d37f..e666e4f 100644 (file)
@@ -337,7 +337,9 @@ ENTRY(cpu_v7_do_resume)
        mcr     p15, 0, ip, c8, c7, 0   @ invalidate TLBs
        mcr     p15, 0, ip, c7, c5, 0   @ invalidate I cache
        ldmia   r0!, {r3 - r6}
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
        mcr     p15, 0, r3, c15, c0, 1  @ diag
+#endif
        mcr     p15, 0, r4, c13, c0, 0  @ FCSE/PID
        mcr     p15, 0, r5, c13, c0, 1  @ Context ID
        mcr     p15, 0, r6, c13, c0, 3  @ User r/o thread ID
@@ -512,6 +514,7 @@ __v7_setup:
 2:     ldr     r10, =0x00000c09                @ Cortex-A9 primary part number
        teq     r0, r10
        bne     3f
+#ifndef CONFIG_TRUSTED_FOUNDATIONS
        cmp     r6, #0x10                       @ power ctrl reg added r1p0
        mrcge   p15, 0, r10, c15, c0, 0         @ read power control register
        orrge   r10, r10, #1                    @ enable dynamic clock gating
@@ -559,6 +562,7 @@ __v7_setup:
        orrlt   r10, r10, #1 << 20              @ set bit #20
        mcrlt   p15, 0, r10, c15, c0, 1         @ write diagnostic register
 #endif
+#endif
 
 3:     mov     r10, #0
 #ifdef HARVARD_CACHE
index e0f08b5..85923b6 100644 (file)
@@ -185,6 +185,7 @@ source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
 source security/apparmor/Kconfig
+source security/tf_driver/Kconfig
 
 source security/integrity/ima/Kconfig
 
index 8bb0fe9..9962092 100644 (file)
@@ -7,6 +7,7 @@ subdir-$(CONFIG_SECURITY_SELINUX)       += selinux
 subdir-$(CONFIG_SECURITY_SMACK)                += smack
 subdir-$(CONFIG_SECURITY_TOMOYO)        += tomoyo
 subdir-$(CONFIG_SECURITY_APPARMOR)     += apparmor
+subdir-$(CONFIG_TRUSTED_FOUNDATIONS)   += tf_driver
 
 # always enable default capabilities
 obj-y                                  += commoncap.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_AUDIT)                   += lsm_audit.o
 obj-$(CONFIG_SECURITY_TOMOYO)          += tomoyo/built-in.o
 obj-$(CONFIG_SECURITY_APPARMOR)                += apparmor/built-in.o
 obj-$(CONFIG_CGROUP_DEVICE)            += device_cgroup.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS)      += tf_driver/built-in.o
 
 # Object integrity file lists
 subdir-$(CONFIG_IMA)                   += integrity/ima
diff --git a/security/tf_driver/Kconfig b/security/tf_driver/Kconfig
new file mode 100644 (file)
index 0000000..2a980c5
--- /dev/null
@@ -0,0 +1,8 @@
+config TRUSTED_FOUNDATIONS
+       bool "Enable TF Driver"
+       default n
+       select CRYPTO_SHA1
+       help
+         This option adds kernel support for communication with the
+         Trusted Foundations.
+         If you are unsure how to answer this question, answer N.
+
diff --git a/security/tf_driver/Makefile b/security/tf_driver/Makefile
new file mode 100644 (file)
index 0000000..dfadb7d
--- /dev/null
@@ -0,0 +1,36 @@
+#
+# Copyright (c) 2006-2010 Trusted Logic S.A.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+# debug options
+#EXTRA_CFLAGS += -O0 -DDEBUG -D_DEBUG -DCONFIG_TF_DRIVER_DEBUG_SUPPORT
+EXTRA_CFLAGS += -DNDEBUG
+EXTRA_CFLAGS += -DLINUX -DCONFIG_TF_TRUSTZONE -DCONFIG_TFN
+
+ifdef S_VERSION_BUILD
+EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD)
+endif
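+
+# illustrative invocation (sketch): the build number can be injected
+# from the make command line, e.g.
+#     make S_VERSION_BUILD=42 security/tf_driver/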
+
+tf_driver-objs += tf_util.o
+tf_driver-objs += tf_conn.o
+tf_driver-objs += tf_device.o
+tf_driver-objs += tf_comm.o
+tf_driver-objs += tf_comm_tz.o
+
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver.o
diff --git a/security/tf_driver/s_version.h b/security/tf_driver/s_version.h
new file mode 100644 (file)
index 0000000..6244d3f
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __S_VERSION_H__
+#define __S_VERSION_H__
+
+/*
+ * Usage: define S_VERSION_BUILD on the compiler's command line.
+ *
+ * Then set:
+ * - S_VERSION_OS
+ * - S_VERSION_PLATFORM
+ * - S_VERSION_MAIN
+ * - S_VERSION_ENG is optional
+ * - S_VERSION_PATCH is optional
+ * - S_VERSION_BUILD = 0 if S_VERSION_BUILD not defined or empty
+ */
+
+#define S_VERSION_OS "A"          /* "A" for all Android */
+#define S_VERSION_PLATFORM "B"    /* "B" for Tegra3 */
+
+/*
+ * This version number must be updated for each new release
+ */
+#define S_VERSION_MAIN  "01.03"
+
+/*
+ * If this is a patch or engineering version, use the following
+ * defines to set the version number. Otherwise set these values to 0.
+ */
+#define S_VERSION_ENG 0
+#define S_VERSION_PATCH 0
+
+#ifdef S_VERSION_BUILD
+/* TRICK: detect if S_VERSION_BUILD is defined but empty */
+#if 0 == S_VERSION_BUILD-0
+#undef  S_VERSION_BUILD
+#define S_VERSION_BUILD 0
+#endif
+#else
+/* S_VERSION_BUILD is not defined */
+#define S_VERSION_BUILD 0
+#endif
+
+#define __STRINGIFY(X) #X
+#define __STRINGIFY2(X) __STRINGIFY(X)
+
+#if S_VERSION_ENG != 0
+#define _S_VERSION_ENG "e" __STRINGIFY2(S_VERSION_ENG)
+#else
+#define _S_VERSION_ENG ""
+#endif
+
+#if S_VERSION_PATCH != 0
+#define _S_VERSION_PATCH "p" __STRINGIFY2(S_VERSION_PATCH)
+#else
+#define _S_VERSION_PATCH ""
+#endif
+
+#if !defined(NDEBUG) || defined(_DEBUG)
+#define S_VERSION_VARIANT "D   "
+#else
+#define S_VERSION_VARIANT "    "
+#endif
+
+#define S_VERSION_STRING \
+       "TFN" \
+       S_VERSION_OS \
+       S_VERSION_PLATFORM \
+       S_VERSION_MAIN \
+       _S_VERSION_ENG \
+       _S_VERSION_PATCH \
+       "."  __STRINGIFY2(S_VERSION_BUILD) " " \
+       S_VERSION_VARIANT
+
+#endif /* __S_VERSION_H__ */
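
For reference, with the values above and no overrides, S_VERSION_STRING
expands to "TFNAB01.03.0" followed by a space and the variant field
("D   " when built for debugging, i.e. NDEBUG unset or _DEBUG set; four
spaces otherwise). The driver Makefile below defines NDEBUG, so a
default build reports the release variant.
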
diff --git a/security/tf_driver/tf_comm.c b/security/tf_driver/tf_comm.c
new file mode 100644 (file)
index 0000000..8b12f29
--- /dev/null
@@ -0,0 +1,1745 @@
+/*
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+#include <linux/freezer.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_protocol.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_zebra.h"
+#endif
+
+/*---------------------------------------------------------------------------
+ * Internal Constants
+ *---------------------------------------------------------------------------*/
+
+/*
+ * shared memory descriptor constants
+ */
+#define DESCRIPTOR_B_MASK           (1 << 2)
+#define DESCRIPTOR_C_MASK           (1 << 3)
+#define DESCRIPTOR_S_MASK           (1 << 10)
+
+#define L1_COARSE_DESCRIPTOR_BASE         (0x00000001)
+#define L1_COARSE_DESCRIPTOR_ADDR_MASK    (0xFFFFFC00)
+#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
+
+#define L2_PAGE_DESCRIPTOR_BASE              (0x00000003)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ       (0x220)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
+
+#define L2_INIT_DESCRIPTOR_BASE           (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT   (4)
+
+/*
+ * Reject attempts to share Strongly-Ordered or Device memory
+ * Strongly-Ordered:  TEX=0b000, C=0, B=0
+ * Shared Device:     TEX=0b000, C=0, B=1
+ * Non-Shared Device: TEX=0b010, C=0, B=0
+ */
+#define L2_TEX_C_B_MASK \
+       ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
+#define L2_TEX_C_B_STRONGLY_ORDERED \
+       ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
+#define L2_TEX_C_B_SHARED_DEVICE \
+       ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
+#define L2_TEX_C_B_NON_SHARED_DEVICE \
+       ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
+
+#define CACHE_S(x)      ((x) & (1 << 24))
+#define CACHE_DSIZE(x)  (((x) >> 12) & 4095)
+
+#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
+#define TIME_INFINITE  ((u64) 0xFFFFFFFFFFFFFFFFULL)
+
+/*---------------------------------------------------------------------------
+ * atomic operation definitions
+ *---------------------------------------------------------------------------*/
+
+/*
+ * Atomically updates the sync_serial_n and time_n registers;
+ * sync_serial_n and time_n modifications are thread-safe.
+ */
+void tf_set_current_time(struct tf_comm *comm)
+{
+       u32 new_sync_serial;
+       struct timeval now;
+       u64 time64;
+
+       /*
+        * lock the structure while updating the L1 shared memory fields
+        */
+       spin_lock(&comm->lock);
+
+       /* read sync_serial_n and change the TimeSlot bit field */
+       new_sync_serial =
+               tf_read_reg32(&comm->l1_buffer->sync_serial_n) + 1;
+
+       do_gettimeofday(&now);
+       time64 = now.tv_sec;
+       time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+       /* Write the new time64 and sync_serial_n into shared memory */
+       tf_write_reg64(&comm->l1_buffer->time_n[new_sync_serial &
+               TF_SYNC_SERIAL_TIMESLOT_N], time64);
+       tf_write_reg32(&comm->l1_buffer->sync_serial_n,
+               new_sync_serial);
+
+       spin_unlock(&comm->lock);
+}
+
+/*
+ * Performs the read timeout operation.
+ * The difficulty here is to atomically read two u32
+ * values from the L1 shared buffer.
+ * Atomicity is guaranteed by reading the timeslot given by the
+ * Secure World before and after the operation.
+ */
+static inline void tf_read_timeout(struct tf_comm *comm, u64 *time)
+{
+       u32 sync_serial_s_initial = 0;
+       u32 sync_serial_s_final = 1;
+       u64 time64;
+
+       spin_lock(&comm->lock);
+
+       while (sync_serial_s_initial != sync_serial_s_final) {
+               sync_serial_s_initial = tf_read_reg32(
+                       &comm->l1_buffer->sync_serial_s);
+               time64 = tf_read_reg64(
+                       &comm->l1_buffer->timeout_s[sync_serial_s_initial&1]);
+
+               sync_serial_s_final = tf_read_reg32(
+                       &comm->l1_buffer->sync_serial_s);
+       }
+
+       spin_unlock(&comm->lock);
+
+       *time = time64;
+}
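+
+/*
+ * The loop above is an instance of the generic consistent-read
+ * pattern: the secure-side writer publishes into the slot selected
+ * by the serial and then updates the serial, while the reader
+ * retries until it observes the same serial before and after
+ * reading the double-buffered slot. Distilled sketch of the pattern
+ * (illustrative only, not used by the driver):
+ *
+ *     static u64 read_consistent(volatile u32 *serial,
+ *                                volatile u64 *slots)
+ *     {
+ *             u32 s1, s2;
+ *             u64 v;
+ *             do {
+ *                     s1 = *serial;
+ *                     v = slots[s1 & 1];
+ *                     s2 = *serial;
+ *             } while (s1 != s2);
+ *             return v;
+ *     }
+ */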
+
+/*----------------------------------------------------------------------------
+ * SIGKILL signal handling
+ *----------------------------------------------------------------------------*/
+
+static bool sigkill_pending(void)
+{
+       if (signal_pending(current)) {
+               dprintk(KERN_INFO "A signal is pending\n");
+               if (sigismember(&current->pending.signal, SIGKILL)) {
+                       dprintk(KERN_INFO "A SIGKILL is pending\n");
+                       return true;
+               } else if (sigismember(
+                       &current->signal->shared_pending.signal, SIGKILL)) {
+                       dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
+                       return true;
+               }
+       }
+       return false;
+}
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+       struct tf_coarse_page_table_allocation_context *alloc_context,
+       u32 type)
+{
+       struct tf_coarse_page_table *coarse_pg_table = NULL;
+
+       spin_lock(&(alloc_context->lock));
+
+       if (!(list_empty(&(alloc_context->free_coarse_page_tables)))) {
+               /*
+                * The free list can provide us a coarse page table
+                * descriptor
+                */
+               coarse_pg_table = list_first_entry(
+                               &alloc_context->free_coarse_page_tables,
+                               struct tf_coarse_page_table, list);
+               list_del(&(coarse_pg_table->list));
+
+               coarse_pg_table->parent->ref_count++;
+       } else {
+               /* no array of coarse page tables, create a new one */
+               struct tf_coarse_page_table_array *array;
+               void *page;
+               int i;
+
+               spin_unlock(&(alloc_context->lock));
+
+               /* first allocate a new page descriptor */
+               array = internal_kmalloc(sizeof(*array), GFP_KERNEL);
+               if (array == NULL) {
+                       dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+                                       " failed to allocate a table array\n",
+                                       alloc_context);
+                       return NULL;
+               }
+
+               array->type = type;
+               INIT_LIST_HEAD(&(array->list));
+
+               /* now allocate the actual page that the page descriptor describes */
+               page = (void *) internal_get_zeroed_page(GFP_KERNEL);
+               if (page == NULL) {
+                       dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+                                       " failed to allocate a page\n",
+                                       alloc_context);
+                       internal_kfree(array);
+                       return NULL;
+               }
+
+               spin_lock(&(alloc_context->lock));
+
+               /* initialize the four 1KB coarse page tables in this page */
+               for (i = 0; i < 4; i++) {
+                       INIT_LIST_HEAD(&(array->coarse_page_tables[i].list));
+                       array->coarse_page_tables[i].descriptors =
+                               page + (i * SIZE_1KB);
+                       array->coarse_page_tables[i].parent = array;
+
+                       if (i == 0) {
+                               /*
+                                * the first element is kept for the current
+                                * coarse page table allocation
+                                */
+                               coarse_pg_table =
+                                       &(array->coarse_page_tables[i]);
+                               array->ref_count++;
+                       } else {
+                               /*
+                                * The other elements are added to the free list
+                                */
+                               list_add(&(array->coarse_page_tables[i].list),
+                                       &(alloc_context->
+                                               free_coarse_page_tables));
+                       }
+               }
+
+               list_add(&(array->list),
+                       &(alloc_context->coarse_page_table_arrays));
+       }
+       spin_unlock(&(alloc_context->lock));
+
+       return coarse_pg_table;
+}
+
+
+void tf_free_coarse_page_table(
+       struct tf_coarse_page_table_allocation_context *alloc_context,
+       struct tf_coarse_page_table *coarse_pg_table,
+       int force)
+{
+       struct tf_coarse_page_table_array *array;
+
+       spin_lock(&(alloc_context->lock));
+
+       array = coarse_pg_table->parent;
+
+       (array->ref_count)--;
+
+       if (array->ref_count == 0) {
+               /*
+                * no coarse page table descriptor is used
+                * check if we should free the whole page
+                */
+
+               if ((array->type == TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
+                       && (force == 0))
+                       /*
+                        * This is a preallocated page,
+                        * add the page back to the free list
+                        */
+                       list_add(&(coarse_pg_table->list),
+                               &(alloc_context->free_coarse_page_tables));
+               else {
+                       /*
+                        * None of the page's coarse page table descriptors
+                        * are in use, free the whole page
+                        */
+                       int i;
+                       u32 *descriptors;
+
+                       /*
+                        * remove the page's associated coarse page table
+                        * descriptors from the free list
+                        */
+                       for (i = 0; i < 4; i++)
+                               if (&(array->coarse_page_tables[i]) !=
+                                               coarse_pg_table)
+                                       list_del(&(array->
+                                               coarse_page_tables[i].list));
+
+                       descriptors =
+                               array->coarse_page_tables[0].descriptors;
+                       array->coarse_page_tables[0].descriptors = NULL;
+
+                       /* remove the coarse page table from the array  */
+                       list_del(&(array->list));
+
+                       spin_unlock(&(alloc_context->lock));
+                       /*
+                        * Free the page.
+                        * The address of the page is contained in the first
+                        * element
+                        */
+                       internal_free_page((unsigned long) descriptors);
+                       /* finally free the array */
+                       internal_kfree(array);
+
+                       spin_lock(&(alloc_context->lock));
+               }
+       } else {
+               /*
+                * Some coarse page table descriptors are in use.
+                * Add the descriptor to the free list
+                */
+               list_add(&(coarse_pg_table->list),
+                       &(alloc_context->free_coarse_page_tables));
+       }
+
+       spin_unlock(&(alloc_context->lock));
+}
+
+
+void tf_init_coarse_page_table_allocator(
+       struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+       spin_lock_init(&(alloc_context->lock));
+       INIT_LIST_HEAD(&(alloc_context->coarse_page_table_arrays));
+       INIT_LIST_HEAD(&(alloc_context->free_coarse_page_tables));
+}
+
+void tf_release_coarse_page_table_allocator(
+       struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+       spin_lock(&(alloc_context->lock));
+
+       /* now clean up the list of page descriptors */
+       while (!list_empty(&(alloc_context->coarse_page_table_arrays))) {
+               struct tf_coarse_page_table_array *page_desc;
+               u32 *descriptors;
+
+               page_desc = list_first_entry(
+                       &alloc_context->coarse_page_table_arrays,
+                       struct tf_coarse_page_table_array, list);
+
+               descriptors = page_desc->coarse_page_tables[0].descriptors;
+               list_del(&(page_desc->list));
+
+               spin_unlock(&(alloc_context->lock));
+
+               if (descriptors != NULL)
+                       internal_free_page((unsigned long)descriptors);
+
+               internal_kfree(page_desc);
+
+               spin_lock(&(alloc_context->lock));
+       }
+
+       spin_unlock(&(alloc_context->lock));
+}
+
+/*
+ * Returns the L1 coarse page descriptor for
+ * a coarse page table located at address coarse_pg_table_descriptors
+ */
+u32 tf_get_l1_coarse_descriptor(
+       u32 coarse_pg_table_descriptors[256])
+{
+       u32 descriptor = L1_COARSE_DESCRIPTOR_BASE;
+       unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+       descriptor |= (virt_to_phys((void *) coarse_pg_table_descriptors)
+               & L1_COARSE_DESCRIPTOR_ADDR_MASK);
+
+       if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
+               dprintk(KERN_DEBUG "tf_get_l1_coarse_descriptor "
+                       "V13-12 added to descriptor\n");
+               /* the 16k alignment restriction applies */
+               descriptor |= (DESCRIPTOR_V13_12_GET(
+                       (u32)coarse_pg_table_descriptors) <<
+                               L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
+       }
+
+       return descriptor;
+}
+
+
+#define dprintk_desc(...)
+/*
+ * Returns the L2 descriptor for the specified user page.
+ */
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ptep;
+       u32  *hwpte;
+       u32   tex = 0;
+       u32 descriptor = 0;
+
+       dprintk_desc(KERN_INFO "VirtAddr = %x\n", vaddr);
+       pgd = pgd_offset(mm, vaddr);
+       dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
+               (unsigned int) *pgd);
+       if (pgd_none(*pgd))
+               goto error;
+       pud = pud_offset(pgd, vaddr);
+       dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
+               (unsigned int) *pud);
+       if (pud_none(*pud))
+               goto error;
+       pmd = pmd_offset(pud, vaddr);
+       dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
+               (unsigned int) *pmd);
+       if (pmd_none(*pmd))
+               goto error;
+
+       if (PMD_TYPE_SECT&(*pmd)) {
+               /* We have a section */
+               dprintk_desc(KERN_INFO "Section descr=%x\n",
+                       (unsigned int)*pmd);
+               if ((*pmd) & PMD_SECT_BUFFERABLE)
+                       descriptor |= DESCRIPTOR_B_MASK;
+               if ((*pmd) & PMD_SECT_CACHEABLE)
+                       descriptor |= DESCRIPTOR_C_MASK;
+               if ((*pmd) & PMD_SECT_S)
+                       descriptor |= DESCRIPTOR_S_MASK;
+               tex = ((*pmd) >> 12) & 7;
+       } else {
+               /* We have a table */
+               ptep = pte_offset_map(pmd, vaddr);
+               if (pte_present(*ptep)) {
+                       dprintk_desc(KERN_INFO "L2 descr=%x\n",
+                               (unsigned int) *ptep);
+                       if ((*ptep) & L_PTE_MT_BUFFERABLE)
+                               descriptor |= DESCRIPTOR_B_MASK;
+                       if ((*ptep) & L_PTE_MT_WRITETHROUGH)
+                               descriptor |= DESCRIPTOR_C_MASK;
+                       if ((*ptep) & L_PTE_MT_DEV_SHARED)
+                               descriptor |= DESCRIPTOR_S_MASK;
+
+                       /*
+                        * Linux's pte doesn't keep track of the TEX value.
+                        * We have to jump to the hw pte; see
+                        * arch/arm/include/asm/pgtable.h
+                        * (located -2k before 2.6.38, +2k afterwards)
+                        */
+#ifdef PTE_HWTABLE_SIZE
+                       hwpte = (u32 *) (ptep+PTE_HWTABLE_PTRS);
+#else
+                       hwpte = (u32 *) (ptep-PTRS_PER_PTE);
+#endif
+                       if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
+                                       ((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
+                               goto error;
+                       dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
+                       tex = ((*hwpte) >> 6) & 7;
+                       pte_unmap(ptep);
+               } else {
+                       pte_unmap(ptep);
+                       goto error;
+               }
+       }
+
+       descriptor |= (tex << 6);
+
+       return descriptor;
+
+error:
+       dprintk(KERN_ERR "Error occurred in %s\n", __func__);
+       return 0;
+}
+
+
+/*
+ * Converts an L2 page descriptor back into a struct page pointer
+ */
+inline struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor)
+{
+       return pte_page(l2_page_descriptor & L2_DESCRIPTOR_ADDR_MASK);
+}
+
+
+/*
+ * Converts *l2_page_descriptor, which holds a struct page pointer on
+ * entry, into the corresponding L2 page descriptor, mapping the page
+ * into the kernel address space if needed to read its attributes.
+ */
+static void tf_get_l2_page_descriptor(
+       u32 *l2_page_descriptor,
+       u32 flags, struct mm_struct *mm)
+{
+       unsigned long page_vaddr;
+       u32 descriptor;
+       struct page *page;
+       bool unmap_page = false;
+
+#if 0
+       dprintk(KERN_INFO
+               "tf_get_l2_page_descriptor():"
+               "*l2_page_descriptor=%x\n",
+               *l2_page_descriptor);
+#endif
+
+       if (*l2_page_descriptor == L2_DESCRIPTOR_FAULT)
+               return;
+
+       page = (struct page *) (*l2_page_descriptor);
+
+       page_vaddr = (unsigned long) page_address(page);
+       if (page_vaddr == 0) {
+               dprintk(KERN_INFO "page_address returned 0\n");
+               /* Should we use kmap_atomic(page, KM_USER0) instead? */
+               page_vaddr = (unsigned long) kmap(page);
+               if (page_vaddr == 0) {
+                       *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+                       dprintk(KERN_ERR "kmap returned 0\n");
+                       return;
+               }
+               unmap_page = true;
+       }
+
+       descriptor = tf_get_l2_descriptor_common(page_vaddr, mm);
+       if (descriptor == 0) {
+               *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+               return;
+       }
+       descriptor |= L2_PAGE_DESCRIPTOR_BASE;
+
+       descriptor |= (page_to_phys(page) & L2_DESCRIPTOR_ADDR_MASK);
+
+       if (!(flags & TF_SHMEM_TYPE_WRITE))
+               /* only read access */
+               descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
+       else
+               /* read and write access */
+               descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
+
+       if (unmap_page)
+               kunmap(page);
+
+       *l2_page_descriptor = descriptor;
+}
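+
+/*
+ * Per the masks above, the resulting descriptor combines the
+ * L2_PAGE_DESCRIPTOR_BASE type bits, the physical page address
+ * (page_to_phys() masked by L2_DESCRIPTOR_ADDR_MASK), the access
+ * permissions (L2_PAGE_DESCRIPTOR_AP_APX_READ or _READ_WRITE), and
+ * the TEX/C/B/S attributes that tf_get_l2_descriptor_common()
+ * copied from the existing kernel mapping.
+ */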
+
+
+/*
+ * Unlocks the physical memory pages
+ * and frees the coarse page tables that need to be freed
+ */
+void tf_cleanup_shared_memory(
+       struct tf_coarse_page_table_allocation_context *alloc_context,
+       struct tf_shmem_desc *shmem_desc,
+       u32 full_cleanup)
+{
+       u32 coarse_page_index;
+
+       dprintk(KERN_INFO "tf_cleanup_shared_memory(%p)\n",
+                       shmem_desc);
+
+#ifdef DEBUG_COARSE_TABLES
+       printk(KERN_DEBUG "tf_cleanup_shared_memory "
+               "- number of coarse page tables=%d\n",
+               shmem_desc->coarse_pg_table_count);
+
+       for (coarse_page_index = 0;
+            coarse_page_index < shmem_desc->coarse_pg_table_count;
+            coarse_page_index++) {
+               u32 j;
+
+               printk(KERN_DEBUG "  Descriptor=%p address=%p index=%d\n",
+                       shmem_desc->coarse_pg_table[coarse_page_index],
+                       shmem_desc->coarse_pg_table[coarse_page_index]->
+                               descriptors,
+                       coarse_page_index);
+               if (shmem_desc->coarse_pg_table[coarse_page_index] != NULL) {
+                       for (j = 0;
+                            j < TF_DESCRIPTOR_TABLE_CAPACITY;
+                            j += 8) {
+                               int k;
+                               printk(KERN_DEBUG "    ");
+                               for (k = j; k < j + 8; k++)
+                                       printk(KERN_DEBUG "%p ",
+                                               shmem_desc->coarse_pg_table[
+                                                       coarse_page_index]->
+                                                               descriptors);
+                               printk(KERN_DEBUG "\n");
+                       }
+               }
+       }
+       printk(KERN_DEBUG "tf_cleanup_shared_memory() - done\n\n");
+#endif
+
+       /* Parse the coarse page descriptors */
+       for (coarse_page_index = 0;
+            coarse_page_index < shmem_desc->coarse_pg_table_count;
+            coarse_page_index++) {
+               u32 j;
+               u32 found = 0;
+
+               /* parse the page descriptors of the coarse page */
+               for (j = 0; j < TF_DESCRIPTOR_TABLE_CAPACITY; j++) {
+                       u32 l2_page_descriptor = (u32) (shmem_desc->
+                               coarse_pg_table[coarse_page_index]->
+                                       descriptors[j]);
+
+                       if (l2_page_descriptor != L2_DESCRIPTOR_FAULT) {
+                               struct page *page =
+                                       tf_l2_page_descriptor_to_page(
+                                               l2_page_descriptor);
+
+                               if (!PageReserved(page))
+                                       SetPageDirty(page);
+                               internal_page_cache_release(page);
+
+                               found = 1;
+                       } else if (found == 1) {
+                               break;
+                       }
+               }
+
+               /*
+                * Only free coarse page tables that were not preallocated
+                */
+               if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+                       (full_cleanup != 0))
+                       tf_free_coarse_page_table(alloc_context,
+                               shmem_desc->coarse_pg_table[coarse_page_index],
+                               0);
+       }
+
+       shmem_desc->coarse_pg_table_count = 0;
+       dprintk(KERN_INFO "tf_cleanup_shared_memory(%p) done\n",
+                       shmem_desc);
+}
+
+/*
+ * Makes sure the coarse page tables are allocated; if not, allocates them.
+ * Locks down the physical memory pages.
+ * Verifies the memory attributes depending on flags.
+ */
+int tf_fill_descriptor_table(
+       struct tf_coarse_page_table_allocation_context *alloc_context,
+       struct tf_shmem_desc *shmem_desc,
+       u32 buffer,
+       struct vm_area_struct **vmas,
+       u32 descriptors[TF_MAX_COARSE_PAGES],
+       u32 buffer_size,
+       u32 *buffer_start_offset,
+       bool in_user_space,
+       u32 flags,
+       u32 *descriptor_count)
+{
+       u32 coarse_page_index;
+       u32 coarse_page_count;
+       u32 page_count;
+       u32 page_shift = 0;
+       int ret = 0;
+       unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+       dprintk(KERN_INFO "tf_fill_descriptor_table"
+               "(%p, buffer=0x%08X, size=0x%08X, user=%01x "
+               "flags = 0x%08x)\n",
+               shmem_desc,
+               buffer,
+               buffer_size,
+               in_user_space,
+               flags);
+
+       /*
+        * Compute the number of pages
+        * Compute the number of coarse pages
+        * Compute the page offset
+        */
+       page_count = ((buffer & ~PAGE_MASK) +
+               buffer_size + ~PAGE_MASK) >> PAGE_SHIFT;
+
+       /* check whether the 16k alignment restriction applies */
+       if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
+               /*
+                * The 16k alignment restriction applies.
+                * Shift data to get them 16k aligned
+                */
+               page_shift = DESCRIPTOR_V13_12_GET(buffer);
+       page_count += page_shift;
+
+
+       /*
+        * Check that the number of pages fits in the coarse page tables
+        */
+       if (page_count > (TF_DESCRIPTOR_TABLE_CAPACITY *
+                       TF_MAX_COARSE_PAGES)) {
+               dprintk(KERN_ERR "tf_fill_descriptor_table(%p): "
+                       "%u pages required to map shared memory!\n",
+                       shmem_desc, page_count);
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       /* each coarse page table describes 256 pages */
+       coarse_page_count = ((page_count +
+               TF_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
+                       TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
+
+       /*
+        * Compute the buffer offset
+        */
+       *buffer_start_offset = (buffer & ~PAGE_MASK) |
+               (page_shift << PAGE_SHIFT);
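+
+       /*
+        * Worked example (assuming PAGE_SIZE is 4KB and that
+        * DESCRIPTOR_V13_12_GET() extracts bits [13:12]):
+        * buffer = 0x1234, buffer_size = 0x3000 gives
+        * page_count = (0x234 + 0x3000 + 0xFFF) >> 12 = 4 pages;
+        * if the 16k restriction applies, page_shift = 1, so
+        * page_count becomes 5 and *buffer_start_offset =
+        * 0x234 | (1 << 12) = 0x1234, i.e. the buffer's offset
+        * within its 16k-aligned window.
+        */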
+
+       /* map each coarse page */
+       for (coarse_page_index = 0;
+            coarse_page_index < coarse_page_count;
+            coarse_page_index++) {
+               u32 j;
+               struct tf_coarse_page_table *coarse_pg_table;
+
+               /* compute a virtual address with appropriate offset */
+               u32 buffer_offset_vaddr = buffer +
+                       (coarse_page_index * TF_MAX_COARSE_PAGE_MAPPED_SIZE);
+               u32 pages_to_get;
+
+               /*
+                * Compute the number of pages left for this coarse page.
+                * Decrement page_count each time
+                */
+               pages_to_get = (page_count >>
+                       TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
+                               TF_DESCRIPTOR_TABLE_CAPACITY : page_count;
+               page_count -= pages_to_get;
+
+               /*
+                * Check if the coarse page table has already been
+                * allocated. If not, do it now.
+                */
+               if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM)
+                       || (shmem_desc->type ==
+                               TF_SHMEM_TYPE_PM_HIBERNATE)) {
+                       coarse_pg_table = tf_alloc_coarse_page_table(
+                               alloc_context,
+                               TF_PAGE_DESCRIPTOR_TYPE_NORMAL);
+
+                       if (coarse_pg_table == NULL) {
+                               dprintk(KERN_ERR
+                                       "tf_fill_descriptor_table(%p): "
+                                       "tf_alloc_coarse_page_table "
+                                       "failed for coarse page %d\n",
+                                       shmem_desc, coarse_page_index);
+                               ret = -ENOMEM;
+                               goto error;
+                       }
+
+                       shmem_desc->coarse_pg_table[coarse_page_index] =
+                               coarse_pg_table;
+               } else {
+                       coarse_pg_table =
+                               shmem_desc->coarse_pg_table[coarse_page_index];
+               }
+
+               /*
+                * The page is not necessarily filled with zeroes.
+                * Set the fault descriptors (each descriptor is 4 bytes long)
+                */
+               memset(coarse_pg_table->descriptors, 0x00,
+                       TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+
+               if (in_user_space) {
+                       int pages;
+
+                       /*
+                        * TRICK: use pCoarsePageDescriptor->descriptors to
+                        * hold the (struct page*) items before getting their
+                        * physical address
+                        */
+                       down_read(&(current->mm->mmap_sem));
+                       pages = internal_get_user_pages(
+                               current,
+                               current->mm,
+                               buffer_offset_vaddr,
+                               /*
+                                * page_shift is cleared after retrieving first
+                                * coarse page
+                                */
+                               (pages_to_get - page_shift),
+                               (flags & TF_SHMEM_TYPE_WRITE) ? 1 : 0,
+                               0,
+                               (struct page **) (coarse_pg_table->descriptors
+                                       + page_shift),
+                               vmas);
+                       up_read(&(current->mm->mmap_sem));
+
+                       if ((pages <= 0) ||
+                               (pages != (pages_to_get - page_shift))) {
+                               dprintk(KERN_ERR "tf_fill_descriptor_table:"
+                                       " get_user_pages got %d pages while "
+                                       "trying to get %d pages!\n",
+                                       pages, pages_to_get - page_shift);
+                               ret = -EFAULT;
+                               goto error;
+                       }
+
+                       for (j = page_shift;
+                                 j < page_shift + pages;
+                                 j++) {
+                               /* Get the actual L2 descriptors */
+                               tf_get_l2_page_descriptor(
+                                       &coarse_pg_table->descriptors[j],
+                                       flags,
+                                       current->mm);
+                               /*
+                                * Reject Strongly-Ordered or Device Memory
+                                */
+#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
+       ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
+        (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
+        (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
+
+                               if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
+                                       coarse_pg_table->
+                                               descriptors[j])) {
+                                       dprintk(KERN_ERR
+                                               "tf_fill_descriptor_table:"
+                                               " descriptor 0x%08X use "
+                                               "strongly-ordered or device "
+                                               "memory. Rejecting!\n",
+                                               coarse_pg_table->
+                                                       descriptors[j]);
+                                       ret = -EFAULT;
+                                       goto error;
+                               }
+                       }
+               } else if (is_vmalloc_addr((void *)buffer_offset_vaddr)) {
+                       /* Kernel-space memory obtained through vmalloc */
+                       dprintk(KERN_INFO
+                               "tf_fill_descriptor_table: "
+                               "vmalloc'ed buffer starting at %p\n",
+                              (void *)buffer_offset_vaddr);
+                       for (j = page_shift; j < pages_to_get; j++) {
+                               struct page *page;
+                               void *addr =
+                                       (void *)(buffer_offset_vaddr +
+                                               (j - page_shift) * PAGE_SIZE);
+                               page = vmalloc_to_page(addr);
+                               if (page == NULL) {
+                                       dprintk(KERN_ERR
+                                               "tf_fill_descriptor_table: "
+                                               "cannot map %p (vmalloc) "
+                                               "to page\n",
+                                               addr);
+                                       ret = -EFAULT;
+                                       goto error;
+                               }
+                               coarse_pg_table->descriptors[j] = (u32)page;
+                               get_page(page);
+
+                               /* Convert the page pointer into an L2 descriptor */
+                               tf_get_l2_page_descriptor(
+                                       &coarse_pg_table->descriptors[j],
+                                       flags,
+                                       &init_mm);
+                       }
+               } else {
+                       /* Kernel-space memory given by a virtual address */
+                       dprintk(KERN_INFO
+                               "tf_fill_descriptor_table: "
+                               "buffer starting at virtual address %p\n",
+                              (void *)buffer_offset_vaddr);
+                       for (j = page_shift; j < pages_to_get; j++) {
+                               struct page *page;
+                               void *addr =
+                                       (void *)(buffer_offset_vaddr +
+                                               (j - page_shift) * PAGE_SIZE);
+                               page = virt_to_page(addr);
+                               if (page == NULL) {
+                                       dprintk(KERN_ERR
+                                               "tf_fill_descriptor_table: "
+                                               "cannot map %p (virtual) "
+                                               "to page\n",
+                                               addr);
+                                       ret = -EFAULT;
+                                       goto error;
+                               }
+                               coarse_pg_table->descriptors[j] = (u32)page;
+                               get_page(page);
+
+                               /* Convert the page pointer into an L2 descriptor */
+                               tf_get_l2_page_descriptor(
+                                       &coarse_pg_table->descriptors[j],
+                                       flags,
+                                       &init_mm);
+                       }
+               }
+
+               dmac_flush_range((void *)coarse_pg_table->descriptors,
+                  (void *)(((u32)(coarse_pg_table->descriptors)) +
+                  TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
+
+               outer_clean_range(
+                       __pa(coarse_pg_table->descriptors),
+                       __pa(coarse_pg_table->descriptors) +
+                       TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+               wmb();
+
+               /* Update the coarse page table address */
+               descriptors[coarse_page_index] =
+                       tf_get_l1_coarse_descriptor(
+                               coarse_pg_table->descriptors);
+
+               /*
+                * The next coarse page has no page shift, reset the
+                * page_shift
+                */
+               page_shift = 0;
+       }
+
+       *descriptor_count = coarse_page_count;
+       shmem_desc->coarse_pg_table_count = coarse_page_count;
+
+#ifdef DEBUG_COARSE_TABLES
+       printk(KERN_DEBUG "\ntf_fill_descriptor_table - size=0x%08X "
+               "numberOfCoarsePages=%d\n", buffer_size,
+               shmem_desc->coarse_pg_table_count);
+       for (coarse_page_index = 0;
+            coarse_page_index < shmem_desc->coarse_pg_table_count;
+            coarse_page_index++) {
+               u32 j;
+               struct tf_coarse_page_table *coarse_page_table =
+                       shmem_desc->coarse_pg_table[coarse_page_index];
+
+               printk(KERN_DEBUG "  Descriptor=%p address=%p index=%d\n",
+                       coarse_page_table,
+                       coarse_page_table->descriptors,
+                       coarse_page_index);
+               for (j = 0;
+                    j < TF_DESCRIPTOR_TABLE_CAPACITY;
+                    j += 8) {
+                       int k;
+                       printk(KERN_DEBUG "    ");
+                       for (k = j; k < j + 8; k++)
+                               printk(KERN_DEBUG "0x%08X ",
+                                       coarse_page_table->descriptors[k]);
+                       printk(KERN_DEBUG "\n");
+               }
+       }
+       printk(KERN_DEBUG "\ntf_fill_descriptor_table() - done\n\n");
+#endif
+
+       return 0;
+
+error:
+       tf_cleanup_shared_memory(
+                       alloc_context,
+                       shmem_desc,
+                       0);
+
+       return ret;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+u8 *tf_get_description(struct tf_comm *comm)
+{
+       if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+               return comm->l1_buffer->version_description;
+
+       return NULL;
+}
+
+/*
+ * Returns a non-zero value if the specified S-timeout has expired, zero
+ * otherwise.
+ *
+ * The placeholder pointed to by relative_timeout_jiffies gives the relative
+ * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
+ * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
+ */
+static int tf_test_s_timeout(
+               u64 timeout,
+               signed long *relative_timeout_jiffies)
+{
+       struct timeval now;
+       u64 time64;
+
+       *relative_timeout_jiffies = 0;
+
+       /* immediate timeout */
+       if (timeout == TIME_IMMEDIATE)
+               return 1;
+
+       /* infinite timeout */
+       if (timeout == TIME_INFINITE) {
+               dprintk(KERN_DEBUG "tf_test_s_timeout: "
+                       "timeout is infinite\n");
+               *relative_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+               return 0;
+       }
+
+       do_gettimeofday(&now);
+       time64 = now.tv_sec;
+       /* will not overflow as operations are done on 64-bit values */
+       time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+       /* timeout expired */
+       if (time64 >= timeout) {
+               dprintk(KERN_DEBUG "tf_test_s_timeout: timeout expired\n");
+               return 1;
+       }
+
+       /*
+        * finite timeout, compute relative_timeout_jiffies
+        */
+       /* will not overflow as time64 < timeout */
+       timeout -= time64;
+
+       /* guarantee *relative_timeout_jiffies is a valid timeout */
+       if ((timeout >> 32) != 0)
+               *relative_timeout_jiffies = MAX_JIFFY_OFFSET;
+       else
+               *relative_timeout_jiffies =
+                       msecs_to_jiffies((unsigned int) timeout);
+
+       dprintk(KERN_DEBUG "tf_test_s_timeout: timeout is 0x%lx\n",
+               *relative_timeout_jiffies);
+       return 0;
+}
+
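+/*
+ * Illustrative sketch, not part of the original patch: how a waiter
+ * typically combines tf_test_s_timeout() with schedule_timeout(), as
+ * tf_send_recv() does below. The helper name is hypothetical.
+ */
+#if 0
+static void example_sleep_until_s_timeout(struct tf_comm *comm)
+{
+       u64 timeout;
+       signed long remaining;
+
+       tf_read_timeout(comm, &timeout);
+       if (tf_test_s_timeout(timeout, &remaining) != 0)
+               return; /* S-timeout already expired, do not sleep */
+
+       /*
+        * Sleep for at most `remaining` jiffies; this is
+        * MAX_SCHEDULE_TIMEOUT when the S-timeout is infinite.
+        */
+       set_current_state(TASK_INTERRUPTIBLE);
+       schedule_timeout(remaining);
+}
+#endif
+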
+static void tf_copy_answers(struct tf_comm *comm)
+{
+       u32 first_answer;
+       u32 first_free_answer;
+       struct tf_answer_struct *answerStructureTemp;
+
+       if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+               spin_lock(&comm->lock);
+               first_free_answer = tf_read_reg32(
+                       &comm->l1_buffer->first_free_answer);
+               first_answer = tf_read_reg32(
+                       &comm->l1_buffer->first_answer);
+
+               while (first_answer != first_free_answer) {
+                       /* answer queue not empty */
+                       union tf_answer sComAnswer;
+                       struct tf_answer_header  header;
+
+                       /*
+                        * the size of the command in 32-bit words,
+                        * not in bytes
+                        */
+                       u32 command_size;
+                       u32 i;
+                       u32 *temp = (uint32_t *) &header;
+
+                       dprintk(KERN_INFO
+                               "[pid=%d] tf_copy_answers(%p): "
+                               "Read answers from L1\n",
+                               current->pid, comm);
+
+                       /* Read the answer header */
+                       for (i = 0;
+                            i < sizeof(struct tf_answer_header)/sizeof(u32);
+                              i++)
+                               temp[i] = comm->l1_buffer->answer_queue[
+                                       (first_answer + i) %
+                                               TF_S_ANSWER_QUEUE_CAPACITY];
+
+                       /* Read the answer from the L1_Buffer*/
+                       command_size = header.message_size +
+                               sizeof(struct tf_answer_header)/sizeof(u32);
+                       temp = (uint32_t *) &sComAnswer;
+                       for (i = 0; i < command_size; i++)
+                               temp[i] = comm->l1_buffer->answer_queue[
+                                       (first_answer + i) %
+                                               TF_S_ANSWER_QUEUE_CAPACITY];
+
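+                       /*
+                        * The Secure World echoes header.operation_id
+                        * back unchanged; tf_send_receive() stores the
+                        * kernel address of the caller's
+                        * tf_answer_struct there, so the answer is
+                        * routed back to the waiting thread.
+                        */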
+                       answerStructureTemp = (struct tf_answer_struct *)
+                               sComAnswer.header.operation_id;
+
+                       tf_dump_answer(&sComAnswer);
+
+                       memcpy(answerStructureTemp->answer, &sComAnswer,
+                               command_size * sizeof(u32));
+                       answerStructureTemp->answer_copied = true;
+
+                       first_answer += command_size;
+                       tf_write_reg32(&comm->l1_buffer->first_answer,
+                               first_answer);
+               }
+               spin_unlock(&(comm->lock));
+       }
+}
+
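+/*
+ * Note on the queue indices: first_command, first_free_command,
+ * first_answer and first_free_answer are free-running 32-bit
+ * counters. Every access into the queues reduces them modulo the
+ * queue capacity, so first_free - first is always the number of
+ * 32-bit words currently in use.
+ */
+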
+static void tf_copy_command(
+       struct tf_comm *comm,
+       union tf_command *command,
+       struct tf_connection *connection,
+       enum TF_COMMAND_STATE *command_status)
+{
+       if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+               && (command != NULL)) {
+               /*
+                * Write the message in the message queue.
+                */
+
+               if (*command_status == TF_COMMAND_STATE_PENDING) {
+                       u32 command_size;
+                       u32 queue_words_count;
+                       u32 i;
+                       u32 first_free_command;
+                       u32 first_command;
+
+                       spin_lock(&comm->lock);
+
+                       first_command = tf_read_reg32(
+                               &comm->l1_buffer->first_command);
+                       first_free_command = tf_read_reg32(
+                               &comm->l1_buffer->first_free_command);
+
+                       queue_words_count = first_free_command - first_command;
+                       command_size     = command->header.message_size +
+                               sizeof(struct tf_command_header)/sizeof(u32);
+                       if ((queue_words_count + command_size) <
+                               TF_N_MESSAGE_QUEUE_CAPACITY) {
+                               /*
+                                * The command queue is not full. If it
+                                * were full, the command would be
+                                * copied on a later iteration of the
+                                * calling loop.
+                                */
+
+                               /* Change the connection state */
+                               if (connection == NULL)
+                                       goto copy;
+
+                               spin_lock(&(connection->state_lock));
+
+                               if ((connection->state ==
+                               TF_CONN_STATE_NO_DEVICE_CONTEXT)
+                               &&
+                               (command->header.message_type ==
+                               TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+
+                                       dprintk(KERN_INFO
+                               "tf_copy_command(%p): "
+                               "Conn state is DEVICE_CONTEXT_SENT\n",
+                                connection);
+                                       connection->state =
+                       TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
+                               } else if ((connection->state !=
+                               TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+                               &&
+                               (command->header.message_type !=
+                               TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+                                       /*
+                                        * The connection is no longer
+                                        * valid. We may not send any
+                                        * command on it, not even
+                                        * another DESTROY_DEVICE_CONTEXT.
+                                        */
+                                       dprintk(KERN_INFO
+                                               "[pid=%d] tf_copy_command(%p): "
+                                               "Connection no longer valid. "
+                                               "ABORT\n",
+                                               current->pid, connection);
+                                       *command_status =
+                                               TF_COMMAND_STATE_ABORTED;
+                                       spin_unlock(
+                                               &(connection->state_lock));
+                                       spin_unlock(
+                                               &comm->lock);
+                                       return;
+                               } else if (
+                                       (command->header.message_type ==
+                               TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
+                               (connection->state ==
+                               TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+                                               ) {
+                                       dprintk(KERN_INFO
+                                       "[pid=%d] tf_copy_command(%p): "
+                                       "Conn state is "
+                                       "DESTROY_DEVICE_CONTEXT_SENT\n",
+                                       current->pid, connection);
+                                       connection->state =
+                       TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
+                                       }
+                                       spin_unlock(&(connection->state_lock));
+copy:
+                                       /*
+                                        * Copy the command to the L1
+                                        * buffer.
+                                        */
+                                       dprintk(KERN_INFO
+                               "[pid=%d] tf_copy_command(%p): "
+                               "Write Message in the queue\n",
+                               current->pid, command);
+                                       tf_dump_command(command);
+
+                                       for (i = 0; i < command_size; i++)
+                                               comm->l1_buffer->command_queue[
+                                               (first_free_command + i) %
+                                               TF_N_MESSAGE_QUEUE_CAPACITY] =
+                                               ((uint32_t *) command)[i];
+
+                                       *command_status =
+                                               TF_COMMAND_STATE_SENT;
+                                       first_free_command += command_size;
+
+                                       tf_write_reg32(
+                                               &comm->
+                                               l1_buffer->first_free_command,
+                                               first_free_command);
+                       }
+                       spin_unlock(&comm->lock);
+               }
+       }
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the command and waits for the answer
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_send_recv(struct tf_comm *comm,
+       union tf_command *command,
+       struct tf_answer_struct *answerStruct,
+       struct tf_connection *connection,
+       int bKillable
+       )
+{
+       int result;
+       u64 timeout;
+       signed long nRelativeTimeoutJiffies;
+       bool wait_prepared = false;
+       enum TF_COMMAND_STATE command_status = TF_COMMAND_STATE_PENDING;
+       DEFINE_WAIT(wait);
+#ifdef CONFIG_FREEZER
+       unsigned long saved_flags;
+#endif
+       dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
+                current->pid, command);
+
+#ifdef CONFIG_TF_ZEBRA
+       tf_clock_timer_start();
+#endif
+
+#ifdef CONFIG_FREEZER
+       saved_flags = current->flags;
+       current->flags |= PF_FREEZER_NOSIG;
+#endif
+
+       /*
+        * Read all answers from the answer queue
+        */
+copy_answers:
+       tf_copy_answers(comm);
+
+       tf_copy_command(comm, command, connection, &command_status);
+
+       /*
+        * Notify all waiting threads
+        */
+       wake_up(&(comm->wait_queue));
+
+#ifdef CONFIG_FREEZER
+       if (unlikely(freezing(current))) {
+
+               dprintk(KERN_INFO
+                       "Entering refrigerator.\n");
+               refrigerator();
+               dprintk(KERN_INFO
+                       "Left refrigerator.\n");
+               goto copy_answers;
+       }
+#endif
+
+#ifndef CONFIG_PREEMPT
+       if (need_resched())
+               schedule();
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+       /*
+        * Handle RPC (if any)
+        */
+       if (tf_rpc_execute(comm) == RPC_NON_YIELD)
+               goto schedule_secure_world;
+#endif
+
+       /*
+        * Join wait queue
+        */
+       /*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
+               current->pid, command);*/
+       prepare_to_wait(&comm->wait_queue, &wait,
+                       bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+       wait_prepared = true;
+
+       /*
+        * Check if our answer is available
+        */
+       if (command_status == TF_COMMAND_STATE_ABORTED) {
+               /* Not waiting for an answer, return error code */
+               result = -EINTR;
+               dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+                       "Command status is ABORTED. "
+                       "Exit with 0x%x\n",
+                       current->pid, result);
+               goto exit;
+       }
+       if (answerStruct->answer_copied) {
+               dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+                       "Received answer (type 0x%02X)\n",
+                       current->pid,
+                       answerStruct->answer->header.message_type);
+               result = 0;
+               goto exit;
+       }
+
+       /*
+        * Check if a signal is pending
+        */
+       if (bKillable && (sigkill_pending())) {
+               if (command_status == TF_COMMAND_STATE_PENDING)
+                       /* Command was not sent. */
+                       result = -EINTR;
+               else
+                       /* Command was sent but no answer was received yet. */
+                       result = -EIO;
+
+               dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+                       "Signal Pending. Return error %d\n",
+                       current->pid, result);
+               goto exit;
+       }
+
+       /*
+        * Check if secure world is schedulable. It is schedulable if at
+        * least one of the following conditions holds:
+        * + it is still initializing (TF_COMM_FLAG_L1_SHARED_ALLOCATED
+        *   is not set);
+        * + there is a command in the queue;
+        * + the secure world timeout is zero.
+        */
+       if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+               u32 first_free_command;
+               u32 first_command;
+               spin_lock(&comm->lock);
+               first_command = tf_read_reg32(
+                       &comm->l1_buffer->first_command);
+               first_free_command = tf_read_reg32(
+                       &comm->l1_buffer->first_free_command);
+               spin_unlock(&comm->lock);
+               tf_read_timeout(comm, &timeout);
+               if ((first_free_command == first_command) &&
+                        (tf_test_s_timeout(timeout,
+                       &nRelativeTimeoutJiffies) == 0))
+                       /*
+                        * If the command queue is empty and the
+                        * timeout has not expired, the Secure World
+                        * is not schedulable.
+                        */
+                       goto wait;
+       }
+
+       finish_wait(&comm->wait_queue, &wait);
+       wait_prepared = false;
+
+       /*
+        * Yield to the Secure World
+        */
+#ifdef CONFIG_TF_ZEBRA
+schedule_secure_world:
+#endif
+
+       result = tf_schedule_secure_world(comm);
+       if (result < 0)
+               goto exit;
+       goto copy_answers;
+
+wait:
+       if (bKillable && (sigkill_pending())) {
+               if (command_status == TF_COMMAND_STATE_PENDING)
+                       result = -EINTR; /* Command was not sent. */
+               else
+                       /* Command was sent but no answer was received yet. */
+                       result = -EIO;
+
+               dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+                       "Signal Pending while waiting. Return error %d\n",
+                       current->pid, result);
+               goto exit;
+       }
+
+       if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
+               dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+                       "prepare to sleep infinitely\n", current->pid);
+       else
+               dprintk(KERN_INFO "tf_send_recv: "
+                       "prepare to sleep 0x%lx jiffies\n",
+                       nRelativeTimeoutJiffies);
+
+       /* go to sleep */
+       if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
+               dprintk(KERN_INFO
+                       "tf_send_recv: timeout expired\n");
+       else
+               dprintk(KERN_INFO
+                       "tf_send_recv: signal delivered\n");
+
+       finish_wait(&comm->wait_queue, &wait);
+       wait_prepared = false;
+       goto copy_answers;
+
+exit:
+       if (wait_prepared) {
+               finish_wait(&comm->wait_queue, &wait);
+               wait_prepared = false;
+       }
+
+#ifdef CONFIG_FREEZER
+       current->flags &= ~(PF_FREEZER_NOSIG);
+       current->flags |= (saved_flags & PF_FREEZER_NOSIG);
+#endif
+
+       return result;
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the message and waits for the corresponding answer
+ * It may return if a signal needs to be delivered.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_send_receive(struct tf_comm *comm,
+         union tf_command *command,
+         union tf_answer *answer,
+         struct tf_connection *connection,
+         bool bKillable)
+{
+       int error;
+       struct tf_answer_struct answerStructure;
+#ifdef CONFIG_SMP
+       long ret_affinity;
+       cpumask_t saved_cpu_mask;
+       cpumask_t local_cpu_mask = CPU_MASK_NONE;
+#endif
+
+       answerStructure.answer = answer;
+       answerStructure.answer_copied = false;
+
+       if (command != NULL)
+               command->header.operation_id = (u32) &answerStructure;
+
+       dprintk(KERN_INFO "tf_send_receive\n");
+
+#ifdef CONFIG_TF_ZEBRA
+       if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+               dprintk(KERN_ERR "tf_send_receive(%p): "
+                       "Secure world not started\n", comm);
+
+               return -EFAULT;
+       }
+#endif
+
+       if (test_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags)) != 0) {
+               dprintk(KERN_DEBUG
+                       "tf_send_receive: Flag Terminating is set\n");
+               return 0;
+       }
+
+#ifdef CONFIG_SMP
+       cpu_set(0, local_cpu_mask);
+       sched_getaffinity(0, &saved_cpu_mask);
+       ret_affinity = sched_setaffinity(0, &local_cpu_mask);
+       if (ret_affinity != 0)
+               dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
+#endif
+
+       /*
+        * Send the command
+        */
+       error = tf_send_recv(comm,
+               command, &answerStructure, connection, bKillable);
+
+       if (!bKillable && sigkill_pending()) {
+               if ((command->header.message_type ==
+                       TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
+                       (answer->create_device_context.error_code ==
+                               S_SUCCESS)) {
+
+                       /*
+                        * CREATE_DEVICE_CONTEXT was interrupted.
+                        */
+                       dprintk(KERN_INFO "tf_send_receive: "
+                               "sending DESTROY_DEVICE_CONTEXT\n");
+                       answerStructure.answer = answer;
+                       answerStructure.answer_copied = false;
+
+                       command->header.message_type =
+                               TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+                       command->header.message_size =
+                               (sizeof(struct
+                                       tf_command_destroy_device_context) -
+                                sizeof(struct tf_command_header))/sizeof(u32);
+                       command->header.operation_id =
+                               (u32) &answerStructure;
+                       command->destroy_device_context.device_context =
+                               answer->create_device_context.
+                                       device_context;
+
+                       goto destroy_context;
+               }
+       }
+
+       if (error == 0) {
+               /*
+                * tf_send_recv returned Success.
+                */
+               if (command->header.message_type ==
+               TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
+                       spin_lock(&(connection->state_lock));
+                       connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+                       spin_unlock(&(connection->state_lock));
+               } else if (command->header.message_type ==
+               TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+                       spin_lock(&(connection->state_lock));
+                       connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+                       spin_unlock(&(connection->state_lock));
+               }
+       } else if (error  == -EINTR) {
+               /*
+                * No command was sent, return failure.
+                */
+               dprintk(KERN_ERR
+                       "tf_send_receive: "
+                       "tf_send_recv failed (error %d) !\n",
+                       error);
+       } else if (error  == -EIO) {
+               /*
+                * A command was sent but its answer is still pending.
+                */
+
+               /* means bKillable is true */
+               dprintk(KERN_ERR
+                       "tf_send_receive: "
+                       "tf_send_recv interrupted (error %d). "
+                       "Send DESTROY_DEVICE_CONTEXT.\n", error);
+
+               /* Send the DESTROY_DEVICE_CONTEXT. */
+               answerStructure.answer = answer;
+               answerStructure.answer_copied = false;
+
+               command->header.message_type =
+                       TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+               command->header.message_size =
+                       (sizeof(struct tf_command_destroy_device_context) -
+                               sizeof(struct tf_command_header))/sizeof(u32);
+               command->header.operation_id =
+                       (u32) &answerStructure;
+               command->destroy_device_context.device_context =
+                       connection->device_context;
+
+               error = tf_send_recv(comm,
+                       command, &answerStructure, connection, false);
+               if (error == -EINTR) {
+                       /*
+                        * Another thread already sent
+                        * DESTROY_DEVICE_CONTEXT. We must still wait
+                        * for the answer to the original command.
+                        */
+                       command = NULL;
+                       goto destroy_context;
+               } else {
+                        /*
+                         * An answer was received. Check if it is the
+                         * answer to the DESTROY_DEVICE_CONTEXT.
+                         */
+                        spin_lock(&comm->lock);
+                        if (answer->header.message_type !=
+                        TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+                               answerStructure.answer_copied = false;
+                        }
+                        spin_unlock(&comm->lock);
+                        if (!answerStructure.answer_copied) {
+                               /*
+                                * Answer to DESTROY_DEVICE_CONTEXT was
+                                * not yet received. Wait for the
+                                * answer.
+                                */
+                               dprintk(KERN_INFO
+                                       "[pid=%d] tf_send_receive: "
+                                       "Answer to DESTROY_DEVICE_CONTEXT "
+                                       "not yet received. Retry\n",
+                                       current->pid);
+                               command = NULL;
+                               goto destroy_context;
+                        }
+               }
+       }
+
+       dprintk(KERN_INFO "tf_send_receive(): Message answer ready\n");
+       goto exit;
+
+destroy_context:
+       error = tf_send_recv(comm,
+       command, &answerStructure, connection, false);
+
+       /*
+        * tf_send_recv cannot return an error because
+        * it's not killable and not within a connection
+        */
+       BUG_ON(error != 0);
+
+       /* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
+       spin_lock(&(connection->state_lock));
+       connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+       spin_unlock(&(connection->state_lock));
+
+exit:
+
+#ifdef CONFIG_SMP
+       ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
+       if (ret_affinity != 0)
+               dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
+#endif
+       return error;
+}
+
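+/*
+ * Illustrative sketch, not part of the original patch: a minimal
+ * caller of tf_send_receive(). The helper name is hypothetical, and
+ * struct tf_command_create_device_context is assumed to follow the
+ * naming pattern of tf_command_destroy_device_context used above.
+ */
+#if 0
+static int example_create_device_context(struct tf_comm *comm,
+       struct tf_connection *connection)
+{
+       union tf_command command;
+       union tf_answer answer;
+
+       memset(&command, 0, sizeof(command));
+       command.header.message_type =
+               TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
+       command.header.message_size =
+               (sizeof(struct tf_command_create_device_context) -
+                       sizeof(struct tf_command_header))/sizeof(u32);
+
+       /* Blocks until the Secure World answers; killable */
+       return tf_send_receive(comm, &command, &answer, connection, true);
+}
+#endif
+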
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+
+/*
+ * Handles all the power management calls.
+ * The operation is the type of power management
+ * operation to be performed.
+ *
+ * This routine only returns if a failure occurred or if
+ * the requested power management operation is "resume".
+ * "Hibernate" and "Shutdown" should block in the
+ * corresponding SMC to the Secure World.
+ */
+int tf_power_management(struct tf_comm *comm,
+       enum TF_POWER_OPERATION operation)
+{
+       u32 status;
+       int error = 0;
+
+       dprintk(KERN_INFO "tf_power_management(%d)\n", operation);
+
+#ifdef CONFIG_TF_ZEBRA
+       if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+               dprintk(KERN_INFO "tf_power_management(%p): "
+                       "succeeded (not started)\n", comm);
+
+               return 0;
+       }
+#endif
+
+       status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+               & TF_STATUS_POWER_STATE_MASK)
+               >> TF_STATUS_POWER_STATE_SHIFT);
+
+       switch (operation) {
+       case TF_POWER_OPERATION_SHUTDOWN:
+               switch (status) {
+               case TF_POWER_MODE_ACTIVE:
+                       error = tf_pm_shutdown(comm);
+
+                       if (error) {
+                               dprintk(KERN_ERR "tf_power_management(): "
+                                       "Failed with error code 0x%08x\n",
+                                       error);
+                               goto error;
+                       }
+                       break;
+
+               default:
+                       goto not_allowed;
+               }
+               break;
+
+       case TF_POWER_OPERATION_HIBERNATE:
+               switch (status) {
+               case TF_POWER_MODE_ACTIVE:
+                       error = tf_pm_hibernate(comm);
+
+                       if (error) {
+                               dprintk(KERN_ERR "tf_power_management(): "
+                                       "Failed with error code 0x%08x\n",
+                                       error);
+                               goto error;
+                       }
+                       break;
+
+               default:
+                       goto not_allowed;
+               }
+               break;
+
+       case TF_POWER_OPERATION_RESUME:
+               error = tf_pm_resume(comm);
+
+               if (error != 0) {
+                       dprintk(KERN_ERR "tf_power_management(): "
+                               "Failed with error code 0x%08x\n",
+                               error);
+                       goto error;
+               }
+               break;
+       }
+
+       dprintk(KERN_INFO "tf_power_management(): succeeded\n");
+       return 0;
+
+not_allowed:
+       dprintk(KERN_ERR "tf_power_management(): "
+               "Power command not allowed in current "
+               "Secure World state %d\n", status);
+       error = -ENOTTY;
+error:
+       return error;
+}
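+
+/*
+ * Illustrative sketch, not part of the original patch: how a platform
+ * suspend/resume path might drive tf_power_management(). The hook
+ * names are hypothetical.
+ */
+#if 0
+static int example_tf_suspend(struct tf_comm *comm)
+{
+       /* Blocks in the SMC until the Secure World has saved its state */
+       return tf_power_management(comm, TF_POWER_OPERATION_HIBERNATE);
+}
+
+static int example_tf_resume(struct tf_comm *comm)
+{
+       return tf_power_management(comm, TF_POWER_OPERATION_RESUME);
+}
+#endif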
+
diff --git a/security/tf_driver/tf_comm.h b/security/tf_driver/tf_comm.h
new file mode 100644 (file)
index 0000000..8921dc1
--- /dev/null
@@ -0,0 +1,202 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_COMM_H__
+#define __TF_COMM_H__
+
+#include "tf_defs.h"
+#include "tf_protocol.h"
+
+/*----------------------------------------------------------------------------
+ * Misc
+ *----------------------------------------------------------------------------*/
+
+void tf_set_current_time(struct tf_comm *comm);
+
+/*
+ * Atomic accesses to 32-bit variables in the L1 Shared buffer
+ */
+static inline u32 tf_read_reg32(const u32 *comm_buffer)
+{
+       u32 result;
+
+       __asm__ __volatile__("@ tf_read_reg32\n"
+               "ldrex %0, [%1]\n"
+               : "=&r" (result)
+               : "r" (comm_buffer)
+       );
+
+       return result;
+}
+
+static inline void tf_write_reg32(void *comm_buffer, u32 value)
+{
+       u32 tmp;
+
+       __asm__ __volatile__("@ tf_write_reg32\n"
+               "1:     ldrex %0, [%2]\n"
+               "       strex %0, %1, [%2]\n"
+               "       teq   %0, #0\n"
+               "       bne   1b"
+               : "=&r" (tmp)
+               : "r" (value), "r" (comm_buffer)
+               : "cc"
+       );
+}
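+
+/*
+ * Illustrative usage, not part of the original header: the queue head
+ * and tail indices in the L1 shared buffer are read and advanced with
+ * these helpers so that the Secure World never observes a torn
+ * update, e.g.:
+ *
+ *     u32 first_answer =
+ *             tf_read_reg32(&comm->l1_buffer->first_answer);
+ *     tf_write_reg32(&comm->l1_buffer->first_answer,
+ *             first_answer + answer_size);
+ */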
+
+/*
+ * Atomic accesses to 64-bit variables in the L1 Shared buffer
+ */
+static inline u64 tf_read_reg64(void *comm_buffer)
+{
+       u64 result;
+
+       __asm__ __volatile__("@ tf_read_reg64\n"
+               "ldrexd %0, [%1]\n"
+               : "=&r" (result)
+               : "r" (comm_buffer)
+       );
+
+       return result;
+}
+
+static inline void tf_write_reg64(void *comm_buffer, u64 value)
+{
+       u64 tmp;
+
+       __asm__ __volatile__("@ tf_write_reg64\n"
+               "1:     ldrexd %0, [%2]\n"
+               "       strexd %0, %1, [%2]\n"
+               "       teq    %0, #0\n"
+               "       bne    1b"
+               : "=&r" (tmp)
+               : "r" (value), "r" (comm_buffer)
+               : "cc"
+       );
+}
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+/* RPC return values */
+#define RPC_NO         0x00    /* No RPC to execute */
+#define RPC_YIELD      0x01    /* Yield RPC */
+#define RPC_NON_YIELD  0x02    /* non-Yield RPC */
+
+int tf_rpc_execute(struct tf_comm *comm);
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+#define L1_DESCRIPTOR_FAULT            (0x00000000)
+#define L2_DESCRIPTOR_FAULT            (0x00000000)
+
+#define L2_DESCRIPTOR_ADDR_MASK         (0xFFFFF000)
+
+#define DESCRIPTOR_V13_12_MASK      (0x3 << PAGE_SHIFT)
+#define DESCRIPTOR_V13_12_GET(a)    ((a & DESCRIPTOR_V13_12_MASK) >> PAGE_SHIFT)
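+
+/*
+ * Worked example, illustrative only: with 4 KiB pages
+ * (PAGE_SHIFT == 12), DESCRIPTOR_V13_12_MASK is 0x3000, so for
+ * vaddr == 0xC0007000:
+ *
+ *     (0xC0007000 & 0x3000) >> 12 == 0x3
+ *
+ * i.e. bits [13:12] of the virtual address, presumably so the Secure
+ * World can take virtual cache aliasing into account when it maps
+ * the page.
+ */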
+
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+       struct tf_coarse_page_table_allocation_context *alloc_context,
+       u32 type);
+
+void tf_free_coarse_page_table(
+       struct tf_coarse_page_table_allocation_context *alloc_context,
+       struct tf_coarse_page_table *coarse_pg_table,
+       int force);
+
+void tf_init_coarse_page_table_allocator(
+       struct tf_coarse_page_table_allocation_context *alloc_context);
+
+void tf_release_coarse_page_table_allocator(
+       struct tf_coarse_page_table_allocation_context *alloc_context);
+
+struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor);
+
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm);
+
+void tf_cleanup_shared_memory(
+       struct tf_coarse_page_table_allocation_context *alloc_context,
+       struct tf_shmem_desc *shmem_desc,
+       u32 full_cleanup);
+
+int tf_fill_descriptor_table(
+       struct tf_coarse_page_table_allocation_context *alloc_context,
+       struct tf_shmem_desc *shmem_desc,
+       u32 buffer,
+       struct vm_area_struct **vmas,
+       u32 descriptors[TF_MAX_COARSE_PAGES],
+       u32 buffer_size,
+       u32 *buffer_start_offset,
+       bool in_user_space,
+       u32 flags,
+       u32 *descriptor_count);
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+int tf_schedule_secure_world(struct tf_comm *comm);
+
+int tf_send_receive(
+       struct tf_comm *comm,
+       union tf_command *command,
+       union tf_answer *answer,
+       struct tf_connection *connection,
+       bool bKillable);
+
+
+/**
+ * Get a pointer to the Secure World description.
+ * This points directly into the L1 shared buffer
+ * and is valid only once the communication has
+ * been initialized.
+ */
+u8 *tf_get_description(struct tf_comm *comm);
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+enum TF_POWER_OPERATION {
+       TF_POWER_OPERATION_HIBERNATE = 1,
+       TF_POWER_OPERATION_SHUTDOWN = 2,
+       TF_POWER_OPERATION_RESUME = 3,
+};
+
+int tf_pm_hibernate(struct tf_comm *comm);
+int tf_pm_resume(struct tf_comm *comm);
+int tf_pm_shutdown(struct tf_comm *comm);
+
+int tf_power_management(struct tf_comm *comm,
+       enum TF_POWER_OPERATION operation);
+
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+int tf_init(struct tf_comm *comm);
+
+void tf_terminate(struct tf_comm *comm);
+
+
+#endif  /* __TF_COMM_H__ */
diff --git a/security/tf_driver/tf_comm_tz.c b/security/tf_driver/tf_comm_tz.c
new file mode 100644 (file)
index 0000000..4c89de8
--- /dev/null
@@ -0,0 +1,885 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_protocol.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+
+/*
+ * Structure common to all SMC operations
+ */
+struct tf_generic_smc {
+       u32 reg0;
+       u32 reg1;
+       u32 reg2;
+       u32 reg3;
+       u32 reg4;
+};
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+static inline void tf_smc_generic_call(
+       struct tf_generic_smc *generic_smc)
+{
+#ifdef CONFIG_SMP
+       long ret;
+       cpumask_t saved_cpu_mask;
+       cpumask_t local_cpu_mask = CPU_MASK_NONE;
+
+       cpu_set(0, local_cpu_mask);
+       sched_getaffinity(0, &saved_cpu_mask);
+       ret = sched_setaffinity(0, &local_cpu_mask);
+       if (ret != 0)
+               dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
+#endif
+
+       __asm__ volatile(
+               "mov r0, %2\n"
+               "mov r1, %3\n"
+               "mov r2, %4\n"
+               "mov r3, %5\n"
+               "mov r4, %6\n"
+               ".word    0xe1600070              @ SMC 0\n"
+               "mov %0, r0\n"
+               "mov %1, r1\n"
+               : "=r" (generic_smc->reg0), "=r" (generic_smc->reg1)
+               : "r" (generic_smc->reg0), "r" (generic_smc->reg1),
+                 "r" (generic_smc->reg2), "r" (generic_smc->reg3),
+                 "r" (generic_smc->reg4)
+               : "r0", "r1", "r2", "r3", "r4");
+
+#ifdef CONFIG_SMP
+               ret = sched_setaffinity(0, &saved_cpu_mask);
+               if (ret != 0)
+                       dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
+#endif
+}
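+
+/*
+ * Illustrative sketch, not part of the original patch: every SMC
+ * wrapper below fills a struct tf_generic_smc the same way;
+ * EXAMPLE_SMC_ID and arg are hypothetical:
+ *
+ *     struct tf_generic_smc generic_smc;
+ *
+ *     generic_smc.reg0 = EXAMPLE_SMC_ID;  (service identifier, in r0)
+ *     generic_smc.reg1 = arg;             (arguments go in r1..r4)
+ *     generic_smc.reg2 = 0;
+ *     generic_smc.reg3 = 0;
+ *     generic_smc.reg4 = 0;
+ *
+ *     tf_smc_generic_call(&generic_smc);
+ *     status = generic_smc.reg0;          (r0 carries the status back)
+ */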
+
+/*
+ * Calls the get protocol version SMC.
+ * Fills the parameter protocol_version with the version number returned by
+ * the SMC.
+ */
+static inline void tf_smc_get_protocol_version(u32 *protocol_version)
+{
+       struct tf_generic_smc generic_smc;
+
+       generic_smc.reg0 = TF_SMC_GET_PROTOCOL_VERSION;
+       generic_smc.reg1 = 0;
+       generic_smc.reg2 = 0;
+       generic_smc.reg3 = 0;
+       generic_smc.reg4 = 0;
+
+       tf_smc_generic_call(&generic_smc);
+       *protocol_version = generic_smc.reg1;
+}
+
+
+/*
+ * Calls the init SMC with the specified parameters.
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static inline int tf_smc_init(u32 shared_page_descriptor)
+{
+       struct tf_generic_smc generic_smc;
+
+       generic_smc.reg0 = TF_SMC_INIT;
+       /* Descriptor for the layer 1 shared buffer */
+       generic_smc.reg1 = shared_page_descriptor;
+       generic_smc.reg2 = 0;
+       generic_smc.reg3 = 0;
+       generic_smc.reg4 = 0;
+
+       tf_smc_generic_call(&generic_smc);
+       if (generic_smc.reg0 != S_SUCCESS)
+               printk(KERN_ERR "tf_smc_init:"
+                       " r0=0x%08X upon return (expected 0x%08X)!\n",
+                       generic_smc.reg0,
+                       S_SUCCESS);
+
+       return generic_smc.reg0;
+}
+
+
+/*
+ * Calls the reset irq SMC.
+ */
+static inline void tf_smc_reset_irq(void)
+{
+       struct tf_generic_smc generic_smc;
+
+       generic_smc.reg0 = TF_SMC_RESET_IRQ;
+       generic_smc.reg1 = 0;
+       generic_smc.reg2 = 0;
+       generic_smc.reg3 = 0;
+       generic_smc.reg4 = 0;
+
+       tf_smc_generic_call(&generic_smc);
+}
+
+
+/*
+ * Calls the WAKE_UP SMC.
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static inline int tf_smc_wake_up(u32 l1_shared_buffer_descriptor,
+       u32 shared_mem_start_offset,
+       u32 shared_mem_size)
+{
+       struct tf_generic_smc generic_smc;
+
+       generic_smc.reg0 = TF_SMC_WAKE_UP;
+       generic_smc.reg1 = shared_mem_start_offset;
+       /* long form command */
+       generic_smc.reg2 = shared_mem_size | 0x80000000;
+       generic_smc.reg3 = l1_shared_buffer_descriptor;
+       generic_smc.reg4 = 0;
+
+       tf_smc_generic_call(&generic_smc);
+
+       if (generic_smc.reg0 != S_SUCCESS)
+               printk(KERN_ERR "tf_smc_wake_up:"
+                       " r0=0x%08X upon return (expected 0x%08X)!\n",
+                       generic_smc.reg0,
+                       S_SUCCESS);
+
+       return generic_smc.reg0;
+}
+
+/*
+ * Calls the N-Yield SMC.
+ */
+static inline void tf_smc_nyield(void)
+{
+       struct tf_generic_smc generic_smc;
+
+       generic_smc.reg0 = TF_SMC_N_YIELD;
+       generic_smc.reg1 = 0;
+       generic_smc.reg2 = 0;
+       generic_smc.reg3 = 0;
+       generic_smc.reg4 = 0;
+
+       tf_smc_generic_call(&generic_smc);
+}
+
+/* Yields the Secure World */
+int tf_schedule_secure_world(struct tf_comm *comm)
+{
+       tf_set_current_time(comm);
+
+       /* yield to the Secure World */
+       tf_smc_nyield();
+
+       return 0;
+}
+
+/*
+ * Returns the L2 descriptor for the specified kernel (init_mm) page.
+ */
+
+#define L2_INIT_DESCRIPTOR_BASE           (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT   (4)
+
+static u32 tf_get_l2init_descriptor(u32 vaddr)
+{
+       struct page *page;
+       u32 paddr;
+       u32 descriptor;
+
+       descriptor = L2_INIT_DESCRIPTOR_BASE;
+
+       /* get physical address and add to descriptor */
+       page = virt_to_page(vaddr);
+       paddr = page_to_phys(page);
+       descriptor |= (paddr & L2_DESCRIPTOR_ADDR_MASK);
+
+       /* Add virtual address v[13:12] bits to descriptor */
+       descriptor |= (DESCRIPTOR_V13_12_GET(vaddr)
+               << L2_INIT_DESCRIPTOR_V13_12_SHIFT);
+
+       descriptor |= tf_get_l2_descriptor_common(vaddr, &init_mm);
+
+       return descriptor;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Free the memory used by the W3B buffer for the specified comm.
+ * This function does nothing if no W3B buffer is allocated for the device.
+ */
+static inline void tf_free_w3b(struct tf_comm *comm)
+{
+       tf_cleanup_shared_memory(
+               &(comm->w3b_cpt_alloc_context),
+               &(comm->w3b_shmem_desc),
+               0);
+
+       tf_release_coarse_page_table_allocator(&(comm->w3b_cpt_alloc_context));
+
+       internal_vfree((void *)comm->w3b);
+       comm->w3b = 0;
+       comm->w3b_shmem_size = 0;
+       clear_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
+}
+
+
+/*
+ * Allocates the W3B buffer for the specified comm.
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static inline int tf_allocate_w3b(struct tf_comm *comm)
+{
+       int error;
+       u32 flags;
+       u32 config_flag_s;
+       u32 *w3b_descriptors;
+       u32 w3b_descriptor_count;
+       u32 w3b_current_size;
+
+       config_flag_s = tf_read_reg32(&comm->l1_buffer->config_flag_s);
+
+retry:
+       if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags))) == 0) {
+               /*
+                * Initialize the shared memory for the W3B
+                */
+               tf_init_coarse_page_table_allocator(
+                       &comm->w3b_cpt_alloc_context);
+       } else {
+               /*
+                * The W3B is allocated but do we have to reallocate a bigger
+                * one?
+                */
+               /* Check H bit */
+               if ((config_flag_s & (1<<4)) != 0) {
+                       /* The size of the W3B may change after SMC_INIT */
+                       /* Read the current value */
+                       w3b_current_size = tf_read_reg32(
+                               &comm->l1_buffer->w3b_size_current_s);
+                       if (comm->w3b_shmem_size > w3b_current_size)
+                               return 0;
+
+                       tf_free_w3b(comm);
+                       goto retry;
+               } else {
+                       return 0;
+               }
+       }
+
+       /* check H bit */
+       if ((config_flag_s & (1<<4)) != 0)
+               /* The size of the W3B may change after SMC_INIT */
+               /* Read the current value */
+               comm->w3b_shmem_size = tf_read_reg32(
+                       &comm->l1_buffer->w3b_size_current_s);
+       else
+               comm->w3b_shmem_size = tf_read_reg32(
+                       &comm->l1_buffer->w3b_size_max_s);
+
+       comm->w3b = (u32) internal_vmalloc(comm->w3b_shmem_size);
+       if (comm->w3b == 0) {
+               printk(KERN_ERR "tf_allocate_w3b():"
+                       " Out of memory for W3B buffer (%u bytes)!\n",
+                       (unsigned int)(comm->w3b_shmem_size));
+               error = -ENOMEM;
+               goto error;
+       }
+
+       /* initialize the w3b_shmem_desc structure */
+       comm->w3b_shmem_desc.type = TF_SHMEM_TYPE_PM_HIBERNATE;
+       INIT_LIST_HEAD(&(comm->w3b_shmem_desc.list));
+
+       flags = (TF_SHMEM_TYPE_READ | TF_SHMEM_TYPE_WRITE);
+
+       /* directly point to the L1 shared buffer W3B descriptors */
+       w3b_descriptors = comm->l1_buffer->w3b_descriptors;
+
+       /*
+        * tf_fill_descriptor_table uses w3b_shmem_offset as an
+        * IN/OUT parameter.
+        */
+
+       error = tf_fill_descriptor_table(
+               &(comm->w3b_cpt_alloc_context),
+               &(comm->w3b_shmem_desc),
+               comm->w3b,
+               NULL,
+               w3b_descriptors,
+               comm->w3b_shmem_size,
+               &(comm->w3b_shmem_offset),
+               false,
+               flags,
+               &w3b_descriptor_count);
+       if (error != 0) {
+               printk(KERN_ERR "tf_allocate_w3b():"
+                       " tf_fill_descriptor_table failed with "
+                       "error code 0x%08x!\n",
+                       error);
+               goto error;
+       }
+
+       set_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
+
+       /* successful completion */
+       return 0;
+
+error:
+       tf_free_w3b(comm);
+
+       return error;
+}
+
+/*
+ * Perform a Secure World shutdown operation.
+ * The routine does not return if the operation succeeds;
+ * it returns an appropriate error code if the operation
+ * fails.
+ */
+int tf_pm_shutdown(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+       /* this function is useless for the TEGRA product */
+       return 0;
+#else
+       int error;
+       union tf_command command;
+       union tf_answer answer;
+
+       dprintk(KERN_INFO "tf_pm_shutdown()\n");
+
+       memset(&command, 0, sizeof(command));
+
+       command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+       command.header.message_size =
+                       (sizeof(struct tf_command_management) -
+                               sizeof(struct tf_command_header))/sizeof(u32);
+
+       command.management.command = TF_MANAGEMENT_SHUTDOWN;
+
+       error = tf_send_receive(
+               comm,
+               &command,
+               &answer,
+               NULL,
+               false);
+
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_pm_shutdown(): "
+                       "tf_send_receive failed (error %d)!\n",
+                       error);
+               return error;
+       }
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+       if (answer.header.error_code != 0)
+               dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
+       else
+               dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
+#endif
+
+       return answer.header.error_code;
+#endif
+}
+
+
+/*
+ * Perform a Secure World hibernate operation.
+ * The routine does not return if the operation succeeds;
+ * it returns an appropriate error code if the operation
+ * fails.
+ */
+int tf_pm_hibernate(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+       /* this function is useless for the TEGRA product */
+       return 0;
+#else
+       int error;
+       union tf_command command;
+       union tf_answer answer;
+       u32 first_command;
+       u32 first_free_command;
+
+       dprintk(KERN_INFO "tf_pm_hibernate()\n");
+
+       error = tf_allocate_w3b(comm);
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_pm_hibernate(): "
+                       "tf_allocate_w3b failed (error %d)!\n",
+                       error);
+               return error;
+       }
+
+       /*
+        * As the polling thread is already hibernating, we
+        * should send the message and receive the answer ourselves.
+        */
+
+       /* build the "prepare to hibernate" message */
+       command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+       command.management.command = TF_MANAGEMENT_HIBERNATE;
+       /* Long Form Command */
+       command.management.shared_mem_descriptors[0] = 0;
+       command.management.shared_mem_descriptors[1] = 0;
+       command.management.w3b_size =
+               comm->w3b_shmem_size | 0x80000000;
+       command.management.w3b_start_offset =
+               comm->w3b_shmem_offset;
+       command.header.operation_id = (u32) &answer;
+
+       tf_dump_command(&command);
+
+       /* find a slot to send the message in */
+
+       /* AFY: why not use the function tf_send_receive?? We are
+        * duplicating a lot of subtle code here. And it's not going to be
+        * tested because power management is currently not supported by the
+        * secure world. */
+       for (;;) {
+               int queue_words_count, command_size;
+
+               spin_lock(&(comm->lock));
+
+               first_command = tf_read_reg32(
+                       &comm->l1_buffer->first_command);
+               first_free_command = tf_read_reg32(
+                       &comm->l1_buffer->first_free_command);
+
+               queue_words_count = first_free_command - first_command;
+               command_size     = command.header.message_size
+                       + sizeof(struct tf_command_header)/sizeof(u32);
+               if ((queue_words_count + command_size) <
+                               TF_N_MESSAGE_QUEUE_CAPACITY) {
+                       /* Command queue is not full */
+                       memcpy(&comm->l1_buffer->command_queue[
+                               first_free_command %
+                                       TF_N_MESSAGE_QUEUE_CAPACITY],
+                               &command,
+                               command_size * sizeof(u32));
+
+                       tf_write_reg32(&comm->l1_buffer->first_free_command,
+                               first_free_command + command_size);
+
+                       spin_unlock(&(comm->lock));
+                       break;
+               }
+
+               spin_unlock(&(comm->lock));
+               (void)tf_schedule_secure_world(comm);
+       }
+
+       /* now wait for the answer, dispatching other answers */
+       while (1) {
+               u32 first_answer;
+               u32 first_free_answer;
+
+               /* check all the answers */
+               first_free_answer = tf_read_reg32(
+                       &comm->l1_buffer->first_free_answer);
+               first_answer = tf_read_reg32(
+                       &comm->l1_buffer->first_answer);
+
+               if (first_answer != first_free_answer) {
+                       int bFoundAnswer = 0;
+
+                       do {
+                               /* answer queue not empty */
+                               union tf_answer tmp_answer;
+                               struct tf_answer_header header;
+                               /* size of the command in 32-bit words */
+                               int command_size;
+
+                               /* get the message_size */
+                               memcpy(&header,
+                                       &comm->l1_buffer->answer_queue[
+                                               first_answer %
+                                               TF_S_ANSWER_QUEUE_CAPACITY],
+                                       sizeof(struct tf_answer_header));
+                               command_size = header.message_size +
+                                       sizeof(struct tf_answer_header)/sizeof(u32);
+
+                               /*
+                                * NOTE: message_size is the number of words
+                                * following the first word
+                                */
+                               memcpy(&tmp_answer,
+                                       &comm->l1_buffer->answer_queue[
+                                               first_answer %
+                                               TF_S_ANSWER_QUEUE_CAPACITY],
+                                       command_size * sizeof(u32));
+
+                               tf_dump_answer(&tmp_answer);
+
+                               if (tmp_answer.header.operation_id ==
+                                               (u32) &answer) {
+                                       /*
+                                        * this is the answer to the "prepare to
+                                        * hibernate" message
+                                        */
+                                       memcpy(&answer,
+                                               &tmp_answer,
+                                               command_size * sizeof(u32));
+
+                                       bFoundAnswer = 1;
+                                       tf_write_reg32(
+                                               &comm->l1_buffer->first_answer,
+                                               first_answer + command_size);
+                                       break;
+                               } else {
+                                       /*
+                                        * this is a standard message answer,
+                                        * dispatch it
+                                        */
+                                       struct tf_answer_struct
+                                               *answerStructure;
+
+                                       answerStructure =
+                                               (struct tf_answer_struct *)
+                                               tmp_answer.header.operation_id;
+
+                                       memcpy(answerStructure->answer,
+                                               &tmp_answer,
+                                               command_size * sizeof(u32));
+
+                                       answerStructure->answer_copied = true;
+                               }
+
+                               tf_write_reg32(
+                                       &comm->l1_buffer->first_answer,
+                                       first_answer + command_size);
+                               first_answer += command_size;
+                       } while (first_answer != first_free_answer);
+
+                       if (bFoundAnswer)
+                               break;
+               }
+
+               /*
+                * Since the Secure World is at least processing the "prepare
+                * to hibernate" message, its timeout must be immediate, so
+                * there is no need to check the timeout nor to schedule() the
+                * current thread.
+                */
+               (void)tf_schedule_secure_world(comm);
+       } /* while (1) */
+
+       printk(KERN_INFO "tf_driver: hibernate.\n");
+       return 0;
+#endif
+}
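+
+/*
+ * Illustrative sketch, not part of the original driver: the command
+ * queue used above is a ring of 32-bit words whose read/write counters
+ * only ever grow; the modulo is applied when indexing. A hypothetical
+ * helper capturing the occupancy test performed in tf_pm_hibernate():
+ */
+static inline int tf_example_queue_has_room(u32 first_command,
+       u32 first_free_command, u32 command_size)
+{
+       u32 queued_words = first_free_command - first_command;
+
+       return (queued_words + command_size) < TF_N_MESSAGE_QUEUE_CAPACITY;
+}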
+
+
+/*
+ * Perform a Secure World resume operation.
+ * The routine returns once the Secure World is active again,
+ * or when an error occurs during the "resume" process.
+ */
+int tf_pm_resume(struct tf_comm *comm)
+{
+#ifdef CONFIG_TFN
+       /* this function is not used for the TEGRA product */
+       return 0;
+#else
+       int error;
+       u32 status;
+
+       dprintk(KERN_INFO "tf_pm_resume()\n");
+
+       error = tf_smc_wake_up(
+               tf_get_l2init_descriptor((u32)comm->l1_buffer),
+               comm->w3b_shmem_offset,
+               comm->w3b_shmem_size);
+
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_pm_resume(): "
+                       "tf_smc_wake_up failed (error %d)!\n",
+                       error);
+               return error;
+       }
+
+       status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+               & TF_STATUS_POWER_STATE_MASK)
+               >> TF_STATUS_POWER_STATE_SHIFT);
+
+       while ((status != TF_POWER_MODE_ACTIVE)
+                       && (status != TF_POWER_MODE_PANIC)) {
+               tf_smc_nyield();
+
+               status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
+                       & TF_STATUS_POWER_STATE_MASK)
+                       >> TF_STATUS_POWER_STATE_SHIFT);
+
+               /*
+                * As this may last quite a while, call the kernel scheduler to
+                * hand over CPU for other operations
+                */
+               schedule();
+       }
+
+       switch (status) {
+       case TF_POWER_MODE_ACTIVE:
+               break;
+
+       case TF_POWER_MODE_PANIC:
+               dprintk(KERN_ERR "tf_pm_resume(): "
+                       "Secure World POWER_MODE_PANIC!\n");
+               return -EINVAL;
+
+       default:
+               dprintk(KERN_ERR "tf_pm_resume(): "
+                       "unexpected Secure World POWER_MODE (%d)!\n", status);
+               return -EINVAL;
+       }
+
+       dprintk(KERN_INFO "tf_pm_resume() succeeded\n");
+       return 0;
+#endif
+}
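+
+/*
+ * Illustrative sketch, not part of the original driver: a platform
+ * power-management hook (hypothetical) would pair the two routines
+ * above, sending the hibernate message on the way down and issuing
+ * the wake-up SMC on the way back up.
+ */
+static int tf_example_pm_cycle(struct tf_comm *comm)
+{
+       int error;
+
+       error = tf_pm_hibernate(comm);
+       if (error != 0)
+               return error;
+
+       /* ... the platform enters and leaves its low-power state ... */
+
+       return tf_pm_resume(comm);
+}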
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Handles the software interrupts issued by the Secure World.
+ */
+static irqreturn_t tf_soft_int_handler(int irq, void *dev_id)
+{
+       struct tf_comm *comm = (struct tf_comm *) dev_id;
+
+       if (comm->l1_buffer == NULL)
+               return IRQ_NONE;
+
+       if ((tf_read_reg32(&comm->l1_buffer->status_s) &
+                       TF_STATUS_P_MASK) == 0)
+               /* interrupt not issued by the Trusted Foundations Software */
+               return IRQ_NONE;
+
+       tf_smc_reset_irq();
+
+       /* signal N_SM_EVENT */
+       wake_up(&comm->wait_queue);
+
+       return IRQ_HANDLED;
+}
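+
+/*
+ * Illustrative sketch, not part of the original driver: the handler
+ * above only wakes comm->wait_queue. A consumer (hypothetical helper)
+ * would sleep on that queue until the Secure World has posted at
+ * least one answer, i.e. until the answer queue indices differ:
+ */
+static int tf_example_wait_for_answer(struct tf_comm *comm)
+{
+       return wait_event_interruptible(comm->wait_queue,
+               tf_read_reg32(&comm->l1_buffer->first_answer) !=
+               tf_read_reg32(&comm->l1_buffer->first_free_answer));
+}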
+
+/*
+ * Initializes the communication with the Secure World.
+ * The L1 shared buffer is allocated and the Secure World
+ * is yielded for the first time.
+ *
+ * Returns S_SUCCESS once the communication with the Secure World
+ * is up and running, or an appropriate error code upon failure.
+ */
+int tf_init(struct tf_comm *comm)
+{
+       int error;
+       struct page *buffer_page;
+       u32 protocol_version;
+
+       dprintk(KERN_INFO "tf_init()\n");
+
+       spin_lock_init(&(comm->lock));
+       comm->flags = 0;
+       comm->l1_buffer = NULL;
+       init_waitqueue_head(&(comm->wait_queue));
+
+       /*
+        * Check that the Secure World protocol version is the expected one.
+        */
+       tf_smc_get_protocol_version(&protocol_version);
+
+       if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
+                       != TF_S_PROTOCOL_MAJOR_VERSION) {
+               printk(KERN_ERR "tf_init():"
+                       " Unsupported Secure World Major Version "
+                       "(0x%02X, expected 0x%02X)!\n",
+                       GET_PROTOCOL_MAJOR_VERSION(protocol_version),
+                       TF_S_PROTOCOL_MAJOR_VERSION);
+               error = -EIO;
+               goto error;
+       }
+
+       /*
+        * Register the software interrupt handler if required.
+        */
+       if (comm->soft_int_irq != -1) {
+               dprintk(KERN_INFO "tf_init(): "
+                       "Registering software interrupt handler (IRQ %d)\n",
+                       comm->soft_int_irq);
+
+               error = request_irq(comm->soft_int_irq,
+                       tf_soft_int_handler,
+                       IRQF_SHARED,
+                       TF_DEVICE_BASE_NAME,
+                       comm);
+               if (error != 0) {
+                       dprintk(KERN_ERR "tf_init(): "
+                               "request_irq failed for irq %d (error %d)\n",
+                       comm->soft_int_irq, error);
+                       goto error;
+               }
+               set_bit(TF_COMM_FLAG_IRQ_REQUESTED, &(comm->flags));
+       }
+
+       /*
+        * Allocate and initialize the L1 shared buffer.
+        */
+       comm->l1_buffer = (void *) internal_get_zeroed_page(GFP_KERNEL);
+       if (comm->l1_buffer == NULL) {
+               printk(KERN_ERR "tf_init():"
+                       " get_zeroed_page failed for L1 shared buffer!\n");
+               error = -ENOMEM;
+               goto error;
+       }
+
+       /*
+        * Ensure the page storing the L1 shared buffer is mapped.
+        */
+       buffer_page = virt_to_page(comm->l1_buffer);
+       trylock_page(buffer_page);
+
+       dprintk(KERN_INFO "tf_init(): "
+               "L1 shared buffer allocated at virtual:%p, "
+               "physical:%p (page:%p)\n",
+               comm->l1_buffer,
+               (void *)virt_to_phys(comm->l1_buffer),
+               buffer_page);
+
+       set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags));
+
+       /*
+        * Init SMC
+        */
+       error = tf_smc_init(
+               tf_get_l2init_descriptor((u32)comm->l1_buffer));
+       if (error != S_SUCCESS) {
+               dprintk(KERN_ERR "tf_init(): "
+                       "tf_smc_init failed (error 0x%08X)!\n",
+                       error);
+               goto error;
+       }
+
+       /*
+        * Check whether interrupts are actually enabled in the
+        * Secure World; if not, remove the IRQ handler.
+        */
+       if ((tf_read_reg32(&comm->l1_buffer->config_flag_s) &
+                       TF_CONFIG_FLAG_S) == 0) {
+               if (test_and_clear_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+                               &(comm->flags)) != 0) {
+                       dprintk(KERN_INFO "tf_init(): "
+                               "Interrupts not used, unregistering "
+                               "softint (IRQ %d)\n",
+                               comm->soft_int_irq);
+
+                       free_irq(comm->soft_int_irq, comm);
+               }
+       } else {
+               if (test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+                               &(comm->flags)) == 0) {
+                       /*
+                        * Interrupts are enabled in the Secure World, but not
+                        * handled by the driver.
+                        */
+                       dprintk(KERN_ERR "tf_init(): "
+                               "soft_interrupt argument not provided\n");
+                       error = -EINVAL;
+                       goto error;
+               }
+       }
+
+       /*
+        * Successful completion.
+        */
+
+       /* yield for the first time */
+       (void)tf_schedule_secure_world(comm);
+
+       dprintk(KERN_INFO "tf_init(): Success\n");
+       return S_SUCCESS;
+
+error:
+       /*
+        * Error handling.
+        */
+       dprintk(KERN_INFO "tf_init(): Failure (error %d)\n",
+               error);
+       tf_terminate(comm);
+       return error;
+}
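+
+/*
+ * Illustrative sketch, not part of the original driver: a hypothetical
+ * setup path would fill in soft_int_irq before calling tf_init(), and
+ * need not call tf_terminate() on failure, since tf_init() already
+ * cleans up after its own failures.
+ */
+static int tf_example_setup(struct tf_comm *comm, int irq)
+{
+       comm->soft_int_irq = irq;       /* -1 when running without softint */
+       return tf_init(comm);
+}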
+
+
+/*
+ * Attempt to terminate the communication with the Secure World.
+ * The L1 shared buffer is freed.
+ * Calling this routine terminates the communication with the
+ * Secure World for good: there is no way to inform the Secure World
+ * of a new L1 shared buffer to be used once it has been initialized.
+ */
+void tf_terminate(struct tf_comm *comm)
+{
+       dprintk(KERN_INFO "tf_terminate()\n");
+
+       set_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags));
+
+       if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED,
+                       &(comm->flags))) != 0) {
+               dprintk(KERN_INFO "tf_terminate(): "
+                       "Freeing the W3B buffer...\n");
+               tf_free_w3b(comm);
+       }
+
+       if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
+                       &(comm->flags))) != 0) {
+               __clear_page_locked(virt_to_page(comm->l1_buffer));
+               internal_free_page((unsigned long) comm->l1_buffer);
+       }
+
+       if ((test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
+                       &(comm->flags))) != 0) {
+               dprintk(KERN_INFO "tf_terminate(): "
+                       "Unregistering softint (IRQ %d)\n",
+                       comm->soft_int_irq);
+               free_irq(comm->soft_int_irq, comm);
+       }
+}
diff --git a/security/tf_driver/tf_conn.c b/security/tf_driver/tf_conn.c
new file mode 100644 (file)
index 0000000..3148fec
--- /dev/null
@@ -0,0 +1,1574 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include "s_version.h"
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_comm.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_crypto.h"
+#endif
+
+#ifdef CONFIG_ANDROID
+#define TF_PRIVILEGED_UID_GID 1000 /* Android system AID */
+#else
+#define TF_PRIVILEGED_UID_GID 0
+#endif
+
+/*----------------------------------------------------------------------------
+ * Management of the shared memory blocks.
+ *
+ * Shared memory blocks are the blocks registered through
+ * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
+ *----------------------------------------------------------------------------*/
+
+/**
+ * Unmaps a shared memory block.
+ **/
+void tf_unmap_shmem(
+               struct tf_connection *connection,
+               struct tf_shmem_desc *shmem_desc,
+               u32 full_cleanup)
+{
+       /* check shmem_desc contains a descriptor */
+       if (shmem_desc == NULL)
+               return;
+
+       dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc);
+
+retry:
+       mutex_lock(&(connection->shmem_mutex));
+       if (atomic_read(&shmem_desc->ref_count) > 1) {
+               /*
+                * Shared mem still in use; wait for the other operations
+                * to complete before actually unmapping it.
+                */
+               dprintk(KERN_INFO "Descriptor in use\n");
+               mutex_unlock(&(connection->shmem_mutex));
+               schedule();
+               goto retry;
+       }
+
+       tf_cleanup_shared_memory(
+                       &(connection->cpt_alloc_context),
+                       shmem_desc,
+                       full_cleanup);
+
+       list_del(&(shmem_desc->list));
+
+       if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+                       (full_cleanup != 0)) {
+               internal_kfree(shmem_desc);
+
+               atomic_dec(&(connection->shmem_count));
+       } else {
+               /*
+                * This is a preallocated shared memory descriptor; add it
+                * back to the free list. Since the device context is
+                * unmapped last, it is always the first element of the
+                * free list if no device context has been created.
+                */
+               shmem_desc->block_identifier = 0;
+               list_add(&(shmem_desc->list), &(connection->free_shmem_list));
+       }
+
+       mutex_unlock(&(connection->shmem_mutex));
+}
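+
+/*
+ * Illustrative sketch, not part of the original driver:
+ * tf_unmap_shmem() above waits for ref_count to drop back to 1, so
+ * code using a descriptor outside shmem_mutex is expected to hold a
+ * reference around the access, along the lines of this hypothetical
+ * pair:
+ */
+static inline void tf_example_shmem_get(struct tf_shmem_desc *desc)
+{
+       atomic_inc(&desc->ref_count);
+}
+
+static inline void tf_example_shmem_put(struct tf_shmem_desc *desc)
+{
+       atomic_dec(&desc->ref_count);
+}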
+
+
+/**
+ * Finds the first available slot for a new block of shared memory
+ * and maps the user buffer.
+ * Updates the descriptors to L1 descriptors and fills in the
+ * buffer_start_offset and buffer_size fields.
+ * shmem_desc is updated to point to the mapped shared memory
+ * descriptor.
+ **/
+int tf_map_shmem(
+               struct tf_connection *connection,
+               u32 buffer,
+               /* flags for read-write access rights on the memory */
+               u32 flags,
+               bool in_user_space,
+               u32 descriptors[TF_MAX_COARSE_PAGES],
+               u32 *buffer_start_offset,
+               u32 buffer_size,
+               struct tf_shmem_desc **shmem_desc,
+               u32 *descriptor_count)
+{
+       struct tf_shmem_desc *desc = NULL;
+       int error;
+
+       dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
+                                       connection,
+                                       (void *) buffer,
+                                       flags);
+
+       mutex_lock(&(connection->shmem_mutex));
+
+       /*
+        * Check whether the list of free shared memory
+        * descriptors is empty.
+        */
+       if (list_empty(&(connection->free_shmem_list))) {
+               if (atomic_read(&(connection->shmem_count)) ==
+                               TF_SHMEM_MAX_COUNT) {
+                       printk(KERN_ERR "tf_map_shmem(%p):"
+                               " maximum shared memories already registered\n",
+                               connection);
+                       error = -ENOMEM;
+                       goto error;
+               }
+
+               /* no descriptor available, allocate a new one */
+
+               desc = (struct tf_shmem_desc *) internal_kmalloc(
+                       sizeof(*desc), GFP_KERNEL);
+               if (desc == NULL) {
+                       printk(KERN_ERR "tf_map_shmem(%p):"
+                               " failed to allocate descriptor\n",
+                               connection);
+                       error = -ENOMEM;
+                       goto error;
+               }
+
+               /* Initialize the structure */
+               desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
+               atomic_set(&desc->ref_count, 1);
+               INIT_LIST_HEAD(&(desc->list));
+
+               atomic_inc(&(connection->shmem_count));
+       } else {
+               /* take the first free shared memory descriptor */
+               desc = list_first_entry(&(connection->free_shmem_list),
+                       struct tf_shmem_desc, list);
+               list_del(&(desc->list));
+       }
+
+       /* Add the descriptor to the used list */
+       list_add(&(desc->list), &(connection->used_shmem_list));
+
+       error = tf_fill_descriptor_table(
+                       &(connection->cpt_alloc_context),
+                       desc,
+                       buffer,
+                       connection->vmas,
+                       descriptors,
+                       buffer_size,
+                       buffer_start_offset,
+                       in_user_space,
+                       flags,
+                       descriptor_count);
+
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_map_shmem(%p):"
+                       " tf_fill_descriptor_table failed with error "
+                       "code %d!\n",
+                       connection,
+                       error);
+               goto error;
+       }
+       desc->client_buffer = (u8 *) buffer;
+
+       /*
+        * Successful completion.
+        */
+       *shmem_desc = desc;
+       mutex_unlock(&(connection->shmem_mutex));
+       dprintk(KERN_DEBUG "tf_map_shmem: success\n");
+       return 0;
+
+
+       /*
+        * Error handling.
+        */
+error:
+       mutex_unlock(&(connection->shmem_mutex));
+       dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
+               error);
+
+       tf_unmap_shmem(
+                       connection,
+                       desc,
+                       0);
+
+       return error;
+}
+
+
+
+/* This function is a copy of find_vma() from Linux kernel 2.6.15,
+ * with some fixes:
+ *     - the memory block may end on vm_end
+ *     - the full memory block must lie within the memory area
+ *     - NULL is guaranteed to be returned if no memory area is found
+ */
+struct vm_area_struct *tf_find_vma(struct mm_struct *mm,
+       unsigned long addr, unsigned long size)
+{
+       struct vm_area_struct *vma = NULL;
+
+       dprintk(KERN_INFO
+               "tf_find_vma addr=0x%lX size=0x%lX\n", addr, size);
+
+       if (mm) {
+               /* Check the cache first. */
+               /* (Cache hit rate is typically around 35%.) */
+               vma = mm->mmap_cache;
+               if (!(vma && vma->vm_end >= (addr+size) &&
+                               vma->vm_start <= addr)) {
+                       struct rb_node *rb_node;
+
+                       rb_node = mm->mm_rb.rb_node;
+                       vma = NULL;
+
+                       while (rb_node) {
+                               struct vm_area_struct *vma_tmp;
+
+                               vma_tmp = rb_entry(rb_node,
+                                       struct vm_area_struct, vm_rb);
+
+                               dprintk(KERN_INFO
+                                       "vma_tmp->vm_start=0x%lX"
+                                       "vma_tmp->vm_end=0x%lX\n",
+                                       vma_tmp->vm_start,
+                                       vma_tmp->vm_end);
+
+                               if (vma_tmp->vm_end >= (addr+size)) {
+                                       vma = vma_tmp;
+                                       if (vma_tmp->vm_start <= addr)
+                                               break;
+
+                                       rb_node = rb_node->rb_left;
+                               } else {
+                                       rb_node = rb_node->rb_right;
+                               }
+                       }
+
+                       if (vma)
+                               mm->mmap_cache = vma;
+                       if (rb_node == NULL)
+                               vma = NULL;
+               }
+       }
+       return vma;
+}
+
+int tf_validate_shmem_and_flags(
+       u32 shmem,
+       u32 shmem_size,
+       u32 flags)
+{
+       struct vm_area_struct *vma;
+       u32 chunk;
+
+       if (shmem_size == 0)
+               /* This is always valid */
+               return 0;
+
+       if ((shmem + shmem_size) < shmem)
+               /* Overflow */
+               return -EINVAL;
+
+       down_read(&current->mm->mmap_sem);
+
+       /*
+        * When looking up a memory address, split the buffer into
+        * chunks of at most PAGE_SIZE.
+        */
+       chunk = PAGE_SIZE - (shmem & (PAGE_SIZE-1));
+       if (chunk > shmem_size)
+               chunk = shmem_size;
+
+       do {
+               vma = tf_find_vma(current->mm, shmem, chunk);
+
+               if (vma == NULL) {
+                       dprintk(KERN_ERR "%s: area not found\n", __func__);
+                       goto error;
+               }
+
+               if (flags & TF_SHMEM_TYPE_READ)
+                       if (!(vma->vm_flags & VM_READ)) {
+                               dprintk(KERN_ERR "%s: no read permission\n",
+                                       __func__);
+                               goto error;
+                       }
+               if (flags & TF_SHMEM_TYPE_WRITE)
+                       if (!(vma->vm_flags & VM_WRITE)) {
+                               dprintk(KERN_ERR "%s: no write permission\n",
+                                       __func__);
+                               goto error;
+                       }
+
+               shmem_size -= chunk;
+               shmem += chunk;
+               chunk = (shmem_size <= PAGE_SIZE ?
+                               shmem_size : PAGE_SIZE);
+       } while (shmem_size != 0);
+
+       up_read(&current->mm->mmap_sem);
+       return 0;
+
+error:
+       up_read(&current->mm->mmap_sem);
+       return -EFAULT;
+}
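+
+/*
+ * Illustrative sketch, not part of the original driver: a caller
+ * validating a user buffer before mapping it combines the read and
+ * write flags according to the intended access; e.g. for an in/out
+ * buffer (hypothetical wrapper):
+ */
+static int tf_example_validate_inout(u32 buffer, u32 size)
+{
+       return tf_validate_shmem_and_flags(buffer, size,
+               TF_SHMEM_TYPE_READ | TF_SHMEM_TYPE_WRITE);
+}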
+
+
+static int tf_map_temp_shmem(struct tf_connection *connection,
+       struct tf_command_param_temp_memref *temp_memref,
+       u32 param_type,
+       struct tf_shmem_desc **shmem_desc)
+{
+       u32 flags;
+       int error = S_SUCCESS;
+       bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+
+       dprintk(KERN_INFO "tf_map_temp_shmem(%p, "
+               "0x%08x[size=0x%08x], offset=0x%08x)\n",
+               connection,
+               temp_memref->descriptor,
+               temp_memref->size,
+               temp_memref->offset);
+
+       switch (param_type) {
+       case TF_PARAM_TYPE_MEMREF_TEMP_INPUT:
+               flags = TF_SHMEM_TYPE_READ;
+               break;
+       case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
+               flags = TF_SHMEM_TYPE_WRITE;
+               break;
+       case TF_PARAM_TYPE_MEMREF_TEMP_INOUT:
+               flags = TF_SHMEM_TYPE_WRITE | TF_SHMEM_TYPE_READ;
+               break;
+       default:
+               error = -EINVAL;
+               goto error;
+       }
+
+       if (temp_memref->descriptor == 0) {
+               /* NULL tmpref */
+               temp_memref->offset = 0;
+               *shmem_desc = NULL;
+       } else if ((temp_memref->descriptor != 0) &&
+                       (temp_memref->size == 0)) {
+               /* Empty tmpref */
+               temp_memref->offset = temp_memref->descriptor;
+               temp_memref->descriptor = 0;
+               temp_memref->size = 0;
+               *shmem_desc = NULL;
+       } else {
+               /* Map the temp shmem block */
+
+               u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+               u32 descriptor_count;
+
+               if (in_user_space) {
+                       error = tf_validate_shmem_and_flags(
+                               temp_memref->descriptor,
+                               temp_memref->size,
+                               flags);
+                       if (error != 0)
+                               goto error;
+               }
+
+               error = tf_map_shmem(
+                               connection,
+                               temp_memref->descriptor,
+                               flags,
+                               in_user_space,
+                               shared_mem_descriptors,
+                               &(temp_memref->offset),
+                               temp_memref->size,
+                               shmem_desc,
+                               &descriptor_count);
+               temp_memref->descriptor = shared_mem_descriptors[0];
+       }
+
+error:
+       return error;
+}
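+
+/*
+ * Illustrative sketch, not part of the original driver: the callers
+ * below treat a parameter as a *temporary* memref when the memref
+ * flag is set but the registered-memref flag is not. A hypothetical
+ * predicate for that test:
+ */
+static inline int tf_example_is_temp_memref(int param_type)
+{
+       return (param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+                       TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG)) ==
+               TF_PARAM_TYPE_MEMREF_FLAG;
+}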
+
+/*
+ * Clean up a list of shared memory descriptors.
+ */
+static void tf_shared_memory_cleanup_list(
+               struct tf_connection *connection,
+               struct list_head *shmem_desc_list)
+{
+       while (!list_empty(shmem_desc_list)) {
+               struct tf_shmem_desc *shmem_desc;
+
+               shmem_desc = list_first_entry(shmem_desc_list,
+                       struct tf_shmem_desc, list);
+
+               tf_unmap_shmem(connection, shmem_desc, 1);
+       }
+}
+
+
+/*
+ * Clean up the shared memory information in the connection.
+ * Releases all allocated pages.
+ */
+static void tf_cleanup_shared_memories(struct tf_connection *connection)
+{
+       /* Clean up the lists of used and free descriptors. This is
+        * done outside the mutex, because tf_unmap_shmem() takes the
+        * mutex itself.
+        */
+       tf_shared_memory_cleanup_list(connection,
+               &connection->used_shmem_list);
+       tf_shared_memory_cleanup_list(connection,
+               &connection->free_shmem_list);
+
+       mutex_lock(&(connection->shmem_mutex));
+
+       /* Free the Vmas page */
+       if (connection->vmas) {
+               internal_free_page((unsigned long) connection->vmas);
+               connection->vmas = NULL;
+       }
+
+       tf_release_coarse_page_table_allocator(
+               &(connection->cpt_alloc_context));
+
+       mutex_unlock(&(connection->shmem_mutex));
+}
+
+
+/*
+ * Initialize the shared memory in a connection.
+ * Allocates the minimum memory to be provided
+ * for shared memory management
+ */
+int tf_init_shared_memory(struct tf_connection *connection)
+{
+       int error;
+       int i;
+       int coarse_page_index;
+
+       /*
+        * We only need to initialize special elements and attempt to
+        * allocate the minimum number of shared memory descriptors we
+        * want to support.
+        */
+
+       mutex_init(&(connection->shmem_mutex));
+       INIT_LIST_HEAD(&(connection->free_shmem_list));
+       INIT_LIST_HEAD(&(connection->used_shmem_list));
+       atomic_set(&(connection->shmem_count), 0);
+
+       tf_init_coarse_page_table_allocator(
+               &(connection->cpt_alloc_context));
+
+
+       /*
+        * Preallocate 3 descriptors to increase the chances that a
+        * connection succeeds in allocating shared memory.
+        */
+       for (i = 0; i < 3; i++) {
+               struct tf_shmem_desc *shmem_desc =
+                       (struct tf_shmem_desc *) internal_kmalloc(
+                               sizeof(*shmem_desc), GFP_KERNEL);
+
+               if (shmem_desc == NULL) {
+                       printk(KERN_ERR "tf_init_shared_memory(%p):"
+                               " failed to pre allocate descriptor %d\n",
+                               connection,
+                               i);
+                       error = -ENOMEM;
+                       goto error;
+               }
+
+               for (coarse_page_index = 0;
+                    coarse_page_index < TF_MAX_COARSE_PAGES;
+                    coarse_page_index++) {
+                       struct tf_coarse_page_table *coarse_pg_table;
+
+                       coarse_pg_table = tf_alloc_coarse_page_table(
+                               &(connection->cpt_alloc_context),
+                               TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
+
+                       if (coarse_pg_table == NULL) {
+                               printk(KERN_ERR "tf_init_shared_memory(%p)"
+                                       ": descriptor %d coarse page %d - "
+                                       "tf_alloc_coarse_page_table() "
+                                       "failed\n",
+                                       connection,
+                                       i,
+                                       coarse_page_index);
+                               error = -ENOMEM;
+                               goto error;
+                       }
+
+                       shmem_desc->coarse_pg_table[coarse_page_index] =
+                               coarse_pg_table;
+               }
+               shmem_desc->coarse_pg_table_count = 0;
+
+               shmem_desc->type = TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
+               atomic_set(&shmem_desc->ref_count, 1);
+
+               /*
+                * Add this preallocated descriptor to the list of free
+                * descriptors. Keep the device-context-specific one at
+                * the beginning of the list.
+                */
+               INIT_LIST_HEAD(&(shmem_desc->list));
+               list_add_tail(&(shmem_desc->list),
+                       &(connection->free_shmem_list));
+       }
+
+       /* allocate memory for the vmas structure */
+       connection->vmas =
+               (struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
+       if (connection->vmas == NULL) {
+               printk(KERN_ERR "tf_init_shared_memory(%p):"
+                       " vmas - failed to get_zeroed_page\n",
+                       connection);
+               error = -ENOMEM;
+               goto error;
+       }
+
+       return 0;
+
+error:
+       tf_cleanup_shared_memories(connection);
+       return error;
+}
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+       struct tf_connection *connection)
+{
+       union tf_command command;
+       union tf_answer  answer;
+       int error = 0;
+
+       dprintk(KERN_INFO "tf_create_device_context(%p)\n",
+                       connection);
+
+       command.create_device_context.message_type =
+               TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
+       command.create_device_context.message_size =
+               (sizeof(struct tf_command_create_device_context)
+                       - sizeof(struct tf_command_header))/sizeof(u32);
+       command.create_device_context.operation_id = (u32) &answer;
+       command.create_device_context.device_context_id = (u32) connection;
+
+       error = tf_send_receive(
+               &connection->dev->sm,
+               &command,
+               &answer,
+               connection,
+               true);
+
+       if ((error != 0) ||
+               (answer.create_device_context.error_code != S_SUCCESS))
+               goto error;
+
+       /*
+        * CREATE_DEVICE_CONTEXT succeeded;
+        * store the device context handle and update the connection state.
+        */
+       connection->device_context =
+               answer.create_device_context.device_context;
+       spin_lock(&(connection->state_lock));
+       connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+       spin_unlock(&(connection->state_lock));
+
+       /* successful completion */
+       dprintk(KERN_INFO "tf_create_device_context(%p):"
+               " device_context=0x%08x\n",
+               connection,
+               answer.create_device_context.device_context);
+       return 0;
+
+error:
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_create_device_context failed with "
+                       "error %d\n", error);
+       } else {
+               /*
+                * We sent a CREATE_DEVICE_CONTEXT. The state is now
+                * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT. It has to be
+                * reset if we ever want to send a CREATE_DEVICE_CONTEXT again.
+                */
+               spin_lock(&(connection->state_lock));
+               connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+               spin_unlock(&(connection->state_lock));
+               dprintk(KERN_ERR "tf_create_device_context failed with "
+                       "error_code 0x%08X\n",
+                       answer.create_device_context.error_code);
+               if (answer.create_device_context.error_code ==
+                       S_ERROR_OUT_OF_MEMORY)
+                       error = -ENOMEM;
+               else
+                       error = -EFAULT;
+       }
+
+       return error;
+}
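+
+/*
+ * Illustrative sketch, not part of the original driver: every message
+ * built in this file encodes its payload size in 32-bit words,
+ * excluding the header, as done above for CREATE_DEVICE_CONTEXT. A
+ * hypothetical macro making the convention explicit:
+ */
+#define TF_EXAMPLE_BODY_WORDS(type) \
+       ((sizeof(type) - sizeof(struct tf_command_header)) / sizeof(u32))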
+
+/* Check that the current application belongs to the
+ * requested GID */
+static bool tf_check_gid(gid_t requested_gid)
+{
+       if (requested_gid == current_egid()) {
+               return true;
+       } else {
+               u32    size;
+               u32    i;
+               /* Look in the supplementary GIDs */
+               get_group_info(GROUP_INFO);
+               size = GROUP_INFO->ngroups;
+               for (i = 0; i < size; i++)
+                       if (requested_gid == GROUP_AT(GROUP_INFO, i))
+                               return true;
+       }
+       return false;
+}
+
+/*
+ * Opens a client session to the Secure World
+ */
+int tf_open_client_session(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer)
+{
+       int error = 0;
+       struct tf_shmem_desc *shmem_desc[4] = {NULL};
+       u32 i;
+
+       dprintk(KERN_INFO "tf_open_client_session(%p)\n", connection);
+
+       /*
+        * Initialize the message size with no login data. This will be
+        * adjusted later in the cases below.
+        */
+       command->open_client_session.message_size =
+               (sizeof(struct tf_command_open_client_session) - 20
+                       - sizeof(struct tf_command_header))/4;
+
+       switch (command->open_client_session.login_type) {
+       case TF_LOGIN_PUBLIC:
+               /* Nothing to do */
+               break;
+
+       case TF_LOGIN_USER:
+               /*
+                * Send the EUID of the calling application in the login data.
+                * Update message size.
+                */
+               *(u32 *) &command->open_client_session.login_data =
+                       current_euid();
+#ifndef CONFIG_ANDROID
+               command->open_client_session.login_type =
+                       (u32) TF_LOGIN_USER_LINUX_EUID;
+#else
+               command->open_client_session.login_type =
+                       (u32) TF_LOGIN_USER_ANDROID_EUID;
+#endif
+
+               /* Added one word */
+               command->open_client_session.message_size += 1;
+               break;
+
+       case TF_LOGIN_GROUP: {
+               /* Check requested GID */
+               gid_t  requested_gid =
+                       *(u32 *) command->open_client_session.login_data;
+
+               if (!tf_check_gid(requested_gid)) {
+                       dprintk(KERN_ERR "tf_open_client_session(%p) "
+                               "TF_LOGIN_GROUP: requested GID (0x%x) does "
+                               "not match real eGID (0x%x)"
+                               "or any of the supplementary GIDs\n",
+                               connection, requested_gid, current_egid());
+                       error = -EACCES;
+                       goto error;
+               }
+#ifndef CONFIG_ANDROID
+               command->open_client_session.login_type =
+                       TF_LOGIN_GROUP_LINUX_GID;
+#else
+               command->open_client_session.login_type =
+                       TF_LOGIN_GROUP_ANDROID_GID;
+#endif
+
+               command->open_client_session.message_size += 1; /* GID */
+               break;
+       }
+
+#ifndef CONFIG_ANDROID
+       case TF_LOGIN_APPLICATION: {
+               /*
+                * Compute SHA-1 hash of the application fully-qualified path
+                * name.  Truncate the hash to 16 bytes and send it as login
+                * data.  Update message size.
+                */
+               u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+               error = tf_hash_application_path_and_data(pSHA1Hash,
+                       NULL, 0);
+               if (error != 0) {
+                       dprintk(KERN_ERR "tf_open_client_session: "
+                               "error in tf_hash_application_path_and_data\n");
+                       goto error;
+               }
+               memcpy(&command->open_client_session.login_data,
+                       pSHA1Hash, 16);
+               command->open_client_session.login_type =
+                       TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
+               /* 16 bytes */
+               command->open_client_session.message_size += 4;
+               break;
+       }
+#else
+       case TF_LOGIN_APPLICATION:
+               /*
+                * Send the real UID of the calling application in the login
+                * data. Update message size.
+                */
+               *(u32 *) &command->open_client_session.login_data =
+                       current_uid();
+
+               command->open_client_session.login_type =
+                       (u32) TF_LOGIN_APPLICATION_ANDROID_UID;
+
+               /* Added one word */
+               command->open_client_session.message_size += 1;
+               break;
+#endif
+
+#ifndef CONFIG_ANDROID
+       case TF_LOGIN_APPLICATION_USER: {
+               /*
+                * Compute SHA-1 hash of the concatenation of the application
+                * fully-qualified path name and the EUID of the calling
+                * application.  Truncate the hash to 16 bytes and send it as
+                * login data.  Update message size.
+                */
+               u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+               error = tf_hash_application_path_and_data(pSHA1Hash,
+                       (u8 *) &(current_euid()), sizeof(current_euid()));
+               if (error != 0) {
+                       dprintk(KERN_ERR "tf_open_client_session: "
+                               "error in tf_hash_application_path_and_data\n");
+                       goto error;
+               }
+               memcpy(&command->open_client_session.login_data,
+                       pSHA1Hash, 16);
+               command->open_client_session.login_type =
+                       TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
+
+               /* 16 bytes */
+               command->open_client_session.message_size += 4;
+
+               break;
+       }
+#else
+       case TF_LOGIN_APPLICATION_USER:
+               /*
+                * Send the real UID and the EUID of the calling application in
+                * the login data. Update message size.
+                */
+               *(u32 *) &command->open_client_session.login_data =
+                       current_uid();
+               *(u32 *) &command->open_client_session.login_data[4] =
+                       current_euid();
+
+               command->open_client_session.login_type =
+                       TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
+
+               /* Added two words */
+               command->open_client_session.message_size += 2;
+               break;
+#endif
+
+#ifndef CONFIG_ANDROID
+       case TF_LOGIN_APPLICATION_GROUP: {
+               /*
+                * Check requested GID.  Compute SHA-1 hash of the concatenation
+                * of the application fully-qualified path name and the
+                * requested GID.  Update message size
+                */
+               gid_t  requested_gid;
+               u8     pSHA1Hash[SHA1_DIGEST_SIZE];
+
+               requested_gid = *(u32 *) &command->open_client_session.
+                       login_data;
+
+               if (!tf_check_gid(requested_gid)) {
+                       dprintk(KERN_ERR "tf_open_client_session(%p) "
+                       "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+                       "does not match real eGID (0x%x)"
+                       "or any of the supplementary GIDs\n",
+                       connection, requested_gid, current_egid());
+                       error = -EACCES;
+                       goto error;
+               }
+
+               error = tf_hash_application_path_and_data(pSHA1Hash,
+                       &requested_gid, sizeof(u32));
+               if (error != 0) {
+                       dprintk(KERN_ERR "tf_open_client_session: "
+                               "error in tf_hash_application_path_and_data\n");
+                       goto error;
+               }
+
+               memcpy(&command->open_client_session.login_data,
+                       pSHA1Hash, 16);
+               command->open_client_session.login_type =
+                       TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
+
+               /* 16 bytes */
+               command->open_client_session.message_size += 4;
+               break;
+       }
+#else
+       case TF_LOGIN_APPLICATION_GROUP: {
+               /*
+                * Check requested GID. Send the real UID and the requested GID
+                * in the login data. Update message size.
+                */
+               gid_t requested_gid;
+
+               requested_gid = *(u32 *) &command->open_client_session.
+                       login_data;
+
+               if (!tf_check_gid(requested_gid)) {
+                       dprintk(KERN_ERR "tf_open_client_session(%p) "
+                       "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+                       "does not match real eGID (0x%x)"
+                       "or any of the supplementary GIDs\n",
+                       connection, requested_gid, current_egid());
+                       error = -EACCES;
+                       goto error;
+               }
+
+               *(u32 *) &command->open_client_session.login_data =
+                       current_uid();
+               *(u32 *) &command->open_client_session.login_data[4] =
+                       requested_gid;
+
+               command->open_client_session.login_type =
+                       TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
+
+               /* Added two words */
+               command->open_client_session.message_size += 2;
+
+               break;
+       }
+#endif
+
+       case TF_LOGIN_PRIVILEGED:
+               /* A privileged login may be performed only on behalf of the
+                  kernel itself or on behalf of a process with euid=0 or
+                  egid=0 or euid=system or egid=system. */
+               if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+                       dprintk(KERN_DEBUG "tf_open_client_session: "
+                               "TF_LOGIN_PRIVILEGED for kernel API\n");
+               } else if ((current_euid() != TF_PRIVILEGED_UID_GID) &&
+                          (current_egid() != TF_PRIVILEGED_UID_GID) &&
+                          (current_euid() != 0) && (current_egid() != 0)) {
+                       dprintk(KERN_ERR "tf_open_client_session: "
+                               " user %d, group %d not allowed to open "
+                               "session with TF_LOGIN_PRIVILEGED\n",
+                               current_euid(), current_egid());
+                       error = -EACCES;
+                       goto error;
+               } else {
+                       dprintk(KERN_DEBUG "tf_open_client_session: "
+                               "TF_LOGIN_PRIVILEGED for %u:%u\n",
+                               current_euid(), current_egid());
+               }
+               command->open_client_session.login_type =
+                       TF_LOGIN_PRIVILEGED;
+               break;
+
+       case TF_LOGIN_AUTHENTICATION: {
+               /*
+                * Compute SHA-1 hash of the application binary
+                * Send this hash as the login data (20 bytes)
+                */
+
+               u8 *hash;
+               hash = &(command->open_client_session.login_data[0]);
+
+               error = tf_get_current_process_hash(hash);
+               if (error != 0) {
+                       dprintk(KERN_ERR "tf_open_client_session: "
+                               "error in tf_get_current_process_hash\n");
+                       goto error;
+               }
+               command->open_client_session.login_type =
+                       TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
+
+               /* 20 bytes */
+               command->open_client_session.message_size += 5;
+               break;
+       }
+
+       case TF_LOGIN_PRIVILEGED_KERNEL:
+               /* A kernel login may be performed only on behalf of the
+                  kernel itself. */
+               if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+                       dprintk(KERN_DEBUG "tf_open_client_session: "
+                               "TF_LOGIN_PRIVILEGED_KERNEL for kernel API\n");
+                       command->open_client_session.login_type =
+                               TF_LOGIN_PRIVILEGED_KERNEL;
+               } else {
+                       dprintk(KERN_ERR "tf_open_client_session: "
+                               " user %d, group %d not allowed to open "
+                               "session with TF_LOGIN_PRIVILEGED_KERNEL\n",
+                               current_euid(), current_egid());
+                       error = -EACCES;
+                       goto error;
+               }
+               break;
+
+       default:
+               dprintk(KERN_ERR "tf_open_client_session: "
+                       "unknown login_type(%08X)\n",
+                       command->open_client_session.login_type);
+               error = -EOPNOTSUPP;
+               goto error;
+       }
+
+       /* Map the temporary memory references */
+       for (i = 0; i < 4; i++) {
+               int param_type;
+               param_type = TF_GET_PARAM_TYPE(
+                       command->open_client_session.param_types, i);
+               if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+                                  TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+                               == TF_PARAM_TYPE_MEMREF_FLAG) {
+                       /* Map temp mem ref */
+                       error = tf_map_temp_shmem(connection,
+                               &command->open_client_session.
+                                       params[i].temp_memref,
+                               param_type,
+                               &shmem_desc[i]);
+                       if (error != 0) {
+                               dprintk(KERN_ERR "tf_open_client_session: "
+                                       "unable to map temporary memory block "
+                                       "(%08X)\n", error);
+                               goto error;
+                       }
+               }
+       }
+
+       /* Fill the handle of the Device Context */
+       command->open_client_session.device_context =
+               connection->device_context;
+
+       error = tf_send_receive(
+               &connection->dev->sm,
+               command,
+               answer,
+               connection,
+               true);
+
+error:
+       /* Unmap the temporary memory references */
+       for (i = 0; i < 4; i++)
+               if (shmem_desc[i] != NULL)
+                       tf_unmap_shmem(connection, shmem_desc[i], 0);
+
+       if (error != 0)
+               dprintk(KERN_ERR "tf_open_client_session returns %d\n",
+                       error);
+       else
+               dprintk(KERN_ERR "tf_open_client_session returns "
+                       "error_code 0x%08X\n",
+                       answer->open_client_session.error_code);
+
+       return error;
+}
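+
+/*
+ * Illustrative sketch, not part of the original driver: the simplest
+ * client logs in with TF_LOGIN_PUBLIC, which carries no login data.
+ * The message-type constant below follows the naming pattern of this
+ * file but is assumed here.
+ */
+static int tf_example_open_public_session(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer)
+{
+       command->open_client_session.message_type =
+               TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION;
+       command->open_client_session.login_type = TF_LOGIN_PUBLIC;
+       command->open_client_session.param_types = 0; /* no parameters */
+
+       return tf_open_client_session(connection, command, answer);
+}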
+
+
+/*
+ * Closes a client session from the Secure World
+ */
+int tf_close_client_session(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer)
+{
+       int error = 0;
+
+       dprintk(KERN_DEBUG "tf_close_client_session(%p)\n", connection);
+
+       command->close_client_session.message_size =
+               (sizeof(struct tf_command_close_client_session) -
+                       sizeof(struct tf_command_header)) / 4;
+       command->close_client_session.device_context =
+               connection->device_context;
+
+       error = tf_send_receive(
+               &connection->dev->sm,
+               command,
+               answer,
+               connection,
+               true);
+
+       if (error != 0)
+               dprintk(KERN_ERR "tf_close_client_session returns %d\n",
+                       error);
+       else
+               dprintk(KERN_ERR "tf_close_client_session returns "
+                       "error 0x%08X\n",
+                       answer->close_client_session.error_code);
+
+       return error;
+}
+
+
+/*
+ * Registers a shared memory to the Secure World
+ */
+int tf_register_shared_memory(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer)
+{
+       int error = 0;
+       struct tf_shmem_desc *shmem_desc = NULL;
+       bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+       struct tf_command_register_shared_memory *msg =
+               &command->register_shared_memory;
+
+       dprintk(KERN_INFO "tf_register_shared_memory(%p) "
+               "%p[0x%08X][0x%08x]\n",
+               connection,
+               (void *)msg->shared_mem_descriptors[0],
+               msg->shared_mem_size,
+               (u32)msg->memory_flags);
+
+       if (in_user_space) {
+               error = tf_validate_shmem_and_flags(
+                       msg->shared_mem_descriptors[0],
+                       msg->shared_mem_size,
+                       (u32)msg->memory_flags);
+               if (error != 0)
+                       goto error;
+       }
+
+       /* Initialize message_size with no descriptors */
+       msg->message_size
+               = (offsetof(struct tf_command_register_shared_memory,
+                                               shared_mem_descriptors) -
+                       sizeof(struct tf_command_header)) / 4;
+
+       /* Map the shmem block and update the message */
+       if (msg->shared_mem_size == 0) {
+               /* Empty shared mem */
+               msg->shared_mem_start_offset = msg->shared_mem_descriptors[0];
+       } else {
+               u32 descriptor_count;
+               error = tf_map_shmem(
+                       connection,
+                       msg->shared_mem_descriptors[0],
+                       msg->memory_flags,
+                       in_user_space,
+                       msg->shared_mem_descriptors,
+                       &(msg->shared_mem_start_offset),
+                       msg->shared_mem_size,
+                       &shmem_desc,
+                       &descriptor_count);
+               if (error != 0) {
+                       dprintk(KERN_ERR "tf_register_shared_memory: "
+                               "unable to map shared memory block\n");
+                       goto error;
+               }
+               msg->message_size += descriptor_count;
+       }
+
+       /*
+        * Write the correct device context handle and the address of the
+        * shared memory descriptor into the message.
+        */
+       msg->device_context = connection->device_context;
+       msg->block_id = (u32)shmem_desc;
+
+       /* Send the updated message */
+       error = tf_send_receive(
+               &connection->dev->sm,
+               command,
+               answer,
+               connection,
+               true);
+
+       if ((error != 0) ||
+               (answer->register_shared_memory.error_code
+                       != S_SUCCESS)) {
+               dprintk(KERN_ERR "tf_register_shared_memory: "
+                       "operation failed. Unmap block\n");
+               goto error;
+       }
+
+       /* Saves the block handle returned by the secure world */
+       if (shmem_desc != NULL)
+               shmem_desc->block_identifier =
+                       answer->register_shared_memory.block;
+
+       /* successful completion */
+       dprintk(KERN_INFO "tf_register_shared_memory(%p):"
+               " block_id=0x%08x block=0x%08x\n",
+               connection, msg->block_id,
+               answer->register_shared_memory.block);
+       return 0;
+
+       /* error completion */
+error:
+       tf_unmap_shmem(
+               connection,
+               shmem_desc,
+               0);
+
+       if (error != 0)
+               dprintk(KERN_ERR "tf_register_shared_memory returns %d\n",
+                       error);
+       else
+               dprintk(KERN_ERR "tf_register_shared_memory returns "
+                       "error_code 0x%08X\n",
+                       answer->register_shared_memory.error_code);
+
+       return error;
+}
+
+
+/*
+ * Releases a shared memory from the Secure World
+ */
+int tf_release_shared_memory(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer)
+{
+       int error = 0;
+
+       dprintk(KERN_DEBUG "tf_release_shared_memory(%p)\n", connection);
+
+       command->release_shared_memory.message_size =
+               (sizeof(struct tf_command_release_shared_memory) -
+                       sizeof(struct tf_command_header)) / 4;
+       command->release_shared_memory.device_context =
+               connection->device_context;
+
+       error = tf_send_receive(
+               &connection->dev->sm,
+               command,
+               answer,
+               connection,
+               true);
+
+       if ((error != 0) ||
+               (answer->release_shared_memory.error_code != S_SUCCESS))
+               goto error;
+
+       /* Use block_id to get back the pointer to shmem_desc */
+       tf_unmap_shmem(
+               connection,
+               (struct tf_shmem_desc *)
+                       answer->release_shared_memory.block_id,
+               0);
+
+       /* successful completion */
+       dprintk(KERN_INFO "tf_release_shared_memory(%p):"
+               " block_id=0x%08x block=0x%08x\n",
+               connection, answer->release_shared_memory.block_id,
+               command->release_shared_memory.block);
+       return 0;
+
+
+error:
+       if (error != 0)
+               dprintk(KERN_ERR "tf_release_shared_memory returns %d\n",
+                       error);
+       else
+               dprintk(KERN_ERR "tf_release_shared_memory returns "
+                       "nChannelStatus 0x%08X\n",
+                       answer->release_shared_memory.error_code);
+
+       return error;
+}
+
+
+/*
+ * Invokes a client command to the Secure World
+ */
+int tf_invoke_client_command(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer)
+{
+       int error = 0;
+       struct tf_shmem_desc *shmem_desc[4] = {NULL};
+       int i;
+
+       dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);
+
+       command->invoke_client_command.message_size =
+               (sizeof(struct tf_command_invoke_client_command) -
+                       sizeof(struct tf_command_header)) / 4;
+
+#ifdef CONFIG_TF_ZEBRA
+       error = tf_crypto_try_shortcuted_update(connection,
+               (struct tf_command_invoke_client_command *) command,
+               (struct tf_answer_invoke_client_command *) answer);
+       if (error == 0)
+               return error;
+#endif
+
+       /* Map the tmprefs */
+       for (i = 0; i < 4; i++) {
+               int param_type = TF_GET_PARAM_TYPE(
+                       command->invoke_client_command.param_types, i);
+               if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+                                  TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+                               == TF_PARAM_TYPE_MEMREF_FLAG) {
+                       /* A temporary memref: map it */
+                       error = tf_map_temp_shmem(connection,
+                                       &command->invoke_client_command.
+                                               params[i].temp_memref,
+                                       param_type, &shmem_desc[i]);
+                       if (error != 0) {
+                               dprintk(KERN_ERR
+                                       "tf_invoke_client_command: "
+                                       "unable to map temporary memory "
+                                       "block\n (%08X)", error);
+                               goto error;
+                       }
+               }
+       }
+
+       command->invoke_client_command.device_context =
+               connection->device_context;
+
+       error = tf_send_receive(&connection->dev->sm, command,
+               answer, connection, true);
+
+error:
+       /* Unmap the temp memrefs */
+       for (i = 0; i < 4; i++) {
+               if (shmem_desc[i] != NULL) {
+                       dprintk(KERN_INFO "tf_invoke_client_command: "
+                               "UnMatemp_memref %d\n ", i);
+                       tf_unmap_shmem(connection, shmem_desc[i], 0);
+               }
+       }
+
+       if (error != 0)
+               dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
+                       error);
+       else
+               dprintk(KERN_ERR "tf_invoke_client_command returns "
+                       "error_code 0x%08X\n",
+                       answer->invoke_client_command.error_code);
+
+       return error;
+}
+
+
+/*
+ * Cancels a client command from the Secure World
+ */
+int tf_cancel_client_command(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer)
+{
+       int error = 0;
+
+       dprintk(KERN_DEBUG "tf_cancel_client_command(%p)\n", connection);
+
+       command->cancel_client_operation.device_context =
+               connection->device_context;
+       command->cancel_client_operation.message_size =
+               (sizeof(struct tf_command_cancel_client_operation) -
+                       sizeof(struct tf_command_header)) / 4;
+
+       error = tf_send_receive(
+               &connection->dev->sm,
+               command,
+               answer,
+               connection,
+               true);
+
+       if ((error != 0) ||
+               (answer->cancel_client_operation.error_code != S_SUCCESS))
+               goto error;
+
+       /* successful completion */
+       return 0;
+
+error:
+       if (error != 0)
+               dprintk(KERN_ERR "tf_cancel_client_command returns %d\n",
+                       error);
+       else
+               dprintk(KERN_ERR "tf_cancel_client_command returns "
+                       "nChannelStatus 0x%08X\n",
+                       answer->cancel_client_operation.error_code);
+
+       return error;
+}
+
+
+/*
+ * Destroys a device context from the Secure World
+ */
+int tf_destroy_device_context(
+       struct tf_connection *connection)
+{
+       int error;
+       /*
+        * TODO: use the specialized tf_command_destroy_device_context
+        * structure; this would save stack space
+        */
+       union tf_command command;
+       union tf_answer answer;
+
+       dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", connection);
+
+       BUG_ON(connection == NULL);
+
+       command.header.message_type = TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+       command.header.message_size =
+               (sizeof(struct tf_command_destroy_device_context) -
+                       sizeof(struct tf_command_header))/sizeof(u32);
+
+       /*
+        * Fill in the device context handle. It is guaranteed that the
+        * first shared memory descriptor describes the device context.
+        */
+       command.destroy_device_context.device_context =
+               connection->device_context;
+
+       error = tf_send_receive(
+               &connection->dev->sm,
+               &command,
+               &answer,
+               connection,
+               false);
+
+       if ((error != 0) ||
+               (answer.destroy_device_context.error_code != S_SUCCESS))
+               goto error;
+
+       spin_lock(&(connection->state_lock));
+       connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+       spin_unlock(&(connection->state_lock));
+
+       /* successful completion */
+       dprintk(KERN_INFO "tf_destroy_device_context(%p)\n",
+               connection);
+       return 0;
+
+error:
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_destroy_device_context failed with "
+                       "error %d\n", error);
+       } else {
+               dprintk(KERN_ERR "tf_destroy_device_context failed with "
+                       "error_code 0x%08X\n",
+                       answer.destroy_device_context.error_code);
+               if (answer.destroy_device_context.error_code ==
+                       S_ERROR_OUT_OF_MEMORY)
+                       error = -ENOMEM;
+               else
+                       error = -EFAULT;
+       }
+
+       return error;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Opens a connection to the specified device.
+ *
+ * The placeholder referenced by connection is set to the address of the
+ * new connection; it is set to NULL upon failure.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_open(struct tf_device *dev,
+       struct file *file,
+       struct tf_connection **connection)
+{
+       int error;
+       struct tf_connection *conn = NULL;
+
+       dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection);
+
+       /*
+        * Allocate and initialize the connection structure;
+        * kmalloc allocates only the sizeof(*conn) bytes needed here.
+        */
+       conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn),
+               GFP_KERNEL);
+       if (conn == NULL) {
+               printk(KERN_ERR "tf_open(): "
+                       "Out of memory for conn!\n");
+               error = -ENOMEM;
+               goto error;
+       }
+
+       memset(conn, 0, sizeof(*conn));
+
+       conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+       conn->dev = dev;
+       spin_lock_init(&(conn->state_lock));
+       atomic_set(&(conn->pending_op_count), 0);
+       INIT_LIST_HEAD(&(conn->list));
+
+       /*
+        * Initialize the shared memory
+        */
+       error = tf_init_shared_memory(conn);
+       if (error != 0)
+               goto error;
+
+#ifdef CONFIG_TF_ZEBRA
+       /*
+        * Initialize CUS specifics
+        */
+       tf_crypto_init_cus(conn);
+#endif
+
+       /*
+        * Attach the conn to the device.
+        */
+       spin_lock(&(dev->connection_list_lock));
+       list_add(&(conn->list), &(dev->connection_list));
+       spin_unlock(&(dev->connection_list_lock));
+
+       /*
+        * Successful completion.
+        */
+
+       *connection = conn;
+
+       dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn);
+       return 0;
+
+       /*
+        * Error handling.
+        */
+
+error:
+       dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error);
+       /* Free the connection structure if it was allocated */
+       internal_kfree(conn);
+       *connection = NULL;
+       return error;
+}
+
+
+/*
+ * Closes the specified connection.
+ *
+ * Upon return, the connection has been destroyed and cannot be used anymore.
+ *
+ * This function does nothing if connection is set to NULL.
+ */
+void tf_close(struct tf_connection *connection)
+{
+       int error;
+       enum TF_CONN_STATE state;
+
+       dprintk(KERN_DEBUG "tf_close(%p)\n", connection);
+
+       if (connection == NULL)
+               return;
+
+       /*
+        * Assumption: Linux guarantees that no other operation is in progress
+        * and that no other operation will be started when close is called
+        */
+       BUG_ON(atomic_read(&(connection->pending_op_count)) != 0);
+
+       /*
+        * Exchange a Destroy Device Context message if needed.
+        */
+       spin_lock(&(connection->state_lock));
+       state = connection->state;
+       spin_unlock(&(connection->state_lock));
+       if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) {
+               /*
+                * A DestroyDeviceContext operation was not performed. Do it
+                * now.
+                */
+               error = tf_destroy_device_context(connection);
+               if (error != 0)
+                       /* avoid cleanup if destroy device context fails */
+                       goto error;
+       }
+
+       /*
+        * Clean up the shared memory
+        */
+       tf_cleanup_shared_memories(connection);
+
+       spin_lock(&(connection->dev->connection_list_lock));
+       list_del(&(connection->list));
+       spin_unlock(&(connection->dev->connection_list_lock));
+
+       internal_kfree(connection);
+
+       return;
+
+error:
+       dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n",
+               connection, error);
+}
+
diff --git a/security/tf_driver/tf_conn.h b/security/tf_driver/tf_conn.h
new file mode 100644 (file)
index 0000000..8bed16f
--- /dev/null
@@ -0,0 +1,106 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_CONN_H__
+#define __TF_CONN_H__
+
+#include "tf_defs.h"
+
+/*
+ * Returns a pointer to the connection referenced by the
+ * specified file.
+ */
+static inline struct tf_connection *tf_conn_from_file(
+       struct file *file)
+{
+       return file->private_data;
+}
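+
+/*
+ * Typical use, as in a file_operations callback (sketch; assumes
+ * file->private_data was set to the connection by the open callback):
+ *
+ *     struct tf_connection *connection = tf_conn_from_file(file);
+ */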
+
+int tf_validate_shmem_and_flags(u32 shmem, u32 shmem_size, u32 flags);
+
+int tf_map_shmem(
+               struct tf_connection *connection,
+               u32 buffer,
+               /* flags for read-write access rights on the memory */
+               u32 flags,
+               bool in_user_space,
+               u32 descriptors[TF_MAX_COARSE_PAGES],
+               u32 *buffer_start_offset,
+               u32 buffer_size,
+               struct tf_shmem_desc **shmem_desc,
+               u32 *descriptor_count);
+
+void tf_unmap_shmem(
+               struct tf_connection *connection,
+               struct tf_shmem_desc *shmem_desc,
+               u32 full_cleanup);
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+       struct tf_connection *connection);
+
+int tf_destroy_device_context(
+       struct tf_connection *connection);
+
+int tf_open_client_session(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer);
+
+int tf_close_client_session(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer);
+
+int tf_register_shared_memory(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer);
+
+int tf_release_shared_memory(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer);
+
+int tf_invoke_client_command(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer);
+
+int tf_cancel_client_command(
+       struct tf_connection *connection,
+       union tf_command *command,
+       union tf_answer *answer);
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+int tf_open(struct tf_device *dev,
+       struct file *file,
+       struct tf_connection **connection);
+
+void tf_close(
+       struct tf_connection *connection);
+
+
+#endif  /* !defined(__TF_CONN_H__) */
diff --git a/security/tf_driver/tf_defs.h b/security/tf_driver/tf_defs.h
new file mode 100644 (file)
index 0000000..ac20937
--- /dev/null
@@ -0,0 +1,538 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_DEFS_H__
+#define __TF_DEFS_H__
+
+#include <linux/atomic.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#include "tf_protocol.h"
+
+/*----------------------------------------------------------------------------*/
+
+#define SIZE_1KB 0x400
+
+/*
+ * Maximum number of shared memory blocks that can be registered in a connection
+ */
+#define TF_SHMEM_MAX_COUNT   (64)
+
+/*
+ * Describes the possible types of shared memories
+ *
+ * TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM:
+ *    The descriptor describes a registered shared memory.
+ *    Its coarse pages are preallocated when initializing the
+ *    connection
+ * TF_SHMEM_TYPE_REGISTERED_SHMEM:
+ *    The descriptor describes a registered shared memory.
+ *    Its coarse pages are not preallocated
+ * TF_SHMEM_TYPE_PM_HIBERNATE:
+ *    The descriptor describes a power management shared memory.
+ */
+enum TF_SHMEM_TYPE {
+       TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
+       TF_SHMEM_TYPE_REGISTERED_SHMEM,
+       TF_SHMEM_TYPE_PM_HIBERNATE,
+};
+
+
+/*
+ * This structure contains a pointer to a coarse page table
+ */
+struct tf_coarse_page_table {
+       /*
+        * Identifies the coarse page table descriptor in
+        * free_coarse_page_tables list
+        */
+       struct list_head list;
+
+       /*
+        * The address of the coarse page table
+        */
+       u32 *descriptors;
+
+       /*
+        * The address of the array containing this coarse page table
+        */
+       struct tf_coarse_page_table_array *parent;
+};
+
+
+#define TF_PAGE_DESCRIPTOR_TYPE_NORMAL       0
+#define TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
+
+/*
+ * This structure describes an array of up to 4 coarse page tables
+ * allocated within a single 4KB page.
+ */
+struct tf_coarse_page_table_array {
+       /*
+        * identifies the element in the coarse_page_table_arrays list
+        */
+       struct list_head list;
+
+       /*
+        * Type of page descriptor
+        * can take any of TF_PAGE_DESCRIPTOR_TYPE_XXX value
+        */
+       u32 type;
+
+       struct tf_coarse_page_table coarse_page_tables[4];
+
+       /*
+        * A counter of the number of coarse page tables currently used;
+        * the max value should be 4 (one coarse page table is 1KB while one
+        * page is 4KB)
+        */
+       u8 ref_count;
+};
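+
+/*
+ * Worked example (sketch, based on the sizes quoted above): an ARM coarse
+ * page table holds 256 32-bit descriptors, i.e. 1KB, so a single 4KB page
+ * fits exactly four of them, which is why ref_count never exceeds 4.
+ */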
+
+
+/*
+ * This structure describes a list of coarse page table arrays
+ * with some of the coarse page tables free. It is used
+ * when the driver needs to allocate a new coarse page
+ * table.
+ */
+struct tf_coarse_page_table_allocation_context {
+       /*
+        * The spin lock protecting concurrent access to the structure.
+        */
+       spinlock_t lock;
+
+       /*
+        * The list of allocated coarse page table arrays
+        */
+       struct list_head coarse_page_table_arrays;
+
+       /*
+        * The list of free coarse page tables
+        */
+       struct list_head free_coarse_page_tables;
+};
+
+
+/*
+ * Fully describes a shared memory block
+ */
+struct tf_shmem_desc {
+       /*
+        * Identifies the shared memory descriptor in the list of free shared
+        * memory descriptors
+        */
+       struct list_head list;
+
+       /*
+        * Identifies the type of shared memory descriptor
+        */
+       enum TF_SHMEM_TYPE type;
+
+       /*
+        * The identifier of the block of shared memory, as returned by the
+        * Secure World.
+        * This identifier is the block field of a REGISTER_SHARED_MEMORY answer
+        */
+       u32 block_identifier;
+
+       /* Client buffer */
+       u8 *client_buffer;
+
+       /* Up to TF_MAX_COARSE_PAGES coarse page table contexts */
+       struct tf_coarse_page_table *coarse_pg_table[TF_MAX_COARSE_PAGES];
+
+       u32 coarse_pg_table_count;
+
+       /* Reference counter */
+       atomic_t ref_count;
+};
+
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * This structure describes the communication with the Secure World
+ *
+ * Note that this driver supports only one instance of the Secure World
+ */
+struct tf_comm {
+       /*
+        * The spin lock protecting concurrent access to the structure.
+        */
+       spinlock_t lock;
+
+       /*
+        * Bit vector with the following possible flags:
+        *    - TF_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
+        *      the IRQ has been successfully requested.
+        *    - TF_COMM_FLAG_TERMINATING: If set, indicates that the
+        *      communication with the Secure World is being terminated.
+        *      Transmissions to the Secure World are not permitted.
+        *    - TF_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
+        *      W3B buffer has been allocated.
+        *
+        * This bit vector must be accessed with the kernel's atomic bitwise
+        * operations.
+        */
+       unsigned long flags;
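+
+       /*
+        * Example access pattern (sketch only; uses the standard kernel
+        * bitops, with "comm" standing for a struct tf_comm pointer):
+        *
+        *     set_bit(TF_COMM_FLAG_TERMINATING, &comm->flags);
+        *     if (test_bit(TF_COMM_FLAG_TERMINATING, &comm->flags))
+        *             ... reject further transmissions ...
+        */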
+
+       /*
+        * The virtual address of the L1 shared buffer.
+        */
+       struct tf_l1_shared_buffer *l1_buffer;
+
+       /*
+        * The wait queue the client threads are waiting on.
+        */
+       wait_queue_head_t wait_queue;
+
+#ifdef CONFIG_TF_TRUSTZONE
+       /*
+        * The interrupt line used by the Secure World.
+        */
+       int soft_int_irq;
+
+       /* ----- W3B ----- */
+       /* shared memory descriptor to identify the W3B */
+       struct tf_shmem_desc w3b_shmem_desc;
+
+       /* Virtual address of the kernel allocated shared memory */
+       u32 w3b;
+
+       /* offset of data in shared memory coarse pages */
+       u32 w3b_shmem_offset;
+
+       u32 w3b_shmem_size;
+
+       struct tf_coarse_page_table_allocation_context
+               w3b_cpt_alloc_context;
+#endif
+#ifdef CONFIG_TF_ZEBRA
+       /*
+        * The SE SDP can only be initialized once...
+        */
+       int se_initialized;
+
+       /*
+        * Lock to be held by a client when executing an RPC
+        */
+       struct mutex rpc_mutex;
+
+       /*
+        * Lock to protect concurrent accesses to DMA channels
+        */
+       struct mutex dma_mutex;
+#endif
+};
+
+
+#define TF_COMM_FLAG_IRQ_REQUESTED             (0)
+#define TF_COMM_FLAG_PA_AVAILABLE              (1)
+#define TF_COMM_FLAG_TERMINATING               (2)
+#define TF_COMM_FLAG_W3B_ALLOCATED             (3)
+#define TF_COMM_FLAG_L1_SHARED_ALLOCATED       (4)
+
+/*----------------------------------------------------------------------------*/
+
+struct tf_device_stats {
+       atomic_t stat_pages_allocated;
+       atomic_t stat_memories_allocated;
+       atomic_t stat_pages_locked;
+};
+
+/*
+ * This structure describes the information about one device handled by the
+ * driver. Note that the driver supports only a single device; see the
+ * global variable g_tf_dev.
+ */
+struct tf_device {
+       /*
+        * The kernel object for the device
+        */
+       struct kobject kobj;
+
+       /*
+        * The device number for the device.
+        */
+       dev_t dev_number;
+
+       /*
+        * Interfaces the char device with the kernel.
+        */
+       struct cdev cdev;
+
+#ifdef CONFIG_TF_TEEC
+       struct cdev cdev_teec;
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+       struct cdev cdev_ctrl;
+
+       /*
+        * Globals for CUS
+        */
+       /* Current key handles loaded in HWAs */
+       u32 aes1_key_context;
+       u32 des_key_context;
+       bool sham1_is_public;
+
+       /* Object used to serialize HWA accesses */
+       struct semaphore aes1_sema;
+       struct semaphore des_sema;
+       struct semaphore sha_sema;
+
+       /*
+        * An aligned and correctly shaped pre-allocated buffer used for DMA
+        * transfers
+        */
+       u32 dma_buffer_length;
+       u8 *dma_buffer;
+       dma_addr_t dma_buffer_phys;
+
+       /* Workspace allocated at boot time and reserved to the Secure World */
+       u32 workspace_addr;
+       u32 workspace_size;
+
+       /*
+        * A mutex to provide exclusive locking of the ioctl()
+        */
+       struct mutex dev_mutex;
+#endif
+
+       /*
+        * Communications with the SM.
+        */
+       struct tf_comm sm;
+
+       /*
+        * Lists the connections attached to this device.  A connection is
+        * created each time a user space application "opens" a file descriptor
+        * on the driver
+        */
+       struct list_head connection_list;
+
+       /*
+        * The spin lock used to protect concurrent access to the connection
+        * list.
+        */
+       spinlock_t connection_list_lock;
+
+       struct tf_device_stats stats;
+};
+
+/*----------------------------------------------------------------------------*/
+/*
+ * This type describes a connection state.
+ * This is used to determine whether a message is valid or not.
+ *
+ * Messages are only valid in a certain device state.
+ * Messages may be invalidated between the start of the ioctl call and the
+ * moment the message is sent to the Secure World.
+ *
+ * TF_CONN_STATE_NO_DEVICE_CONTEXT:
+ *    The connection has no DEVICE_CONTEXT created and no
+ *    CREATE_DEVICE_CONTEXT being processed by the Secure World
+ * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT:
+ *    The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure
+ *    World
+ * TF_CONN_STATE_VALID_DEVICE_CONTEXT:
+ *    The connection has a DEVICE_CONTEXT created and no
+ *    DESTROY_DEVICE_CONTEXT is being processed by the Secure World
+ * TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT:
+ *    The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure
+ *    World
+ */
+enum TF_CONN_STATE {
+       TF_CONN_STATE_NO_DEVICE_CONTEXT = 0,
+       TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
+       TF_CONN_STATE_VALID_DEVICE_CONTEXT,
+       TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
+};
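+
+/*
+ * Nominal lifecycle implied by the states above (sketch):
+ *
+ *   NO_DEVICE_CONTEXT
+ *     -> CREATE_DEVICE_CONTEXT_SENT   (CREATE_DEVICE_CONTEXT issued)
+ *     -> VALID_DEVICE_CONTEXT         (answered by the Secure World)
+ *     -> DESTROY_DEVICE_CONTEXT_SENT  (DESTROY_DEVICE_CONTEXT issued)
+ *     -> NO_DEVICE_CONTEXT            (answered by the Secure World)
+ */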
+
+
+/*
+ * This type describes the status of a command.
+ *
+ * PENDING:
+ *     The initial state; the command has not been sent yet.
+ * SENT:
+ *     The command has been sent; we are waiting for an answer.
+ * ABORTED:
+ *     The command cannot be sent because the device context is invalid.
+ *     Note that this only covers the case where some other thread
+ *     sent a DESTROY_DEVICE_CONTEXT command.
+ */
+enum TF_COMMAND_STATE {
+       TF_COMMAND_STATE_PENDING = 0,
+       TF_COMMAND_STATE_SENT,
+       TF_COMMAND_STATE_ABORTED
+};
+
+/*
+ * The origin of connection parameters such as login data and
+ * memory reference pointers.
+ *
+ * PROCESS: the calling process. All arguments must be validated.
+ * KERNEL: kernel code. All arguments can be trusted by this driver.
+ */
+enum TF_CONNECTION_OWNER {
+       TF_CONNECTION_OWNER_PROCESS = 0,
+       TF_CONNECTION_OWNER_KERNEL,
+};
+
+
+/*
+ * This structure describes a connection to the driver.
+ * A connection is created each time an application opens a file descriptor
+ * on the driver.
+ */
+struct tf_connection {
+       /*
+        * Identifies the connection in the list of the connections attached to
+        * the same device.
+        */
+       struct list_head list;
+
+       /*
+        * State of the connection.
+        */
+       enum TF_CONN_STATE state;
+
+       /*
+        * A pointer to the corresponding device structure
+        */
+       struct tf_device *dev;
+
+       /*
+        * A spinlock to use to access state
+        */
+       spinlock_t state_lock;
+
+       /*
+        * Counts the number of operations currently pending on the connection.
+        * (for debug only)
+        */
+       atomic_t pending_op_count;
+
+       /*
+        * A handle for the device context
+        */
+       u32 device_context;
+
+       /*
+        * Lists the used shared memory descriptors
+        */
+       struct list_head used_shmem_list;
+
+       /*
+        * Lists the free shared memory descriptors
+        */
+       struct list_head free_shmem_list;
+
+       /*
+        * A mutex to use to access this structure
+        */
+       struct mutex shmem_mutex;
+
+       /*
+        * Counts the number of shared memories registered.
+        */
+       atomic_t shmem_count;
+
+       /*
+        * Array of vm_area_struct pointers used to retrieve memory
+        * properties when registering shared memory through
+        * REGISTER_SHARED_MEMORY messages
+        */
+       struct vm_area_struct **vmas;
+
+       /*
+        * coarse page table allocation context
+        */
+       struct tf_coarse_page_table_allocation_context cpt_alloc_context;
+
+       /* The origin of connection parameters such as login data and
+          memory reference pointers. */
+       enum TF_CONNECTION_OWNER owner;
+
+#ifdef CONFIG_TF_ZEBRA
+       /* Lists all the Cryptoki Update Shortcuts */
+       struct list_head shortcut_list;
+
+       /* Lock to protect concurrent accesses to shortcut_list */
+       spinlock_t shortcut_list_lock;
+#endif
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The operation_id field of a message points to this structure.
+ * It is used to identify the thread that triggered the message transmission.
+ * Whoever reads an answer can wake up that thread using the completion event.
+ */
+struct tf_answer_struct {
+       bool answer_copied;
+       union tf_answer *answer;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * The C string representation of the base name of the devices managed by
+ * this driver.
+ */
+#define TF_DEVICE_BASE_NAME    "tf_driver"
+
+
+/**
+ * The minor number of the registered character device; the major number is
+ * set by the device_major_number module parameter. Only one instance of the
+ * driver is supported.
+ */
+#define TF_DEVICE_MINOR_NUMBER (0)
+
+struct tf_device *tf_get_device(void);
+
+#define CLEAN_CACHE_CFG_MASK   (~0xC) /* 1111 0011 */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Kernel Differences
+ */
+
+#ifdef CONFIG_ANDROID
+#define GROUP_INFO             get_current_groups()
+#else
+#define GROUP_INFO             (current->group_info)
+#endif
+
+#endif  /* !defined(__TF_DEFS_H__) */
diff --git a/security/tf_driver/tf_device.c b/security/tf_driver/tf_device.c
new file mode 100644 (file)
index 0000000..47ff39e
--- /dev/null
@@ -0,0 +1,827 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pm.h>
+#include <linux/sysdev.h>
+#include <linux/vmalloc.h>
+#include <linux/signal.h>
+#ifdef CONFIG_ANDROID
+#include <linux/device.h>
+#endif
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+#include "tf_comm.h"
+#ifdef CONFIG_TF_ZEBRA
+#include <plat/cpu.h>
+#include "tf_zebra.h"
+#endif
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+#include "tf_crypto.h"
+#endif
+
+#include "s_version.h"
+
+/*----------------------------------------------------------------------------
+ * Forward Declarations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Creates and registers the device to be managed by the specified driver.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_device_register(void);
+
+
+/*
+ * Implements the device Open callback.
+ */
+static int tf_device_open(
+               struct inode *inode,
+               struct file *file);
+
+
+/*
+ * Implements the device Release callback.
+ */
+static int tf_device_release(
+               struct inode *inode,
+               struct file *file);
+
+
+/*
+ * Implements the device ioctl callback.
+ */
+static long tf_device_ioctl(
+               struct file *file,
+               unsigned int ioctl_num,
+               unsigned long ioctl_param);
+
+
+/*
+ * Implements the device shutdown callback.
+ */
+static int tf_device_shutdown(
+               struct sys_device *sysdev);
+
+
+/*
+ * Implements the device suspend callback.
+ */
+static int tf_device_suspend(
+               struct sys_device *sysdev,
+               pm_message_t state);
+
+
+/*
+ * Implements the device resume callback.
+ */
+static int tf_device_resume(
+               struct sys_device *sysdev);
+
+
+/*---------------------------------------------------------------------------
+ * Module Parameters
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The device major number used to register a unique character device driver.
+ * The default value is 122.
+ */
+static int device_major_number = 122;
+
+module_param(device_major_number, int, 0000);
+MODULE_PARM_DESC(device_major_number,
+       "The device major number used to register a unique character "
+       "device driver");
+
+#ifdef CONFIG_TF_TRUSTZONE
+/**
+ * The softint interrupt line used by the Secure World.
+ */
+static int soft_interrupt = -1;
+
+module_param(soft_interrupt, int, 0000);
+MODULE_PARM_DESC(soft_interrupt,
+       "The softint interrupt line used by the Secure world");
+#endif
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+unsigned tf_debug_level = UINT_MAX;
+module_param_named(debug, tf_debug_level, uint, 0644);
+#endif
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+char *tf_integrity_hmac_sha256_expected_value;
+module_param_named(hmac_sha256, tf_integrity_hmac_sha256_expected_value,
+                  charp, 0444);
+
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+unsigned tf_fault_injection_mask;
+module_param_named(fault, tf_fault_injection_mask, uint, 0644);
+#endif
+
+int tf_self_test_blkcipher_align;
+module_param_named(post_align, tf_self_test_blkcipher_align, int, 0644);
+int tf_self_test_blkcipher_use_vmalloc;
+module_param_named(post_vmalloc, tf_self_test_blkcipher_use_vmalloc, int, 0644);
+#endif
+
+#ifdef CONFIG_ANDROID
+static struct class *tf_class;
+#endif
+
+/*
+ * Interfaces the system device with the kernel.
+ */
+struct sys_device g_tf_sysdev;
+
+/*----------------------------------------------------------------------------
+ * Global Variables
+ *----------------------------------------------------------------------------*/
+
+/*
+ * tf_driver character device definitions.
+ * The read and write methods are not defined and will return an error
+ * if used by user space.
+ */
+static const struct file_operations g_tf_device_file_ops = {
+       .owner = THIS_MODULE,
+       .open = tf_device_open,
+       .release = tf_device_release,
+       .unlocked_ioctl = tf_device_ioctl,
+       .llseek = no_llseek,
+};
+
+
+static struct sysdev_class g_tf_device_sys_class = {
+       .name = TF_DEVICE_BASE_NAME,
+       .shutdown = tf_device_shutdown,
+       .suspend = tf_device_suspend,
+       .resume = tf_device_resume,
+};
+
+/* The single device supported by this driver */
+static struct tf_device g_tf_dev;
+
+/*----------------------------------------------------------------------------
+ * Implementations
+ *----------------------------------------------------------------------------*/
+
+struct tf_device *tf_get_device(void)
+{
+       return &g_tf_dev;
+}
+
+/*
+ * sysfs entries
+ */
+struct tf_sysfs_entry {
+       struct attribute attr;
+       ssize_t (*show)(struct tf_device *, char *);
+       ssize_t (*store)(struct tf_device *, const char *, size_t);
+};
+
+/*
+ * sysfs entry showing allocation stats
+ */
+static ssize_t info_show(struct tf_device *dev, char *buf)
+{
+       struct tf_device_stats *dev_stats = &dev->stats;
+
+       return snprintf(buf, PAGE_SIZE,
+               "stat.memories.allocated: %d\n"
+               "stat.pages.allocated:    %d\n"
+               "stat.pages.locked:       %d\n",
+               atomic_read(&dev_stats->stat_memories_allocated),
+               atomic_read(&dev_stats->stat_pages_allocated),
+               atomic_read(&dev_stats->stat_pages_locked));
+}
+static struct tf_sysfs_entry tf_info_entry = __ATTR_RO(info);
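+
+/*
+ * Since the kobject is registered with a NULL parent (see
+ * tf_device_register below), the entry is expected to appear as
+ * /sys/tf_driver/info (sketch; the exact path depends on the kobject
+ * hierarchy):
+ *
+ *     $ cat /sys/tf_driver/info
+ *     stat.memories.allocated: 0
+ *     stat.pages.allocated:    0
+ *     stat.pages.locked:       0
+ */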
+
+#ifdef CONFIG_TF_ZEBRA
+/*
+ * sysfs entry showing whether secure world is up and running
+ */
+static ssize_t tf_started_show(struct tf_device *dev, char *buf)
+{
+       int tf_started = test_bit(TF_COMM_FLAG_PA_AVAILABLE,
+               &dev->sm.flags);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", tf_started ? "yes" : "no");
+}
+static struct tf_sysfs_entry tf_started_entry =
+       __ATTR_RO(tf_started);
+
+static ssize_t workspace_addr_show(struct tf_device *dev, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_addr);
+}
+static struct tf_sysfs_entry tf_workspace_addr_entry =
+       __ATTR_RO(workspace_addr);
+
+static ssize_t workspace_size_show(struct tf_device *dev, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_size);
+}
+static struct tf_sysfs_entry tf_workspace_size_entry =
+       __ATTR_RO(workspace_size);
+#endif
+
+static ssize_t tf_attr_show(struct kobject *kobj, struct attribute *attr,
+       char *page)
+{
+       struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+               attr);
+       struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+       if (!entry->show)
+               return -EIO;
+
+       return entry->show(dev, page);
+}
+
+static ssize_t tf_attr_store(struct kobject *kobj, struct attribute *attr,
+       const char *page, size_t length)
+{
+       struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+               attr);
+       struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+       if (!entry->store)
+               return -EIO;
+
+       return entry->store(dev, page, length);
+}
+
+static void tf_kobj_release(struct kobject *kobj) {}
+
+static struct attribute *tf_default_attrs[] = {
+       &tf_info_entry.attr,
+#ifdef CONFIG_TF_ZEBRA
+       &tf_started_entry.attr,
+       &tf_workspace_addr_entry.attr,
+       &tf_workspace_size_entry.attr,
+#endif
+       NULL,
+};
+static const struct sysfs_ops tf_sysfs_ops = {
+       .show   = tf_attr_show,
+       .store  = tf_attr_store,
+};
+static struct kobj_type tf_ktype = {
+       .release        = tf_kobj_release,
+       .sysfs_ops      = &tf_sysfs_ops,
+       .default_attrs  = tf_default_attrs
+};
+
+/*----------------------------------------------------------------------------*/
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+static char *smc_mem;
+module_param(smc_mem, charp, S_IRUGO);
+#endif
+
+/*
+ * First routine called when the kernel module is loaded
+ */
+static int __init tf_device_register(void)
+{
+       int error;
+       struct tf_device *dev = &g_tf_dev;
+
+       dprintk(KERN_INFO "tf_device_register()\n");
+
+       /*
+        * Initialize the device
+        */
+       dev->dev_number = MKDEV(device_major_number,
+               TF_DEVICE_MINOR_NUMBER);
+       cdev_init(&dev->cdev, &g_tf_device_file_ops);
+       dev->cdev.owner = THIS_MODULE;
+
+       g_tf_sysdev.id = 0;
+       g_tf_sysdev.cls = &g_tf_device_sys_class;
+
+       INIT_LIST_HEAD(&dev->connection_list);
+       spin_lock_init(&dev->connection_list_lock);
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+       error = (*tf_comm_early_init)();
+       if (error)
+               goto module_early_init_failed;
+
+       error = tf_device_mshield_init(smc_mem);
+       if (error)
+               goto mshield_init_failed;
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+       error = tf_crypto_hmac_module_init();
+       if (error)
+               goto hmac_init_failed;
+
+       error = tf_self_test_register_device();
+       if (error)
+               goto self_test_register_device_failed;
+#endif
+#endif
+
+       /* register the sysfs object driver stats */
+       error = kobject_init_and_add(&dev->kobj,  &tf_ktype, NULL, "%s",
+                TF_DEVICE_BASE_NAME);
+       if (error) {
+               printk(KERN_ERR "tf_device_register(): "
+                       "kobject_init_and_add failed (error %d)!\n", error);
+               kobject_put(&dev->kobj);
+               goto kobject_init_and_add_failed;
+       }
+
+       /*
+        * Register the system device.
+        */
+
+       error = sysdev_class_register(&g_tf_device_sys_class);
+       if (error != 0) {
+               printk(KERN_ERR "tf_device_register():"
+                       " sysdev_class_register failed (error %d)!\n",
+                       error);
+               goto sysdev_class_register_failed;
+       }
+
+       error = sysdev_register(&g_tf_sysdev);
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_device_register(): "
+                       "sysdev_register failed (error %d)!\n",
+                       error);
+               goto sysdev_register_failed;
+       }
+
+       /*
+        * Register the char device.
+        */
+       printk(KERN_INFO "Registering char device %s (%u:%u)\n",
+               TF_DEVICE_BASE_NAME,
+               MAJOR(dev->dev_number),
+               MINOR(dev->dev_number));
+       error = register_chrdev_region(dev->dev_number, 1,
+               TF_DEVICE_BASE_NAME);
+       if (error != 0) {
+               printk(KERN_ERR "tf_device_register():"
+                       " register_chrdev_region failed (error %d)!\n",
+                       error);
+               goto register_chrdev_region_failed;
+       }
+
+       error = cdev_add(&dev->cdev, dev->dev_number, 1);
+       if (error != 0) {
+               printk(KERN_ERR "tf_device_register(): "
+                       "cdev_add failed (error %d)!\n",
+                       error);
+               goto cdev_add_failed;
+       }
+
+       /*
+        * Initialize the communication with the Secure World.
+        */
+#ifdef CONFIG_TF_TRUSTZONE
+       dev->sm.soft_int_irq = soft_interrupt;
+#endif
+       error = tf_init(&g_tf_dev.sm);
+       if (error != S_SUCCESS) {
+               dprintk(KERN_ERR "tf_device_register(): "
+                       "tf_init failed (error %d)!\n",
+                       error);
+               goto init_failed;
+       }
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+       /* Assumption: the POST sysfs entries hang off the device kobject */
+       error = tf_self_test_post_init(&dev->kobj);
+       /* N.B. error > 0 indicates a POST failure, which will not
+          prevent the module from loading. */
+       if (error < 0) {
+               dprintk(KERN_ERR "tf_device_register(): "
+                       "tf_self_test_post_init failed (error %d)!\n",
+                       error);
+               goto post_failed;
+       }
+#endif
+
+#ifdef CONFIG_ANDROID
+       tf_class = class_create(THIS_MODULE, TF_DEVICE_BASE_NAME);
+       device_create(tf_class, NULL,
+               dev->dev_number,
+               NULL, TF_DEVICE_BASE_NAME);
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+       /*
+        * Initializes the /dev/tf_ctrl device node.
+        */
+       error = tf_ctrl_device_register();
+       if (error)
+               goto ctrl_failed;
+#endif
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+       address_cache_property((unsigned long) &tf_device_register);
+#endif
+       /*
+        * Successful completion.
+        */
+
+       dprintk(KERN_INFO "tf_device_register(): Success\n");
+       return 0;
+
+       /*
+        * Error: undo all operations in the reverse order
+        */
+#ifdef CONFIG_TF_ZEBRA
+ctrl_failed:
+#endif
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+       tf_self_test_post_exit();
+post_failed:
+#endif
+init_failed:
+       cdev_del(&dev->cdev);
+cdev_add_failed:
+       unregister_chrdev_region(dev->dev_number, 1);
+register_chrdev_region_failed:
+       sysdev_unregister(&g_tf_sysdev);
+sysdev_register_failed:
+       sysdev_class_unregister(&g_tf_device_sys_class);
+sysdev_class_register_failed:
+kobject_init_and_add_failed:
+       kobject_del(&g_tf_dev.kobj);
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+       tf_self_test_unregister_device();
+self_test_register_device_failed:
+       tf_crypto_hmac_module_exit();
+hmac_init_failed:
+#endif
+       tf_device_mshield_exit();
+mshield_init_failed:
+module_early_init_failed:
+#endif
+       dprintk(KERN_INFO "tf_device_register(): Failure (error %d)\n",
+               error);
+       return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_open(struct inode *inode, struct file *file)
+{
+       int error;
+       struct tf_device *dev = &g_tf_dev;
+       struct tf_connection *connection = NULL;
+
+       dprintk(KERN_INFO "tf_device_open(%u:%u, %p)\n",
+               imajor(inode), iminor(inode), file);
+
+       /* Dummy lseek for non-seekable driver */
+       error = nonseekable_open(inode, file);
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_device_open(%p): "
+                       "nonseekable_open failed (error %d)!\n",
+                       file, error);
+               goto error;
+       }
+
+#ifndef CONFIG_ANDROID
+       /*
+        * Check file flags. We only authorize O_RDWR access
+        */
+       if (file->f_flags != O_RDWR) {
+               dprintk(KERN_ERR "tf_device_open(%p): "
+                       "Invalid access mode %u\n",
+                       file, file->f_flags);
+               error = -EACCES;
+               goto error;
+       }
+#endif
+
+       /*
+        * Open a new connection.
+        */
+
+       error = tf_open(dev, file, &connection);
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_device_open(%p): "
+                       "tf_open failed (error %d)!\n",
+                       file, error);
+               goto error;
+       }
+
+       file->private_data = connection;
+
+       /*
+        * Send the CreateDeviceContext command to the Secure World
+        */
+       error = tf_create_device_context(connection);
+       if (error != 0) {
+               dprintk(KERN_ERR "tf_device_open(%p): "
+                       "tf_create_device_context failed (error %d)!\n",
+                       file, error);
+               goto error1;
+       }
+
+       /*
+        * Successful completion.
+        */
+
+       dprintk(KERN_INFO "tf_device_open(%p): Success (connection=%p)\n",
+               file, connection);
+       return 0;
+
+       /*
+        * Error handling.
+        */
+
+error1:
+       tf_close(connection);
+error:
+       dprintk(KERN_INFO "tf_device_open(%p): Failure (error %d)\n",
+               file, error);
+       return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_release(struct inode *inode, struct file *file)
+{
+       struct tf_connection *connection;
+
+       dprintk(KERN_INFO "tf_device_release(%u:%u, %p)\n",
+               imajor(inode), iminor(inode), file);
+
+       connection = tf_conn_from_file(file);
+       tf_close(connection);
+
+       dprintk(KERN_INFO "tf_device_release(%p): Success\n", file);
+       return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static long tf_device_ioctl(struct file *file, unsigned int ioctl_num,
+       unsigned long ioctl_param)
+{
+       int result = S_SUCCESS;
+       struct tf_connection *connection;
+       union tf_command command;
+       struct tf_command_header header;
+       union tf_answer answer;
+       u32 command_size;
+       u32 answer_size;
+       void *user_answer;
+
+       dprintk(KERN_INFO "tf_device_ioctl(%p, %u, %p)\n",
+               file, ioctl_num, (void *) ioctl_param);
+
+       switch (ioctl_num) {
+       case IOCTL_TF_GET_VERSION:
+               /* ioctl is asking for the driver interface version */
+               result = TF_DRIVER_INTERFACE_VERSION;
+               goto exit;
+
+       case IOCTL_TF_EXCHANGE:
+               /*
+                * ioctl is asking to perform a message exchange with the Secure
+                * Module
+                */
+
+               /*
+                * Make a local copy of the data from the user application;
+                * copy_from_user checks that the data is readable.
+                *
+                * Get the header first.
+                */
+               if (copy_from_user(&header,
+                               (struct tf_command_header *)ioctl_param,
+                               sizeof(struct tf_command_header))) {
+                       dprintk(KERN_ERR "tf_device_ioctl(%p): "
+                               "Cannot access ioctl parameter %p\n",
+                               file, (void *) ioctl_param);
+                       result = -EFAULT;
+                       goto exit;
+               }
+
+               /* Size of the full command, in 32-bit words */
+               command_size = header.message_size +
+                       sizeof(struct tf_command_header)/sizeof(u32);
+               if (command_size > sizeof(command)/sizeof(u32)) {
+                       dprintk(KERN_ERR "tf_device_ioctl(%p): "
+                               "Buffer overflow: too many bytes to copy %d\n",
+                               file, command_size);
+                       result = -EFAULT;
+                       goto exit;
+               }
+
+               if (copy_from_user(&command,
+                               (union tf_command *)ioctl_param,
+                               command_size * sizeof(u32))) {
+                       dprintk(KERN_ERR "tf_device_ioctl(%p): "
+                               "Cannot access ioctl parameter %p\n",
+                               file, (void *) ioctl_param);
+                       result = -EFAULT;
+                       goto exit;
+               }
+
+               connection = tf_conn_from_file(file);
+               BUG_ON(connection == NULL);
+
+               /*
+                * The answer memory space address is in the operation_id field
+                */
+               user_answer = (void *) command.header.operation_id;
+
+               atomic_inc(&(connection->pending_op_count));
+
+               dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+                       "Sending message type  0x%08x\n",
+                       file, command.header.message_type);
+
+               switch (command.header.message_type) {
+               case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+                       result = tf_open_client_session(connection,
+                               &command, &answer);
+                       break;
+
+               case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+                       result = tf_close_client_session(connection,
+                               &command, &answer);
+                       break;
+
+               case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+                       result = tf_register_shared_memory(connection,
+                               &command, &answer);
+                       break;
+
+               case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+                       result = tf_release_shared_memory(connection,
+                               &command, &answer);
+                       break;
+
+               case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+                       result = tf_invoke_client_command(connection,
+                               &command, &answer);
+                       break;
+
+               case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+                       result = tf_cancel_client_command(connection,
+                               &command, &answer);
+                       break;
+
+               default:
+                       dprintk(KERN_ERR "tf_device_ioctl(%p): "
+                               "Incorrect message type (0x%08x)!\n",
+                               connection, command.header.message_type);
+                       result = -EOPNOTSUPP;
+                       break;
+               }
+
+               atomic_dec(&(connection->pending_op_count));
+
+               if (result != 0) {
+                       dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+                               "Operation returning error code 0x%08x)!\n",
+                               file, result);
+                       goto exit;
+               }
+
+               /*
+                * Copy the answer back to the user space application.
+                * The driver does not interpret the answer; it only copies
+                * back to user space the data handed over by the Secure World.
+                */
+               answer_size = answer.header.message_size +
+                       sizeof(struct tf_answer_header)/sizeof(u32);
+               if (copy_to_user(user_answer,
+                               &answer, answer_size * sizeof(u32))) {
+                       dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+                               "Failed to copy back the full command "
+                               "answer to %p\n", file, user_answer);
+                       result = -EFAULT;
+                       goto exit;
+               }
+
+               /* successful completion */
+               dprintk(KERN_INFO "tf_device_ioctl(%p): Success\n", file);
+               break;
+
+       case IOCTL_TF_GET_DESCRIPTION: {
+               /* ioctl asking for the version information buffer */
+               struct tf_version_information_buffer *info_buffer;
+
+               dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION:(%p, %u, %p)\n",
+                       file, ioctl_num, (void *) ioctl_param);
+
+               info_buffer =
+                       ((struct tf_version_information_buffer *) ioctl_param);
+
+               dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION1: "
+                       "driver_description=\"%64s\"\n", S_VERSION_STRING);
+
+               if (copy_to_user(info_buffer->driver_description,
+                               S_VERSION_STRING,
+                               strlen(S_VERSION_STRING) + 1)) {
+                       dprintk(KERN_ERR "tf_device_ioctl(%p): "
+                               "Failed to copy back the driver description "
+                               "to %p\n",
+                               file, info_buffer->driver_description);
+                       result = -EFAULT;
+                       goto exit;
+               }
+
+               dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION2: "
+                       "secure_world_description=\"%64s\"\n",
+                       tf_get_description(&g_tf_dev.sm));
+
+               if (copy_to_user(info_buffer->secure_world_description,
+                               tf_get_description(&g_tf_dev.sm),
+                               TF_DESCRIPTION_BUFFER_LENGTH)) {
+                       dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+                               "Failed to copy back the secure world "
+                               "description to %p\n",
+                               file, info_buffer->secure_world_description);
+                       result = -EFAULT;
+                       goto exit;
+               }
+               break;
+       }
+
+       default:
+               dprintk(KERN_ERR "tf_device_ioctl(%p): "
+                       "Unknown IOCTL code 0x%08x!\n",
+                       file, ioctl_num);
+               result = -EOPNOTSUPP;
+               goto exit;
+       }
+
+exit:
+       return result;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_shutdown(struct sys_device *sysdev)
+{
+       return tf_power_management(&g_tf_dev.sm,
+               TF_POWER_OPERATION_SHUTDOWN);
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_suspend(struct sys_device *sysdev, pm_message_t state)
+{
+       dprintk(KERN_INFO "tf_device_suspend: Enter\n");
+       return tf_power_management(&g_tf_dev.sm,
+               TF_POWER_OPERATION_HIBERNATE);
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_resume(struct sys_device *sysdev)
+{
+       return tf_power_management(&g_tf_dev.sm,
+               TF_POWER_OPERATION_RESUME);
+}
+
+/*----------------------------------------------------------------------------*/
+
+module_init(tf_device_register);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/tf_driver/tf_protocol.h b/security/tf_driver/tf_protocol.h
new file mode 100644 (file)
index 0000000..403df8e
--- /dev/null
@@ -0,0 +1,690 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_PROTOCOL_H__
+#define __TF_PROTOCOL_H__
+
+/*----------------------------------------------------------------------------
+ *
+ * This header file defines the structures used in the SChannel protocol.
+ * See your Product Reference Manual for the full specification of the
+ * SChannel protocol.
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The driver interface version returned by the version ioctl
+ */
+#define TF_DRIVER_INTERFACE_VERSION     0x04000000
+
+/*
+ * Protocol version handling
+ */
+#define TF_S_PROTOCOL_MAJOR_VERSION  (0x06)
+#define GET_PROTOCOL_MAJOR_VERSION(a) ((a) >> 24)
+#define GET_PROTOCOL_MINOR_VERSION(a) (((a) >> 16) & 0xFF)
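+
+/*
+ * Usage sketch (illustrative only): splitting a protocol version word as
+ * encoded by the macros above and checking the major version.
+ *
+ *   u32 version = 0x06020000;
+ *   u32 major = GET_PROTOCOL_MAJOR_VERSION(version);   -> 0x06
+ *   u32 minor = GET_PROTOCOL_MINOR_VERSION(version);   -> 0x02
+ *   compatible = (major == TF_S_PROTOCOL_MAJOR_VERSION);
+ */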
+
+/*
+ * The S flag of the config_flag_s register.
+ */
+#define TF_CONFIG_FLAG_S   (1 << 3)
+
+/*
+ * The TimeSlot field of the sync_serial_n register.
+ */
+#define TF_SYNC_SERIAL_TIMESLOT_N   (1)
+
+/*
+ * status_s related defines.
+ */
+#define TF_STATUS_P_MASK            (0x00000001)
+#define TF_STATUS_POWER_STATE_SHIFT (3)
+#define TF_STATUS_POWER_STATE_MASK  (0x1F << TF_STATUS_POWER_STATE_SHIFT)
+
+/*
+ * Possible power states of the POWER_STATE field of the status_s register
+ */
+#define TF_POWER_MODE_COLD_BOOT          (0)
+#define TF_POWER_MODE_WARM_BOOT          (1)
+#define TF_POWER_MODE_ACTIVE             (3)
+#define TF_POWER_MODE_READY_TO_SHUTDOWN  (5)
+#define TF_POWER_MODE_READY_TO_HIBERNATE (7)
+#define TF_POWER_MODE_WAKEUP             (8)
+#define TF_POWER_MODE_PANIC              (15)
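+
+/*
+ * Extracting the power state from status_s (illustrative):
+ *
+ *   u32 state = (status_s & TF_STATUS_POWER_STATE_MASK)
+ *                       >> TF_STATUS_POWER_STATE_SHIFT;
+ *
+ * state is then one of the TF_POWER_MODE_* values above.
+ */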
+
+/*
+ * Possible command values for MANAGEMENT commands
+ */
+#define TF_MANAGEMENT_HIBERNATE            (1)
+#define TF_MANAGEMENT_SHUTDOWN             (2)
+#define TF_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
+#define TF_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
+
+/*
+ * The capacity of the Normal World message queue, in number of slots.
+ */
+#define TF_N_MESSAGE_QUEUE_CAPACITY  (512)
+
+/*
+ * The capacity of the Secure World message answer queue, in number of slots.
+ */
+#define TF_S_ANSWER_QUEUE_CAPACITY  (256)
+
+/*
+ * The value of the S-timeout register indicating an infinite timeout.
+ */
+#define TF_S_TIMEOUT_0_INFINITE  (0xFFFFFFFF)
+#define TF_S_TIMEOUT_1_INFINITE  (0xFFFFFFFF)
+
+/*
+ * The value of the S-timeout register indicating an immediate timeout.
+ */
+#define TF_S_TIMEOUT_0_IMMEDIATE  (0x0)
+#define TF_S_TIMEOUT_1_IMMEDIATE  (0x0)
+
+/*
+ * Identifies the get protocol version SMC.
+ */
+#define TF_SMC_GET_PROTOCOL_VERSION (0xFFFFFFFB)
+
+/*
+ * Identifies the init SMC.
+ */
+#define TF_SMC_INIT                 (0xFFFFFFFF)
+
+/*
+ * Identifies the reset irq SMC.
+ */
+#define TF_SMC_RESET_IRQ            (0xFFFFFFFE)
+
+/*
+ * Identifies the SET_W3B SMC.
+ */
+#define TF_SMC_WAKE_UP              (0xFFFFFFFD)
+
+/*
+ * Identifies the STOP SMC.
+ */
+#define TF_SMC_STOP                 (0xFFFFFFFC)
+
+/*
+ * Identifies the n-yield SMC.
+ */
+#define TF_SMC_N_YIELD              (0x00000003)
+
+
+/* Possible stop commands for SMC_STOP */
+#define SCSTOP_HIBERNATE           (0xFFFFFFE1)
+#define SCSTOP_SHUTDOWN            (0xFFFFFFE2)
+
+/*
+ * Representation of a UUID.
+ */
+struct tf_uuid {
+       u32 time_low;
+       u16 time_mid;
+       u16 time_hi_and_version;
+       u8 clock_seq_and_node[8];
+};
+
+
+/**
+ * Command parameters.
+ */
+struct tf_command_param_value {
+       u32    a;
+       u32    b;
+};
+
+struct tf_command_param_temp_memref {
+       u32    descriptor; /* data pointer for exchange message.*/
+       u32    size;
+       u32    offset;
+};
+
+struct tf_command_param_memref {
+       u32      block;
+       u32      size;
+       u32      offset;
+};
+
+union tf_command_param {
+       struct tf_command_param_value        value;
+       struct tf_command_param_temp_memref  temp_memref;
+       struct tf_command_param_memref       memref;
+};
+
+/**
+ * Answer parameters.
+ */
+struct tf_answer_param_value {
+       u32   a;
+       u32   b;
+};
+
+struct tf_answer_param_size {
+       u32   _ignored;
+       u32   size;
+};
+
+union tf_answer_param {
+       struct tf_answer_param_size    size;
+       struct tf_answer_param_value   value;
+};
+
+/*
+ * Descriptor tables capacity
+ */
+#define TF_MAX_W3B_COARSE_PAGES                 (2)
+/* TF_MAX_COARSE_PAGES is the number of level 1 descriptors (each
+ * describing 1MB) that can be shared with the Secure World in a single
+ * registered shared memory block. It must be kept in sync with
+ * SCHANNEL6_MAX_DESCRIPTORS_PER_REGISTERED_SHARED_MEM in the SChannel
+ * protocol spec. */
+#define TF_MAX_COARSE_PAGES                     128
+#define TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT  (8)
+#define TF_DESCRIPTOR_TABLE_CAPACITY \
+       (1 << TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
+#define TF_DESCRIPTOR_TABLE_CAPACITY_MASK \
+       (TF_DESCRIPTOR_TABLE_CAPACITY - 1)
+/* Each shared memory coarse page can map up to 1MB */
+#define TF_MAX_COARSE_PAGE_MAPPED_SIZE \
+       (PAGE_SIZE * TF_DESCRIPTOR_TABLE_CAPACITY)
+/* Shared memories cannot exceed 8MB */
+#define TF_MAX_SHMEM_SIZE \
+       (TF_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
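+
+/*
+ * Worked example (assuming 4KB pages): one descriptor table holds
+ * TF_DESCRIPTOR_TABLE_CAPACITY = 1 << 8 = 256 descriptors, so a coarse
+ * page maps 256 * 4KB = 1MB, and TF_MAX_SHMEM_SIZE = 1MB << 3 = 8MB.
+ */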
+
+/*
+ * Buffer size for version description fields
+ */
+#define TF_DESCRIPTION_BUFFER_LENGTH 64
+
+/*
+ * Shared memory type flags.
+ */
+#define TF_SHMEM_TYPE_READ         (0x00000001)
+#define TF_SHMEM_TYPE_WRITE        (0x00000002)
+
+/*
+ * Shared mem flags
+ */
+#define TF_SHARED_MEM_FLAG_INPUT   1
+#define TF_SHARED_MEM_FLAG_OUTPUT  2
+#define TF_SHARED_MEM_FLAG_INOUT   3
+
+
+/*
+ * Parameter types
+ */
+#define TF_PARAM_TYPE_NONE               0x0
+#define TF_PARAM_TYPE_VALUE_INPUT        0x1
+#define TF_PARAM_TYPE_VALUE_OUTPUT       0x2
+#define TF_PARAM_TYPE_VALUE_INOUT        0x3
+#define TF_PARAM_TYPE_MEMREF_TEMP_INPUT  0x5
+#define TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
+#define TF_PARAM_TYPE_MEMREF_TEMP_INOUT  0x7
+#define TF_PARAM_TYPE_MEMREF_INPUT       0xD
+#define TF_PARAM_TYPE_MEMREF_OUTPUT      0xE
+#define TF_PARAM_TYPE_MEMREF_INOUT       0xF
+
+#define TF_PARAM_TYPE_MEMREF_FLAG               0x4
+#define TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG    0x8
+
+
+#define TF_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
+       ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+#define TF_GET_PARAM_TYPE(t, i) (((t) >> (4 * (i))) & 0xF)
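+
+/*
+ * Usage sketch (illustrative): packing four parameter types into the
+ * 16-bit param_types field and reading one back.
+ *
+ *   u16 types = TF_MAKE_PARAM_TYPES(
+ *           TF_PARAM_TYPE_VALUE_INPUT,
+ *           TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT,
+ *           TF_PARAM_TYPE_NONE,
+ *           TF_PARAM_TYPE_NONE);
+ *
+ * TF_GET_PARAM_TYPE(types, 1) then yields TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT.
+ */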
+
+/*
+ * Login types.
+ */
+#define TF_LOGIN_PUBLIC              0x00000000
+#define TF_LOGIN_USER                0x00000001
+#define TF_LOGIN_GROUP               0x00000002
+#define TF_LOGIN_APPLICATION         0x00000004
+#define TF_LOGIN_APPLICATION_USER    0x00000005
+#define TF_LOGIN_APPLICATION_GROUP   0x00000006
+#define TF_LOGIN_AUTHENTICATION      0x80000000
+#define TF_LOGIN_PRIVILEGED          0x80000002
+
+/* Login variants */
+
+#define TF_LOGIN_VARIANT(main_type, os, variant) \
+       ((main_type) | (1 << 27) | ((os) << 16) | ((variant) << 8))
+
+#define TF_LOGIN_GET_MAIN_TYPE(type) \
+       ((type) & ~TF_LOGIN_VARIANT(0, 0xFF, 0xFF))
+
+#define TF_LOGIN_OS_ANY       0x00
+#define TF_LOGIN_OS_LINUX     0x01
+#define TF_LOGIN_OS_ANDROID   0x04
+
+/* OS-independent variants */
+#define TF_LOGIN_USER_NONE \
+       TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_GROUP_NONE \
+       TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_APPLICATION_USER_NONE \
+       TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
+       TF_LOGIN_VARIANT(TF_LOGIN_AUTHENTICATION, TF_LOGIN_OS_ANY, 0x01)
+#define TF_LOGIN_PRIVILEGED_KERNEL \
+       TF_LOGIN_VARIANT(TF_LOGIN_PRIVILEGED, TF_LOGIN_OS_ANY, 0x01)
+
+/* Linux variants */
+#define TF_LOGIN_USER_LINUX_EUID \
+       TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_GROUP_LINUX_GID \
+       TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
+       TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
+       TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
+       TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+
+/* Android variants */
+#define TF_LOGIN_USER_ANDROID_EUID \
+       TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_GROUP_ANDROID_GID \
+       TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_ANDROID_UID \
+       TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
+       TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANDROID, \
+               0x01)
+#define TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
+       TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_ANDROID, \
+               0x01)
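+
+/*
+ * Worked example: TF_LOGIN_USER_LINUX_EUID expands to
+ * TF_LOGIN_USER | (1 << 27) | (TF_LOGIN_OS_LINUX << 16) | (0x01 << 8),
+ * i.e. 0x08010101. TF_LOGIN_GET_MAIN_TYPE() masks off the variant flag
+ * and the OS and variant bytes, recovering TF_LOGIN_USER.
+ */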
+
+/*
+ * Return origins
+ */
+#define TF_ORIGIN_COMMS       2
+#define TF_ORIGIN_TEE         3
+#define TF_ORIGIN_TRUSTED_APP 4
+
+/*
+ * The message types.
+ */
+#define TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT   0x02
+#define TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT  0xFD
+#define TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY  0xF7
+#define TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY   0xF9
+#define TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION     0xF0
+#define TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION    0xF2
+#define TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND   0xF5
+#define TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND   0xF4
+#define TF_MESSAGE_TYPE_MANAGEMENT              0xFE
+
+
+/*
+ * The SChannel error codes.
+ */
+#define S_SUCCESS 0x00000000
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+
+struct tf_command_header {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info;
+       u32                      operation_id;
+};
+
+struct tf_answer_header {
+       u8                   message_size;
+       u8                   message_type;
+       u16                  message_info;
+       u32                  operation_id;
+       u32                  error_code;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT command message.
+ */
+struct tf_command_create_device_context {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info_rfu;
+       u32                      operation_id;
+       u32                      device_context_id;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_create_device_context {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info_rfu;
+       /* an opaque Normal World identifier for the operation */
+       u32                      operation_id;
+       u32                      error_code;
+       /* an opaque Normal World identifier for the device context */
+       u32                      device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT command message.
+ */
+struct tf_command_destroy_device_context {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info_rfu;
+       u32                      operation_id;
+       u32                      device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_destroy_device_context {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info_rfu;
+       /* an opaque Normal World identifier for the operation */
+       u32                      operation_id;
+       u32                      error_code;
+       u32                      device_context_id;
+};
+
+/*
+ * OPEN_CLIENT_SESSION command message.
+ */
+struct tf_command_open_client_session {
+       u8                            message_size;
+       u8                            message_type;
+       u16                           param_types;
+       /* an opaque Normal World identifier for the operation */
+       u32                           operation_id;
+       u32                           device_context;
+       u32                           cancellation_id;
+       u64                           timeout;
+       struct tf_uuid                destination_uuid;
+       union tf_command_param        params[4];
+       u32                           login_type;
+       /*
+        * Size = 0 for public, [16] for group identification, [20] for
+        * authentication
+        */
+       u8                            login_data[20];
+};
+
+/*
+ * OPEN_CLIENT_SESSION answer message.
+ */
+struct tf_answer_open_client_session {
+       u8                       message_size;
+       u8                       message_type;
+       u8                       error_origin;
+       u8                       __reserved;
+       /* an opaque Normal World identifier for the operation */
+       u32                      operation_id;
+       u32                      error_code;
+       u32                      client_session;
+       union tf_answer_param    answers[4];
+};
+
+/*
+ * CLOSE_CLIENT_SESSION command message.
+ */
+struct tf_command_close_client_session {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info_rfu;
+       /* an opaque Normal World identifier for the operation */
+       u32                      operation_id;
+       u32                      device_context;
+       u32                      client_session;
+};
+
+/*
+ * CLOSE_CLIENT_SESSION answer message.
+ */
+struct tf_answer_close_client_session {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info_rfu;
+       /* an opaque Normal World identifier for the operation */
+       u32                      operation_id;
+       u32                      error_code;
+};
+
+
+/*
+ * REGISTER_SHARED_MEMORY command message
+ */
+struct tf_command_register_shared_memory {
+       u8  message_size;
+       u8  message_type;
+       u16 memory_flags;
+       u32 operation_id;
+       u32 device_context;
+       u32 block_id;
+       u32 shared_mem_size;
+       u32 shared_mem_start_offset;
+       u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+};
+
+/*
+ * REGISTER_SHARED_MEMORY answer message.
+ */
+struct tf_answer_register_shared_memory {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info_rfu;
+       /* an opaque Normal World identifier for the operation */
+       u32                      operation_id;
+       u32                      error_code;
+       u32                      block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY command message.
+ */
+struct tf_command_release_shared_memory {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info_rfu;
+       /* an opaque Normal World identifier for the operation */
+       u32                      operation_id;
+       u32                      device_context;
+       u32                      block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY answer message.
+ */
+struct tf_answer_release_shared_memory {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      message_info_rfu;
+       u32                      operation_id;
+       u32                      error_code;
+       u32                      block_id;
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command message.
+ */
+struct tf_command_invoke_client_command {
+       u8                       message_size;
+       u8                       message_type;
+       u16                      param_types;
+       u32                      operation_id;
+       u32                      device_context;
+       u32                      client_session;
+       u64                      timeout;
+       u32                      cancellation_id;
+       u32                      client_command_identifier;
+       union tf_command_param   params[4];
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command answer.
+ */
+struct tf_answer_invoke_client_command {
+       u8                     message_size;
+       u8                     message_type;
+       u8                     error_origin;
+       u8                     __reserved;
+       u32                    operation_id;
+       u32                    error_code;
+       union tf_answer_param  answers[4];
+};
+
+/*
+ * CANCEL_CLIENT_OPERATION command message.
+ */
+struct tf_command_cancel_client_operation {
+       u8                   message_size;
+       u8                   message_type;
+       u16                  message_info_rfu;
+       /* an opaque Normal World identifier for the operation */
+       u32                  operation_id;
+       u32                  device_context;
+       u32                  client_session;
+       u32                  cancellation_id;
+};
+
+struct tf_answer_cancel_client_operation {
+       u8                   message_size;
+       u8                   message_type;
+       u16                  message_info_rfu;
+       u32                  operation_id;
+       u32                  error_code;
+};
+
+/*
+ * MANAGEMENT command message.
+ */
+struct tf_command_management {
+       u8                   message_size;
+       u8                   message_type;
+       u16                  command;
+       u32                  operation_id;
+       u32                  w3b_size;
+       u32                  w3b_start_offset;
+       u32                  shared_mem_descriptors[1];
+};
+
+/*
+ * MANAGEMENT answer message.
+ * The answer carries no message-specific parameters, so no dedicated
+ * answer structure is defined; the generic tf_answer_header applies.
+ */
+
+/*
+ * Structure for L2 messages
+ */
+union tf_command {
+       struct tf_command_header                  header;
+       struct tf_command_create_device_context   create_device_context;
+       struct tf_command_destroy_device_context  destroy_device_context;
+       struct tf_command_open_client_session     open_client_session;
+       struct tf_command_close_client_session    close_client_session;
+       struct tf_command_register_shared_memory  register_shared_memory;
+       struct tf_command_release_shared_memory   release_shared_memory;
+       struct tf_command_invoke_client_command   invoke_client_command;
+       struct tf_command_cancel_client_operation cancel_client_operation;
+       struct tf_command_management              management;
+};
+
+/*
+ * Structure for any L2 answer
+ */
+union tf_answer {
+       struct tf_answer_header                  header;
+       struct tf_answer_create_device_context   create_device_context;
+       struct tf_answer_open_client_session     open_client_session;
+       struct tf_answer_close_client_session    close_client_session;
+       struct tf_answer_register_shared_memory  register_shared_memory;
+       struct tf_answer_release_shared_memory   release_shared_memory;
+       struct tf_answer_invoke_client_command   invoke_client_command;
+       struct tf_answer_destroy_device_context  destroy_device_context;
+       struct tf_answer_cancel_client_operation cancel_client_operation;
+};
+
+/* Structure of the Communication Buffer */
+struct tf_l1_shared_buffer {
+       #ifdef CONFIG_TF_ZEBRA
+       u32 exit_code;
+       u32 l1_shared_buffer_descr;
+       u32 backing_store_addr;
+       u32 backext_storage_addr;
+       u32 workspace_addr;
+       u32 workspace_size;
+       u32 conf_descriptor;
+       u32 conf_size;
+       u32 conf_offset;
+       u32 protocol_version;
+       u32 rpc_command;
+       u32 rpc_status;
+       u8  reserved1[16];
+       #else
+       u32 config_flag_s;
+       u32 w3b_size_max_s;
+       u32 reserved0;
+       u32 w3b_size_current_s;
+       u8  reserved1[48];
+       #endif
+       u8  version_description[TF_DESCRIPTION_BUFFER_LENGTH];
+       u32 status_s;
+       u32 reserved2;
+       u32 sync_serial_n;
+       u32 sync_serial_s;
+       u64 time_n[2];
+       u64 timeout_s[2];
+       u32 first_command;
+       u32 first_free_command;
+       u32 first_answer;
+       u32 first_free_answer;
+       u32 w3b_descriptors[128];
+       #ifdef CONFIG_TF_ZEBRA
+       u8  rpc_trace_buffer[140];
+       u8  rpc_cus_buffer[180];
+       #else
+       u8  reserved3[320];
+       #endif
+       u32 command_queue[TF_N_MESSAGE_QUEUE_CAPACITY];
+       u32 answer_queue[TF_S_ANSWER_QUEUE_CAPACITY];
+};
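+
+/*
+ * Note: the first_command/first_free_command and first_answer/
+ * first_free_answer indices index into command_queue[] and answer_queue[]
+ * respectively; the queues are managed as ring buffers by the
+ * communication code (see tf_comm.c).
+ */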
+
+
+/*
+ * tf_version_information_buffer structure description
+ * Describes the version buffer handed over from user space to kernel
+ * space. The driver fills it during an IOCTL_TF_GET_DESCRIPTION ioctl
+ * and hands it back to user space.
+ */
+struct tf_version_information_buffer {
+       u8 driver_description[65];
+       u8 secure_world_description[65];
+};
+
+
+/* The IOCTLs the driver supports */
+#include <linux/ioctl.h>
+
+#define IOCTL_TF_GET_VERSION     _IO('z', 0)
+#define IOCTL_TF_EXCHANGE        _IOWR('z', 1, union tf_command)
+#define IOCTL_TF_GET_DESCRIPTION _IOR('z', 2, \
+       struct tf_version_information_buffer)
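+
+/*
+ * User-space usage sketch (illustrative; the device node path is an
+ * assumption, not defined by this header):
+ *
+ *   int fd = open("/dev/tf_driver", O_RDWR);
+ *   struct tf_version_information_buffer info;
+ *   if (fd >= 0 && ioctl(fd, IOCTL_TF_GET_DESCRIPTION, &info) == 0)
+ *           printf("driver: %s\n", info.driver_description);
+ */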
+
+#endif  /* !defined(__TF_PROTOCOL_H__) */
diff --git a/security/tf_driver/tf_util.c b/security/tf_driver/tf_util.c
new file mode 100644 (file)
index 0000000..78f90bf
--- /dev/null
@@ -0,0 +1,1143 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/mman.h>
+#include "tf_util.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+void tf_trace_array(const char *fun, const char *msg,
+                   const void *ptr, size_t len)
+{
+       char hex[511];
+       bool ell = (len > sizeof(hex)/2);
+       unsigned lim = (len > sizeof(hex)/2 ? sizeof(hex)/2 : len);
+       unsigned i;
+       for (i = 0; i < lim; i++)
+               sprintf(hex + 2 * i, "%02x", ((unsigned char *)ptr)[i]);
+       pr_info("%s: %s[%zu] = %s%s\n",
+               fun, msg, len, hex, ell ? "..." : "");
+}
+
+void address_cache_property(unsigned long va)
+{
+       unsigned long pa;
+       unsigned long inner;
+       unsigned long outer;
+
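+       /* ATS1CPR: request a stage-1 privileged-read translation of va */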
+       asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
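+       /* Read back the resulting Physical Address Register (PAR) */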
+       asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (pa));
+
+       dprintk(KERN_INFO "VA:%x, PA:%x\n",
+               (unsigned int) va,
+               (unsigned int) pa);
+
+       if (pa & 1) {
+               dprintk(KERN_INFO "Prop Error\n");
+               return;
+       }
+
+       outer = (pa >> 2) & 3;
+       dprintk(KERN_INFO "\touter : %x", (unsigned int) outer);
+
+       switch (outer) {
+       case 3:
+               dprintk(KERN_INFO "Write-Back, no Write-Allocate.\n");
+               break;
+       case 2:
+               dprintk(KERN_INFO "Write-Through, no Write-Allocate.\n");
+               break;
+       case 1:
+               dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+               break;
+       case 0:
+               dprintk(KERN_INFO "Non-cacheable.\n");
+               break;
+       }
+
+       inner = (pa >> 4) & 7;
+       dprintk(KERN_INFO "\tinner : %x", (unsigned int)inner);
+
+       switch (inner) {
+       case 7:
+               dprintk(KERN_INFO "Write-Back, no Write-Allocate.\n");
+               break;
+       case 6:
+               dprintk(KERN_INFO "Write-Through.\n");
+               break;
+       case 5:
+               dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+               break;
+       case 3:
+               dprintk(KERN_INFO "Device.\n");
+               break;
+       case 1:
+               dprintk(KERN_INFO "Strongly-ordered.\n");
+               break;
+       case 0:
+               dprintk(KERN_INFO "Non-cacheable.\n");
+               break;
+       }
+
+       if (pa & 0x00000002)
+               dprintk(KERN_INFO "SuperSection.\n");
+       if (pa & 0x00000080)
+               dprintk(KERN_INFO "Memory is shareable.\n");
+       else
+               dprintk(KERN_INFO "Memory is non-shareable.\n");
+
+       if (pa & 0x00000200)
+               dprintk(KERN_INFO "Non-secure.\n");
+}
+
+/*
+ * Dump the L1 shared buffer.
+ */
+void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer)
+{
+       dprintk(KERN_INFO
+               "buffer@%p:\n"
+               #ifndef CONFIG_TF_ZEBRA
+               "  config_flag_s=%08X\n"
+               #endif
+               "  version_description=%64s\n"
+               "  status_s=%08X\n"
+               "  sync_serial_n=%08X\n"
+               "  sync_serial_s=%08X\n"
+               "  time_n[0]=%016llX\n"
+               "  time_n[1]=%016llX\n"
+               "  timeout_s[0]=%016llX\n"
+               "  timeout_s[1]=%016llX\n"
+               "  first_command=%08X\n"
+               "  first_free_command=%08X\n"
+               "  first_answer=%08X\n"
+               "  first_free_answer=%08X\n\n",
+               buffer,
+               #ifndef CONFIG_TF_ZEBRA
+               buffer->config_flag_s,
+               #endif
+               buffer->version_description,
+               buffer->status_s,
+               buffer->sync_serial_n,
+               buffer->sync_serial_s,
+               buffer->time_n[0],
+               buffer->time_n[1],
+               buffer->timeout_s[0],
+               buffer->timeout_s[1],
+               buffer->first_command,
+               buffer->first_free_command,
+               buffer->first_answer,
+               buffer->first_free_answer);
+}
+
+
+/*
+ * Dump the specified SChannel message using dprintk.
+ */
+void tf_dump_command(union tf_command *command)
+{
+       u32 i;
+
+       dprintk(KERN_INFO "message@%p:\n", command);
+
+       switch (command->header.message_type) {
+       case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+               dprintk(KERN_INFO
+                       "   message_size             = 0x%02X\n"
+                       "   message_type             = 0x%02X "
+                               "TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT\n"
+                       "   operation_id             = 0x%08X\n"
+                       "   device_context_id         = 0x%08X\n",
+                       command->header.message_size,
+                       command->header.message_type,
+                       command->header.operation_id,
+                       command->create_device_context.device_context_id
+               );
+               break;
+
+       case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+               dprintk(KERN_INFO
+                       "   message_size    = 0x%02X\n"
+                       "   message_type    = 0x%02X "
+                               "TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT\n"
+                       "   operation_id    = 0x%08X\n"
+                       "   device_context  = 0x%08X\n",
+                       command->header.message_size,
+                       command->header.message_type,
+                       command->header.operation_id,
+                       command->destroy_device_context.device_context);
+               break;
+
+       case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+               dprintk(KERN_INFO
+                       "   message_size                = 0x%02X\n"
+                       "   message_type                = 0x%02X "
+                               "TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION\n"
+                       "   param_types                 = 0x%04X\n"
+                       "   operation_id                = 0x%08X\n"
+                       "   device_context              = 0x%08X\n"
+                       "   cancellation_id             = 0x%08X\n"
+                       "   timeout                    = 0x%016llX\n"
+                       "   destination_uuid            = "
+                               "%08X-%04X-%04X-%02X%02X-"
+                               "%02X%02X%02X%02X%02X%02X\n",
+                       command->header.message_size,
+                       command->header.message_type,
+                       command->open_client_session.param_types,
+                       command->header.operation_id,
+                       command->open_client_session.device_context,
+                       command->open_client_session.cancellation_id,
+                       command->open_client_session.timeout,
+                       command->open_client_session.destination_uuid.
+                               time_low,
+                       command->open_client_session.destination_uuid.
+                               time_mid,
+                       command->open_client_session.destination_uuid.
+                               time_hi_and_version,
+                       command->open_client_session.destination_uuid.
+                               clock_seq_and_node[0],
+                       command->open_client_session.destination_uuid.
+                               clock_seq_and_node[1],
+                       command->open_client_session.destination_uuid.
+                               clock_seq_and_node[2],
+                       command->open_client_session.destination_uuid.
+                               clock_seq_and_node[3],
+                       command->open_client_session.destination_uuid.
+                               clock_seq_and_node[4],
+                       command->open_client_session.destination_uuid.
+                               clock_seq_and_node[5],
+                       command->open_client_session.destination_uuid.
+                               clock_seq_and_node[6],
+                       command->open_client_session.destination_uuid.
+                               clock_seq_and_node[7]
+               );
+
+               for (i = 0; i < 4; i++) {
+                       uint32_t *param = (uint32_t *) &command->
+                               open_client_session.params[i];
+                       dprintk(KERN_INFO "   params[%d] = "
+                               "0x%08X:0x%08X:0x%08X\n",
+                               i, param[0], param[1], param[2]);
+               }
+
+               switch (TF_LOGIN_GET_MAIN_TYPE(
+                       command->open_client_session.login_type)) {
+               case TF_LOGIN_PUBLIC:
+                       dprintk(
+                               KERN_INFO "   login_type               = "
+                                       "TF_LOGIN_PUBLIC\n");
+                       break;
+               case TF_LOGIN_USER:
+                       dprintk(
+                               KERN_INFO "   login_type               = "
+                                       "TF_LOGIN_USER\n");
+                       break;
+               case TF_LOGIN_GROUP:
+                       dprintk(
+                               KERN_INFO "   login_type               = "
+                                       "TF_LOGIN_GROUP\n");
+                       break;
+               case TF_LOGIN_APPLICATION:
+                       dprintk(
+                               KERN_INFO "   login_type               = "
+                                       "TF_LOGIN_APPLICATION\n");
+                       break;
+               case TF_LOGIN_APPLICATION_USER:
+                       dprintk(
+                               KERN_INFO "   login_type               = "
+                                       "TF_LOGIN_APPLICATION_USER\n");
+                       break;
+               case TF_LOGIN_APPLICATION_GROUP:
+                       dprintk(
+                               KERN_INFO "   login_type               = "
+                                       "TF_LOGIN_APPLICATION_GROUP\n");
+                       break;
+               case TF_LOGIN_AUTHENTICATION:
+                       dprintk(
+                               KERN_INFO "   login_type               = "
+                                       "TF_LOGIN_AUTHENTICATION\n");
+                       break;
+               case TF_LOGIN_PRIVILEGED:
+                       dprintk(
+                               KERN_INFO "   login_type               = "
+                                       "TF_LOGIN_PRIVILEGED\n");
+                       break;
+               case TF_LOGIN_PRIVILEGED_KERNEL:
+                       dprintk(
+                               KERN_INFO "   login_type               = "
+                                       "TF_LOGIN_PRIVILEGED_KERNEL\n");
+                       break;
+               default:
+                       dprintk(
+                               KERN_ERR "   login_type               = "
+                                       "0x%08X (Unknown login type)\n",
+                               command->open_client_session.login_type);
+                       break;
+               }
+
+               dprintk(
+                       KERN_INFO "   login_data               = ");
+               for (i = 0; i < 20; i++)
+                       dprintk(
+                               KERN_INFO "%02x",
+                               command->open_client_session.
+                                       login_data[i]);
+               dprintk("\n");
+               break;
+
+       case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+               dprintk(KERN_INFO
+                       "   message_size                = 0x%02X\n"
+                       "   message_type                = 0x%02X "
+                               "TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION\n"
+                       "   operation_id                = 0x%08X\n"
+                       "   device_context              = 0x%08X\n"
+                       "   client_session              = 0x%08X\n",
+                       command->header.message_size,
+                       command->header.message_type,
+                       command->header.operation_id,
+                       command->close_client_session.device_context,
+                       command->close_client_session.client_session
+               );
+               break;
+
+       case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+               dprintk(KERN_INFO
+                       "   message_size             = 0x%02X\n"
+                       "   message_type             = 0x%02X "
+                               "TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY\n"
+                       "   memory_flags             = 0x%04X\n"
+                       "   operation_id             = 0x%08X\n"
+                       "   device_context           = 0x%08X\n"
+                       "   block_id                 = 0x%08X\n"
+                       "   shared_mem_size           = 0x%08X\n"
+                       "   shared_mem_start_offset    = 0x%08X\n"
+                       "   shared_mem_descriptors[0] = 0x%08X\n"
+                       "   shared_mem_descriptors[1] = 0x%08X\n"
+                       "   shared_mem_descriptors[2] = 0x%08X\n"
+                       "   shared_mem_descriptors[3] = 0x%08X\n"
+                       "   shared_mem_descriptors[4] = 0x%08X\n"
+                       "   shared_mem_descriptors[5] = 0x%08X\n"
+                       "   shared_mem_descriptors[6] = 0x%08X\n"
+                       "   shared_mem_descriptors[7] = 0x%08X\n",
+                       command->header.message_size,
+                       command->header.message_type,
+                       command->register_shared_memory.memory_flags,
+                       command->header.operation_id,
+                       command->register_shared_memory.device_context,
+                       command->register_shared_memory.block_id,
+                       command->register_shared_memory.shared_mem_size,
+                       command->register_shared_memory.
+                               shared_mem_start_offset,
+                       command->register_shared_memory.
+                               shared_mem_descriptors[0],
+                       command->register_shared_memory.
+                               shared_mem_descriptors[1],
+                       command->register_shared_memory.
+                               shared_mem_descriptors[2],
+                       command->register_shared_memory.
+                               shared_mem_descriptors[3],
+                       command->register_shared_memory.
+                               shared_mem_descriptors[4],
+                       command->register_shared_memory.
+                               shared_mem_descriptors[5],
+                       command->register_shared_memory.
+                               shared_mem_descriptors[6],
+                       command->register_shared_memory.
+                               shared_mem_descriptors[7]);
+               break;
+
+       case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+               dprintk(KERN_INFO
+                       "   message_size    = 0x%02X\n"
+                       "   message_type    = 0x%02X "
+                               "TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY\n"
+                       "   operation_id    = 0x%08X\n"
+                       "   device_context  = 0x%08X\n"
+                       "   block          = 0x%08X\n",
+                       command->header.message_size,
+                       command->header.message_type,
+                       command->header.operation_id,
+                       command->release_shared_memory.device_context,
+                       command->release_shared_memory.block);
+               break;
+
+       case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+               dprintk(KERN_INFO
+                       "   message_size                = 0x%02X\n"
+                       "   message_type                = 0x%02X "
+                               "TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND\n"
+                       "   param_types                 = 0x%04X\n"
+                       "   operation_id                = 0x%08X\n"
+                       "   device_context              = 0x%08X\n"
+                       "   client_session              = 0x%08X\n"
+                       "   timeout                    = 0x%016llX\n"
+                       "   cancellation_id             = 0x%08X\n"
+                       "   client_command_identifier    = 0x%08X\n",
+                       command->header.message_size,
+                       command->header.message_type,
+                       command->invoke_client_command.param_types,
+                       command->header.operation_id,
+                       command->invoke_client_command.device_context,
+                       command->invoke_client_command.client_session,
+                       command->invoke_client_command.timeout,
+                       command->invoke_client_command.cancellation_id,
+                       command->invoke_client_command.
+                               client_command_identifier
+               );
+
+               for (i = 0; i < 4; i++) {
+                       uint32_t *param = (uint32_t *) &command->
+                               invoke_client_command.params[i];
+                       dprintk(KERN_INFO "   params[%d] = "
+                               "0x%08X:0x%08X:0x%08X\n", i,
+                               param[0], param[1], param[2]);
+               }
+               break;
+
+       case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+               dprintk(KERN_INFO
+                       "   message_size       = 0x%02X\n"
+                       "   message_type       = 0x%02X "
+                               "TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND\n"
+                       "   operation_id       = 0x%08X\n"
+                       "   device_context     = 0x%08X\n"
+                       "   client_session     = 0x%08X\n",
+                       command->header.message_size,
+                       command->header.message_type,
+                       command->header.operation_id,
+                       command->cancel_client_operation.device_context,
+                       command->cancel_client_operation.client_session);
+               break;
+
+       case TF_MESSAGE_TYPE_MANAGEMENT:
+               dprintk(KERN_INFO
+                       "   message_size             = 0x%02X\n"
+                       "   message_type             = 0x%02X "
+                               "TF_MESSAGE_TYPE_MANAGEMENT\n"
+                       "   operation_id             = 0x%08X\n"
+                       "   command                 = 0x%08X\n"
+                       "   w3b_size                 = 0x%08X\n"
+                       "   w3b_start_offset          = 0x%08X\n",
+                       command->header.message_size,
+                       command->header.message_type,
+                       command->header.operation_id,
+                       command->management.command,
+                       command->management.w3b_size,
+                       command->management.w3b_start_offset);
+               break;
+
+       default:
+               dprintk(
+                       KERN_ERR "   message_type = 0x%08X "
+                               "(Unknown message type)\n",
+                       command->header.message_type);
+               break;
+       }
+}
+
+
+/*
+ * Dump the specified SChannel answer using dprintk.
+ */
+void tf_dump_answer(union tf_answer *answer)
+{
+       u32 i;
+       dprintk(
+               KERN_INFO "answer@%p:\n",
+               answer);
+
+       switch (answer->header.message_type) {
+       case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+               dprintk(KERN_INFO
+                       "   message_size    = 0x%02X\n"
+                       "   message_type    = 0x%02X "
+                               "tf_answer_create_device_context\n"
+                       "   operation_id    = 0x%08X\n"
+                       "   error_code      = 0x%08X\n"
+                       "   device_context  = 0x%08X\n",
+                       answer->header.message_size,
+                       answer->header.message_type,
+                       answer->header.operation_id,
+                       answer->create_device_context.error_code,
+                       answer->create_device_context.device_context);
+               break;
+
+       case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+               dprintk(KERN_INFO
+                       "   message_size     = 0x%02X\n"
+                       "   message_type     = 0x%02X "
+                               "ANSWER_DESTROY_DEVICE_CONTEXT\n"
+                       "   operation_id     = 0x%08X\n"
+                       "   error_code       = 0x%08X\n"
+                       "   device_context_id = 0x%08X\n",
+                       answer->header.message_size,
+                       answer->header.message_type,
+                       answer->header.operation_id,
+                       answer->destroy_device_context.error_code,
+                       answer->destroy_device_context.device_context_id);
+               break;
+
+
+       case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+               dprintk(KERN_INFO
+                       "   message_size      = 0x%02X\n"
+                       "   message_type      = 0x%02X "
+                               "tf_answer_open_client_session\n"
+                       "   error_origin     = 0x%02X\n"
+                       "   operation_id      = 0x%08X\n"
+                       "   error_code        = 0x%08X\n"
+                       "   client_session    = 0x%08X\n",
+                       answer->header.message_size,
+                       answer->header.message_type,
+                       answer->open_client_session.error_origin,
+                       answer->header.operation_id,
+                       answer->open_client_session.error_code,
+                       answer->open_client_session.client_session);
+               for (i = 0; i < 4; i++) {
+                       dprintk(KERN_INFO "   answers[%d]=0x%08X:0x%08X\n",
+                               i,
+                               answer->open_client_session.answers[i].
+                                       value.a,
+                               answer->open_client_session.answers[i].
+                                       value.b);
+               }
+               break;
+
+       case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+               dprintk(KERN_INFO
+                       "   message_size      = 0x%02X\n"
+                       "   message_type      = 0x%02X "
+                               "ANSWER_CLOSE_CLIENT_SESSION\n"
+                       "   operation_id      = 0x%08X\n"
+                       "   error_code        = 0x%08X\n",
+                       answer->header.message_size,
+                       answer->header.message_type,
+                       answer->header.operation_id,
+                       answer->close_client_session.error_code);
+               break;
+
+       case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+               dprintk(KERN_INFO
+                       "   message_size    = 0x%02X\n"
+                       "   message_type    = 0x%02X "
+                               "tf_answer_register_shared_memory\n"
+                       "   operation_id    = 0x%08X\n"
+                       "   error_code      = 0x%08X\n"
+                       "   block          = 0x%08X\n",
+                       answer->header.message_size,
+                       answer->header.message_type,
+                       answer->header.operation_id,
+                       answer->register_shared_memory.error_code,
+                       answer->register_shared_memory.block);
+               break;
+
+       case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+               dprintk(KERN_INFO
+                       "   message_size    = 0x%02X\n"
+                       "   message_type    = 0x%02X "
+                               "ANSWER_RELEASE_SHARED_MEMORY\n"
+                       "   operation_id    = 0x%08X\n"
+                       "   error_code      = 0x%08X\n"
+                       "   block_id        = 0x%08X\n",
+                       answer->header.message_size,
+                       answer->header.message_type,
+                       answer->header.operation_id,
+                       answer->release_shared_memory.error_code,
+                       answer->release_shared_memory.block_id);
+               break;
+
+       case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+               dprintk(KERN_INFO
+                       "   message_size      = 0x%02X\n"
+                       "   message_type      = 0x%02X "
+                               "tf_answer_invoke_client_command\n"
+                       "   error_origin     = 0x%02X\n"
+                       "   operation_id      = 0x%08X\n"
+                       "   error_code        = 0x%08X\n",
+                       answer->header.message_size,
+                       answer->header.message_type,
+                       answer->invoke_client_command.error_origin,
+                       answer->header.operation_id,
+                       answer->invoke_client_command.error_code
+                       );
+               for (i = 0; i < 4; i++) {
+                       dprintk(KERN_INFO "   answers[%d]=0x%08X:0x%08X\n",
+                               i,
+                               answer->invoke_client_command.answers[i].
+                                       value.a,
+                               answer->invoke_client_command.answers[i].
+                                       value.b);
+               }
+               break;
+
+       case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+               dprintk(KERN_INFO
+                       "   message_size      = 0x%02X\n"
+                       "   message_type      = 0x%02X "
+                               "TF_ANSWER_CANCEL_CLIENT_COMMAND\n"
+                       "   operation_id      = 0x%08X\n"
+                       "   error_code        = 0x%08X\n",
+                       answer->header.message_size,
+                       answer->header.message_type,
+                       answer->header.operation_id,
+                       answer->cancel_client_operation.error_code);
+               break;
+
+       case TF_MESSAGE_TYPE_MANAGEMENT:
+               dprintk(KERN_INFO
+                       "   message_size      = 0x%02X\n"
+                       "   message_type      = 0x%02X "
+                               "TF_MESSAGE_TYPE_MANAGEMENT\n"
+                       "   operation_id      = 0x%08X\n"
+                       "   error_code        = 0x%08X\n",
+                       answer->header.message_size,
+                       answer->header.message_type,
+                       answer->header.operation_id,
+                       answer->header.error_code);
+               break;
+
+       default:
+               dprintk(
+                       KERN_ERR "   message_type = 0x%02X "
+                               "(Unknown message type)\n",
+                       answer->header.message_type);
+               break;
+
+       }
+}
+
+#endif  /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+/*----------------------------------------------------------------------------
+ * SHA-1 implementation
+ * This is taken from the Linux kernel source crypto/sha1.c
+ *----------------------------------------------------------------------------*/
+
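+/*
+ * SHA-1 running state: count is the total number of bytes hashed so far,
+ * state[] holds the five 32-bit chaining variables (H0..H4), and buffer[]
+ * accumulates input until a full 64-byte block can be transformed.
+ */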
+struct sha1_ctx {
+       u64 count;
+       u32 state[5];
+       u8 buffer[64];
+};
+
+static inline u32 rol(u32 value, u32 bits)
+{
+       return ((value) << (bits)) | ((value) >> (32 - (bits)));
+}
+
+/* blk0() and blk() perform the initial expand. */
+/* I got the idea of expanding during the round function from SSLeay */
+#define blk0(i) block32[i]
+
+#define blk(i) (block32[i & 15] = rol( \
+       block32[(i + 13) & 15] ^ block32[(i + 8) & 15] ^ \
+       block32[(i + 2) & 15] ^ block32[i & 15], 1))
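+
+/*
+ * blk(i) computes the SHA-1 message schedule
+ *   W[i] = rol(W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16], 1)
+ * in place, using block32[] as a 16-word circular buffer.
+ */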
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v, w, x, y, z, i) do { \
+       z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
+       w = rol(w, 30); } while (0)
+
+#define R1(v, w, x, y, z, i) do { \
+       z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+       w = rol(w, 30); } while (0)
+
+#define R2(v, w, x, y, z, i) do { \
+       z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
+       w = rol(w, 30); } while (0)
+
+#define R3(v, w, x, y, z, i) do { \
+       z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+       w = rol(w, 30); } while (0)
+
+#define R4(v, w, x, y, z, i) do { \
+       z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+       w = rol(w, 30); } while (0)
+
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+static void sha1_transform(u32 *state, const u8 *in)
+{
+       u32 a, b, c, d, e;
+       u32 block32[16];
+
+       /* convert/copy data to workspace */
+       for (a = 0; a < sizeof(block32)/sizeof(u32); a++)
+               block32[a] = ((u32) in[4 * a]) << 24 |
+                            ((u32) in[4 * a + 1]) << 16 |
+                            ((u32) in[4 * a + 2]) <<  8 |
+                            ((u32) in[4 * a + 3]);
+
+       /* Copy context->state[] to working vars */
+       a = state[0];
+       b = state[1];
+       c = state[2];
+       d = state[3];
+       e = state[4];
+
+       /* 4 rounds of 20 operations each. Loop unrolled. */
+       R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1);
+       R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3);
+       R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5);
+       R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7);
+       R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9);
+       R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11);
+       R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13);
+       R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15);
+
+       R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17);
+       R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19);
+
+       R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21);
+       R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23);
+       R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25);
+       R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27);
+       R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29);
+       R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31);
+       R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33);
+       R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35);
+       R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37);
+       R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39);
+
+       R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41);
+       R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43);
+