arm64: Add CONFIG_HARDEN_BRANCH_PREDICTOR option
Martin Gao [Thu, 21 Dec 2017 23:48:19 +0000 (15:48 -0800)]
Aliasing attacks against CPU branch predictors can allow an attacker to
redirect speculative control flow on some CPUs and potentially divulge
information from one context to another.

This patch adds a Kconfig option to enable implementation-specific
mitigations against these attacks for CPUs that are affected. Currently,
a workaround is only implemented for Cortex-A57 and Cortex-A72, which
additionally relies on the EL3 firmware setting CPUACTLR_EL1[0] to 1.

Backported from K4.9: https://git-master.nvidia.com/r/1621628/

Bug 1975157

Change-Id: Id0b12003837f64a60780ec96b2cf22725615ad35
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Gagan Grover <ggrover@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1626828
(cherry picked from commit bfb554062622f53f47eb762302c98df1f3ee4959)
Signed-off-by: Jeetesh Burman <jburman@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1648611
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
GVS: Gerrit_Virtual_Submit

arch/arm64/Kconfig
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/entry.S
arch/arm64/mm/fault.c
arch/arm64/mm/proc.S

index 4e40b0d..f1c69fd 100644 (file)
@@ -542,6 +542,21 @@ config SECCOMP
          and the task is only allowed to execute a few safe syscalls
          defined by each seccomp mode.
 
+config HARDEN_BRANCH_PREDICTOR
+       bool "Harden the branch predictor against aliasing attacks" if EXPERT
+       default y
+       help
+         Speculation attacks against some high-performance processors rely on
+         being able to manipulate the branch predictor for a victim context by
+         executing aliasing branches in the attacker context.  Such attacks
+         can be partially mitigated against by clearing internal branch
+         predictor state and limiting the prediction logic in some situations.
+
+         This config option will take CPU-specific actions to harden the
+         branch predictor against aliasing attacks and may rely on specific
+         control bits being set by the system firmware.
+
+         If unsure, say Y.
 
 endmenu
 
index 9ebb84d..7e24bce 100644 (file)
@@ -24,8 +24,8 @@
 #define ARM64_WORKAROUND_CLEAN_CACHE           0
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE   1
 #define ARM64_WORKAROUND_845719                        2
-
-#define ARM64_NCAPS                            3
+#define ARM64_IC_IALLU_ON_CTX_CHANGE           3
+#define ARM64_NCAPS                            4
 
 #ifndef __ASSEMBLY__
 
index b9c7917..433d5da 100644 (file)
@@ -64,6 +64,7 @@
 
 #define ARM_CPU_PART_AEM_V8    0xD0F
 #define ARM_CPU_PART_FOUNDATION        0xD00
+#define ARM_CPU_PART_CORTEX_A72        0xD08
 #define ARM_CPU_PART_CORTEX_A57        0xD07
 #define ARM_CPU_PART_CORTEX_A53        0xD03
 
index bbc710a..bcd6602 100644 (file)
@@ -25,6 +25,7 @@
 
 #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
 
 /*
  * Add a struct or another datatype to the union below if you need
@@ -64,6 +65,12 @@ is_affected_midr_range(struct arm64_cpu_capabilities *entry)
        .midr_range_min = min, \
        .midr_range_max = max
 
+#define MIDR_ALL_VERSIONS(model) \
+       .is_affected = is_affected_midr_range, \
+       .midr_model = model, \
+       .midr_range_min = 0, \
+       .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
+
 struct arm64_cpu_capabilities arm64_errata[] = {
 #if    defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -100,6 +107,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
        },
 #endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       {
+               .capability = ARM64_IC_IALLU_ON_CTX_CHANGE,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+       },
+       {
+               .capability = ARM64_IC_IALLU_ON_CTX_CHANGE,
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+       },
+#endif
        {
        }
 };
index b43e144..7df1bf5 100644 (file)
        ARM64_WORKAROUND_845719
 #endif
 #endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+alternative_if_not ARM64_IC_IALLU_ON_CTX_CHANGE
+       nop
+alternative_else
+       ic      iallu
+       /* DSB in __switch_to */
+alternative_endif
+#endif
        .endif
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
index 88de844..5c97ecc 100644 (file)
@@ -156,6 +156,10 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
        si.si_errno = 0;
        si.si_code = code;
        si.si_addr = (void __user *)addr;
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       asm(ALTERNATIVE("nop; nop", "ic iallu; dsb nsh",
+                       ARM64_IC_IALLU_ON_CTX_CHANGE));
+#endif
        force_sig_info(sig, &si, tsk);
 }
 
index f452dab..35e26e6 100644 (file)
@@ -25,6 +25,8 @@
 #include <asm/hwcap.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
 
 #include "proc-macros.S"
 
@@ -62,6 +64,7 @@ ENDPROC(cpu_cache_off)
  */
        .align  5
 ENTRY(cpu_reset)
+
        mrs     x1, sctlr_el1
        bic     x1, x1, #1
        msr     sctlr_el1, x1                   // disable the MMU
@@ -172,6 +175,14 @@ ENTRY(cpu_do_switch_mm)
        bfi     x0, x1, #48, #16                // set the ASID
        msr     ttbr0_el1, x0                   // set TTBR0
        isb
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+alternative_if_not ARM64_IC_IALLU_ON_CTX_CHANGE
+       nop
+alternative_else
+       ic      iallu
+       /* DSB in __switch_to */
+alternative_endif
+#endif
        ret
 ENDPROC(cpu_do_switch_mm)