arm: Add BTB invalidation on switch_mm for Cortex-A9, A12 and A17
Marc Zyngier [Thu, 1 Feb 2018 11:07:33 +0000 (11:07 +0000)]
** Not yet queued for inclusion in mainline **

In order to avoid aliasing attacks against the branch predictor,
some implementations require the BTB to be invalidated when switching
from one user context to another.

For this, we reuse the existing implementation for Cortex-A8, and
apply it to A9, A12 and A17.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Change-Id: Ibbd99465a5dcf5eda6a29dd23a55f9b21b280e65
Reviewed-on: https://git-master.nvidia.com/r/1704129
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Tested-by: Bibek Basu <bbasu@nvidia.com>
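
For context, a simplified sketch (not the literal kernel source) of why hooking
the processor-specific switch_mm routine is sufficient: on arch/arm, every
switch to a different user mm ends up in check_and_switch_context(), which
calls cpu_switch_mm(mm->pgd, mm) and dispatches through the per-CPU processor
function table. The helper name below is hypothetical and only illustrates
that call path.

#include <linux/mm_types.h>
#include <asm/proc-fns.h>

/*
 * Simplified, kernel-context sketch only.  cpu_switch_mm() dispatches
 * to the processor-specific switch_mm slot; with this patch, on
 * Cortex-A9/A12/A17 that slot points at cpu_v7_btbinv_switch_mm, which
 * invalidates the BTB and then falls through into cpu_v7_switch_mm.
 */
static void sketch_switch_to(struct mm_struct *next)	/* hypothetical helper */
{
	cpu_switch_mm(next->pgd, next);
}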

arch/arm/mm/Kconfig
arch/arm/mm/proc-v7-3level.S
arch/arm/mm/proc-v7.S

arch/arm/mm/Kconfig
index 2c7e308..07c3309 100644
@@ -947,3 +947,20 @@ config ARM_SAVE_DEBUG_CONTEXT_NO_LOCK
          the ARM debug registers across CPU powerdown. This option
          should not be selected unless you are actively debugging
          the context save/restore code. If unsure, say N.
+
+config HARDEN_BRANCH_PREDICTOR
+       bool "Harden the branch predictor against aliasing attacks" if EXPERT
+       default y
+       help
+         Speculation attacks against some high-performance processors rely on
+         being able to manipulate the branch predictor for a victim context by
+         executing aliasing branches in the attacker context.  Such attacks
+         can be partially mitigated against by clearing internal branch
+         predictor state and limiting the prediction logic in some situations.
+
+         This config option will take CPU-specific actions to harden the
+         branch predictor against aliasing attacks and may rely on specific
+         instruction sequences or control bits being set by the system
+         firmware.
+
+         If unsure, say Y.
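
The new symbol is consumed like any other Kconfig option; the proc-v7.S hunk
below selects the hardened switch_mm routine under
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR. A hypothetical C-side equivalent of the
same guard, for illustration only:

#include <linux/kconfig.h>

/*
 * Illustrative only -- this helper is not part of the patch.  It keys
 * off the new option the same way the assembly #ifdef below does.
 */
static inline bool branch_predictor_hardening_enabled(void)
{
	return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR);
}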
arch/arm/mm/proc-v7-3level.S
index 8ba8dfd..e2bb1ae 100644
@@ -62,6 +62,10 @@ ENTRY(cpu_v7_icinv_switch_mm)
        mcr     p15, 0, r0, c7, c5, 0           @ ICIALLU
        /* Fall through to switch_mm... */
 #endif
+ENTRY(cpu_v7_btbinv_switch_mm)
+#ifdef CONFIG_MMU
+       mcr     p15, 0, r0, c7, c5, 6                   @ flush BTAC/BTB
+#endif
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
        mmid    r2, r2                          @ get mm->context.id
@@ -73,6 +77,7 @@ ENTRY(cpu_v7_switch_mm)
 #endif
        mov     pc, lr
 ENDPROC(cpu_v7_switch_mm)
+ENDPROC(cpu_v7_btbinv_switch_mm)
 ENDPROC(cpu_v7_icinv_switch_mm)
 
 /*
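
The new entry point issues a single CP15 write before falling through into the
existing cpu_v7_switch_mm. For illustration, the same BPIALL operation expressed
as a hypothetical C helper with inline assembly; the source register value is
ignored by the operation, which is why the patch can simply reuse r0.

/*
 * Illustrative sketch only -- the patch performs this directly in
 * assembly.  BPIALL (mcr p15, 0, <Rt>, c7, c5, 6) invalidates all
 * branch predictor entries; the value written is ignored.
 */
static inline void sketch_btb_invalidate_all(void)
{
	asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0) : "memory");
}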
arch/arm/mm/proc-v7.S
index aac1d85..322ecf5 100644
@@ -435,6 +435,26 @@ ENDPROC(cpu_v7_do_resume)
        globl_equ       cpu_ca15_do_resume,     cpu_v7_do_resume
 #endif
 
+/*
+ * Cortex-A12/A17
+ */
+       globl_equ       cpu_ca17_proc_init,     cpu_v7_proc_init
+       globl_equ       cpu_ca17_proc_fin,      cpu_v7_proc_fin
+       globl_equ       cpu_ca17_reset,         cpu_v7_reset
+       globl_equ       cpu_ca17_do_idle,       cpu_v7_do_idle
+       globl_equ       cpu_ca17_dcache_clean_area, cpu_v7_dcache_clean_area
+       globl_equ       cpu_ca17_set_pte_ext,   cpu_v7_set_pte_ext
+       globl_equ       cpu_ca17_suspend_size,  cpu_v7_suspend_size
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       globl_equ       cpu_ca17_switch_mm,     cpu_v7_btbinv_switch_mm
+#else
+       globl_equ       cpu_ca17_switch_mm,     cpu_v7_switch_mm
+#endif
+#ifdef CONFIG_ARM_CPU_SUSPEND
+       globl_equ       cpu_ca17_do_suspend,    cpu_v7_do_suspend
+       globl_equ       cpu_ca17_do_resume,     cpu_v7_do_resume
+#endif
+
 #ifdef CONFIG_CPU_PJ4B
        globl_equ       cpu_pj4b_switch_mm,     cpu_v7_switch_mm
        globl_equ       cpu_pj4b_set_pte_ext,   cpu_v7_set_pte_ext