arm64: Implement array_index_mask_nospec()
Robin Murphy [Mon, 5 Feb 2018 15:34:17 +0000 (15:34 +0000)]
Provide an optimised, assembly implementation of array_index_mask_nospec()
for arm64 so that the compiler is not in a position to transform the code
in ways which affect its ability to inhibit speculation (e.g. by introducing
conditional branches).

This is similar to the sequence used by x86, modulo architectural differences
in the carry/borrow flags.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Change-Id: I168000d0f3c718902ffd7ca1ad2147d914f19e94
Reviewed-on: https://git-master.nvidia.com/r/1662099
(cherry picked from commit 39c48ac56f2d7db8c291e16c9a1bc53898f86ba5)
Signed-off-by: Jeetesh Burman <jburman@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1687455
Reviewed-by: Matthew Pedro <mapedro@nvidia.com>

arch/arm64/include/asm/barrier.h

index 3cefc36..d124a03 100644 (file)
 #define rmb()          asm volatile("dsb ld" : : : "memory")
 #define wmb()          asm volatile("dsb st" : : : "memory")
 
+/* DMA barriers: load/store-only dmb in the outer-shareable domain, for
+ * ordering CPU accesses against (coherent) DMA observers. */
+#define dma_rmb()     dmb(oshld)
+#define dma_wmb()     dmb(oshst)
+/*
+ * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
+ * and 0 otherwise.
+ *
+ * How it works: CMP computes idx - sz and sets the AArch64 C flag when no
+ * borrow occurs, i.e. C=1 iff idx >= sz (unsigned).  SBC then computes
+ * xzr - xzr - !C, which is 0 when C=1 (out of bounds) and ~0UL when C=0
+ * (in bounds).  Note AArch64's carry is inverted-borrow, unlike x86 —
+ * hence SBC against the zero register rather than x86's SBB sequence.
+ *
+ * Keeping this in asm (not C) stops the compiler from rewriting it into a
+ * conditional branch, which would defeat the speculation hardening.
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+                                                   unsigned long sz)
+{
+       unsigned long mask;
+
+       /* "Ir": sz may be an immediate or a register; "cc" because CMP
+        * clobbers the condition flags. */
+       asm volatile(
+       "       cmp     %1, %2\n"
+       "       sbc     %0, xzr, xzr\n"
+       : "=r" (mask)
+       : "r" (idx), "Ir" (sz)
+       : "cc");
+
+       /* csdb(): consumption-of-speculative-data barrier; ensures the mask
+        * is not itself subject to value speculation.  Defined elsewhere in
+        * this header (added by the companion CSDB patch). */
+       csdb();
+       return mask;
+}
+
+
 #ifndef CONFIG_SMP
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()