/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

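/*
 * Match helper for MIDR-based errata: returns true if the local CPU's MIDR
 * falls within the model/variant/revision range described by @entry. It
 * reads the current CPU's MIDR, so it is only meaningful when called with
 * SCOPE_LOCAL_CPU and preemption disabled.
 */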
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
				       entry->midr_range_min,
				       entry->midr_range_max);
}

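/*
 * Returns true if this CPU's cache type register (CTR_EL0) differs from the
 * sanitised system-wide value in any of the fields that must match across
 * all CPUs.
 */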
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

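/*
 * Disable direct userspace access to CTR_EL0 on this CPU so that reads trap
 * to EL1 and can be emulated using the sanitised system-wide value.
 */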
static int cpu_enable_trap_ctr_access(void *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
	return 0;
}

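/*
 * Branch predictor hardening: each affected CPU records a per-CPU
 * invalidation callback and, when KVM is enabled, is assigned a 2K slot in
 * the hyp vectors carrying the corresponding hardening sequence.
 */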
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];

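/*
 * Copy the hardening sequence into each 0x80-byte vector entry of the given
 * 2K hyp vectors slot, then flush the instruction cache over the new code.
 */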
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

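/*
 * Install @fn as this CPU's hardening callback. Hyp vector slots are shared:
 * if another CPU already installed the same callback, reuse its slot;
 * otherwise claim the next free slot and patch it.
 */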
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static int last_slot = -1;
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __psci_hyp_bp_inval_start	NULL
#define __psci_hyp_bp_inval_end		NULL

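/* Without KVM there are no hyp vectors to patch; just record the callback. */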
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

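/*
 * Install the hardening callback only on CPUs that match the erratum entry
 * and whose ID_AA64PFR0_EL1.CSV2 field does not already indicate that branch
 * predictions are isolated between contexts.
 */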
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <linux/psci.h>

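/*
 * If PSCI is available, use the firmware's "get version" call as the branch
 * predictor invalidation callback for this CPU, together with the matching
 * __psci_hyp_bp_inval_* hyp vector sequence.
 */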
static int enable_psci_bp_hardening(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;

	if (psci_ops.get_version)
		install_bp_hardening_cb(entry,
					(bp_hardening_cb_t)psci_ops.get_version,
					__psci_hyp_bp_inval_start,
					__psci_hyp_bp_inval_end);

	return 0;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

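/*
 * Helpers to initialise the MIDR-matching fields of an erratum entry:
 * MIDR_RANGE() matches a [min, max] variant/revision window of one CPU
 * model, MIDR_ALL_VERSIONS() matches every variant and revision of a model.
 */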
#define MIDR_RANGE(model, min, max) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = 0, \
	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

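/*
 * Table of known errata. Each entry is matched against every CPU as it comes
 * up; the .enable hook, where present, applies the workaround on affected
 * CPUs.
 */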
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
		/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
		/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 1)),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
	},
	{
		/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.def_scope = SCOPE_LOCAL_CPU,
		.enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		.enable = enable_psci_bp_hardening,
	},
#endif
	{
	}
};

/*
 * The CPU errata workarounds are detected and applied at boot time and the
 * related information is freed soon after. If a new CPU requires a
 * workaround that was not detected at boot, fail that CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
	const struct arm64_cpu_capabilities *caps = arm64_errata;

	for (; caps->matches; caps++)
		if (!cpus_have_cap(caps->capability) &&
			caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: Requires work around for %s, not detected"
				" at boot time\n",
				smp_processor_id(),
				caps->desc ? : "an erratum");
			cpu_die_early();
		}
}

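/*
 * Detect the errata that affect the calling CPU and record the corresponding
 * capabilities, logging each workaround as it is enabled.
 */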
void update_cpu_errata_workarounds(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

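/*
 * Called once at boot to invoke the .enable hooks of all errata that were
 * detected on the system.
 */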
void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}