blob: 70e5f1826fd9601c7285638e8df5411bd49509e4 [file] [log] [blame]
Andre Przywarae116a372014-11-14 15:54:09 +00001/*
2 * Contains CPU specific errata definitions
3 *
4 * Copyright (C) 2014 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
Andre Przywarae116a372014-11-14 15:54:09 +000019#include <linux/types.h>
20#include <asm/cpu.h>
21#include <asm/cputype.h>
22#include <asm/cpufeature.h>
23
Andre Przywara301bcfa2014-11-14 15:54:10 +000024static bool __maybe_unused
Suzuki K Poulose92406f02016-04-22 12:25:31 +010025is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
Andre Przywara301bcfa2014-11-14 15:54:10 +000026{
Suzuki K Poulose92406f02016-04-22 12:25:31 +010027 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
Will Deacond5370f72016-02-02 12:46:24 +000028 return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
29 entry->midr_range_min,
30 entry->midr_range_max);
Andre Przywara301bcfa2014-11-14 15:54:10 +000031}
32
Suzuki K Poulose116c81f2016-09-09 14:07:16 +010033static bool
34has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
35 int scope)
36{
37 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
38 return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
39 (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
40}
41
/*
 * Make userspace accesses to CTR_EL0 trap to EL1 so the kernel can
 * emulate a sanitised value on CPUs with mismatched cache geometry.
 * Runs on each affected CPU; always returns 0.
 */
static int cpu_enable_trap_ctr_access(void *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
	return 0;
}
48
Will Deacon0f15adb2018-01-03 11:17:58 +000049#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
50#include <asm/mmu_context.h>
51#include <asm/cacheflush.h>
52
53DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
54
55#ifdef CONFIG_KVM
Will Deaconaa6acde2018-01-03 12:46:21 +000056extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
Shanker Donthineniec82b562018-01-05 14:28:59 -060057extern char __qcom_hyp_sanitize_link_stack_start[];
58extern char __qcom_hyp_sanitize_link_stack_end[];
Will Deaconaa6acde2018-01-03 12:46:21 +000059
Will Deacon0f15adb2018-01-03 11:17:58 +000060static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
61 const char *hyp_vecs_end)
62{
63 void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
64 int i;
65
66 for (i = 0; i < SZ_2K; i += 0x80)
67 memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
68
69 flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
70}
71
/*
 * Record @fn as this CPU's branch-predictor hardening callback and point
 * the CPU at a hyp vector slot containing the @hyp_vecs_start..end
 * sequence.  Slots are shared: if another CPU already installed the same
 * callback, its slot is reused; otherwise a new slot is allocated and
 * populated.  Serialised by bp_lock.
 */
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static int last_slot = -1;
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	/* Reuse an existing slot if any CPU already uses this callback. */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		/* Allocate the next free slot; BUG if we run out. */
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
100#else
/* Without KVM there are no hyp vector sequences; stub the symbols out. */
#define __psci_hyp_bp_inval_start		NULL
#define __psci_hyp_bp_inval_end			NULL
#define __qcom_hyp_sanitize_link_stack_start	NULL
#define __qcom_hyp_sanitize_link_stack_end	NULL
Will Deaconaa6acde2018-01-03 12:46:21 +0000105
/*
 * !CONFIG_KVM variant: there are no hyp vectors to patch, so just
 * record the hardening callback for this CPU.
 */
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
112#endif /* CONFIG_KVM */
113
114static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
115 bp_hardening_cb_t fn,
116 const char *hyp_vecs_start,
117 const char *hyp_vecs_end)
118{
119 u64 pfr0;
120
121 if (!entry->matches(entry, SCOPE_LOCAL_CPU))
122 return;
123
124 pfr0 = read_cpuid(ID_AA64PFR0_EL1);
125 if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
126 return;
127
128 __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
129}
Will Deaconaa6acde2018-01-03 12:46:21 +0000130
131#include <linux/psci.h>
132
133static int enable_psci_bp_hardening(void *data)
134{
135 const struct arm64_cpu_capabilities *entry = data;
136
137 if (psci_ops.get_version)
138 install_bp_hardening_cb(entry,
139 (bp_hardening_cb_t)psci_ops.get_version,
140 __psci_hyp_bp_inval_start,
141 __psci_hyp_bp_inval_end);
142
143 return 0;
144}
Shanker Donthineniec82b562018-01-05 14:28:59 -0600145
/*
 * Scrub the CPU's return-address (link) stack predictor by executing a
 * run of 16 dummy bl instructions, then restore the caller's x30.
 */
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov %0, x30 \n"
		     ".rept 16 \n"
		     "bl . + 4 \n"
		     ".endr \n"
		     "mov x30, %0 \n"
		     : "=&r" (tmp));
}
157
158static int qcom_enable_link_stack_sanitization(void *data)
159{
160 const struct arm64_cpu_capabilities *entry = data;
161
162 install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
163 __qcom_hyp_sanitize_link_stack_start,
164 __qcom_hyp_sanitize_link_stack_end);
165
166 return 0;
167}
Will Deacon0f15adb2018-01-03 11:17:58 +0000168#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
169
/*
 * Initialiser fragment: match CPUs whose MIDR model is @model and whose
 * variant/revision lies in [@min, @max].  Matching is per-CPU scope.
 */
#define MIDR_RANGE(model, min, max) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max

/* Initialiser fragment: match every variant/revision of @model. */
#define MIDR_ALL_VERSIONS(model) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = 0, \
	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
183
/*
 * Table of all known CPU errata, the capability each one sets and the
 * per-CPU enable hook (if any).  Entries are compiled in only when the
 * corresponding CONFIG_* workaround is selected; the table is
 * terminated by an empty entry.
 */
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 1)),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
	},
#endif
	{
		/* Always present: trap CTR_EL0 when cache geometry differs. */
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.def_scope = SCOPE_LOCAL_CPU,
		.enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	/* Branch-predictor hardening for CPUs vulnerable to Spectre-v2. */
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
		.enable = qcom_enable_link_stack_sanitization,
	},
	{
		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	},
#endif
	{
	/* Sentinel: .matches == NULL terminates the table. */
	}
};
345
Suzuki K Poulose6a6efbb2016-04-22 12:25:34 +0100346/*
347 * The CPU Errata work arounds are detected and applied at boot time
348 * and the related information is freed soon after. If the new CPU requires
349 * an errata not detected at boot, fail this CPU.
350 */
Suzuki K Poulose89ba2642016-09-09 14:07:09 +0100351void verify_local_cpu_errata_workarounds(void)
Suzuki K Poulose6a6efbb2016-04-22 12:25:34 +0100352{
353 const struct arm64_cpu_capabilities *caps = arm64_errata;
354
355 for (; caps->matches; caps++)
356 if (!cpus_have_cap(caps->capability) &&
357 caps->matches(caps, SCOPE_LOCAL_CPU)) {
358 pr_crit("CPU%d: Requires work around for %s, not detected"
359 " at boot time\n",
360 smp_processor_id(),
361 caps->desc ? : "an erratum");
362 cpu_die_early();
363 }
364}
365
/*
 * Detect which errata apply to this CPU and record the corresponding
 * capabilities (logged with the "enabling workaround for" prefix).
 */
void update_cpu_errata_workarounds(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}
Andre Przywara8e231852016-06-28 18:07:30 +0100370
/*
 * Run the .enable hook of every detected erratum capability.  Boot-time
 * only (__init).
 */
void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}