/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

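/*
 * Handle the "nothreads" kernel command line parameter, which limits
 * each core to a single VPE regardless of hardware support.
 */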
static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);

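/*
 * core_vpe_count() - Count the VPEs within a core.
 * @core: index of the core to examine
 *
 * Reads the PVPE field of the core's GCR_Cx_CONFIG register, via the CM
 * "other" register region, to determine how many Virtual Processing
 * Elements (MT) or Virtual Processors (MIPSr6) the core contains.
 * Returns 1 when multi-threading is disabled or unsupported.
 */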
static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (threads_disabled)
		return 1;

	if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		&& (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
		return 1;

	mips_cm_lock_other(core, 0);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}

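/*
 * cps_smp_setup() - Detect & record the system topology.
 *
 * Probes the number of cores & VPEs from the Coherence Manager, records
 * the result in cpu_data, marks each VPE possible & present (Linux CPUs
 * map 1:1 to VPEs here), establishes a coherent CCA and initialises
 * core 0, upon which we are running.
 */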
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

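/*
 * cps_prepare_cpus() - Prepare the system to boot secondary CPUs.
 * @max_cpus: the maximum number of CPUs to bring up (unused here)
 *
 * Checks whether the cache configuration permits multi-core operation,
 * patches the boot CCA into the secondary entry code & allocates the
 * per-core and per-VPE boot configuration structures consumed by
 * mips_cps_core_entry. On allocation failure all CPUs except the boot
 * CPU are marked not present, effectively disabling SMP.
 */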
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if ((cca_unsuitable || cpu_has_dc_aliases) && ncores > 1) {
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

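/*
 * boot_core() - Power up & start a currently powered-down core.
 * @core: index of the core to boot
 * @vpe_id: index of the VPE within the core which should run first
 *
 * Points the core's reset vector at mips_cps_core_entry & releases it
 * from reset, using the CPC where present so that the boot can be
 * monitored via the core's sequencer state (STAT_CONF).
 */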
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 access, stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(core, 0);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

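/*
 * remote_vpe_boot() - SMP call function to boot a VPE on the local core.
 * @dummy: unused
 *
 * Runs via smp_call_function_single() on a CPU which is already online
 * within the target core, from where mips_cps_boot_vpes() can start the
 * new VPE.
 */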
static void remote_vpe_boot(void *dummy)
{
	unsigned core = current_cpu_data.core;
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

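/*
 * cps_boot_secondary() - Start a secondary CPU (ie. VPE) running.
 * @cpu: logical index of the CPU to boot
 * @idle: the idle task whose stack the new CPU will run on
 *
 * Records the PC, stack pointer & gp with which the VPE should start in
 * its vpe_boot_config, then starts it via whichever path applies:
 * powering up its core, asking an online CPU within the target core to
 * start it, or booting it directly on the current core.
 */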
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(core, vpe_id);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
}

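/*
 * cps_init_secondary() - Early initialisation run on a new CPU.
 *
 * Runs on the secondary CPU itself: disables further MT thread contexts,
 * sanity checks the GIC's notion of our VP ID & then clears the
 * interrupt mask when using vectored external interrupts, otherwise
 * unmasks hardware interrupt lines IP2-IP7.
 */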
static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned ident = gic_read_local_vp_id();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

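/*
 * cps_smp_finish() - Final step of onlining a secondary CPU.
 *
 * Arms the CPU's first timer interrupt, enrolls it in the FPU-full mask
 * where applicable & finally enables interrupts.
 */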
static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

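/*
 * cps_cpu_disable() - Prepare to take the current CPU offline.
 *
 * Refuses to offline the boot CPU, or any CPU when the platform cannot
 * power gate cores. Otherwise removes this VPE from its core's boot
 * VPE mask & marks the CPU offline.
 */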
static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();

	return 0;
}

static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;

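/*
 * play_dead() - Architecture hook run by the dying CPU itself.
 *
 * Runs with interrupts disabled. If a sibling VPE within the same core
 * remains online then only this thread context is halted; otherwise the
 * whole core is powered down via the CPS power management code. Should
 * never return - reaching the final panic() indicates failure.
 */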
void play_dead(void)
{
	unsigned int cpu, core, vpe_id;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	core = cpu_data[cpu].core;
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	(void)cpu_report_death();

	if (cpu_death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

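/*
 * wait_for_sibling_halt() - Poll for a sibling TC to enter its halted state.
 * @ptr_cpu: logical index of the dying CPU, cast to a pointer
 *
 * Runs via smp_call_function_single() on an online VPE within the same
 * core as the dying CPU, whose MT TC registers it polls using settc().
 */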
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

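/*
 * cps_cpu_die() - Clean up after a CPU has died.
 * @cpu: logical index of the CPU which died
 *
 * Runs on a surviving CPU. Waits for the dying CPU to report its death,
 * then for the hardware to reach a safe state: the core powered down or
 * clock gated, the sibling TC halted, or the VP stopped, depending upon
 * the exit path the dying CPU chose.
 */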
static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned stat;
	int err;

	/* Wait for the CPU to choose its way out */
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually go offline. Without this, the
	 * offlining may race with one or more of:
	 *
	 * - Onlining the CPU again.
	 * - Powering down the core if another VPE within it is offlined.
	 * - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
			mips_cm_lock_other(core, 0);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
			mips_cm_unlock_other();
		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(core, vpe_id);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};

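/*
 * mips_cps_smp_in_use() - Determine whether the CPS SMP ops are in use.
 *
 * Returns true if cps_smp_ops are the registered platform SMP
 * operations, ie. if secondary CPUs are brought up via CPS.
 */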
bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

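/*
 * register_cps_smp_ops() - Register the CPS SMP implementation.
 *
 * Registers cps_smp_ops as the platform SMP operations, provided the
 * required hardware - a Coherence Manager & a GIC for IPIs - is present.
 * Typically called from platform setup code before SMP is initialised.
 * Returns 0 on success or -ENODEV if the hardware is lacking.
 */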
int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* Check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}