Ralf Baechle41c594a2006-04-05 09:45:45 +01001/* Copyright (C) 2004 Mips Technologies, Inc */
2
Ralf Baechleea580402007-10-11 23:46:09 +01003#include <linux/clockchips.h>
Ralf Baechle41c594a2006-04-05 09:45:45 +01004#include <linux/kernel.h>
5#include <linux/sched.h>
6#include <linux/cpumask.h>
7#include <linux/interrupt.h>
Ralf Baechleae036b72007-03-27 15:11:54 +01008#include <linux/kernel_stat.h>
Ralf Baechleec43c012007-01-24 19:23:21 +00009#include <linux/module.h>
Ralf Baechle41c594a2006-04-05 09:45:45 +010010
11#include <asm/cpu.h>
12#include <asm/processor.h>
13#include <asm/atomic.h>
14#include <asm/system.h>
15#include <asm/hardirq.h>
16#include <asm/hazards.h>
Ralf Baechle3b1d4ed2007-06-20 22:27:10 +010017#include <asm/irq.h>
Ralf Baechle41c594a2006-04-05 09:45:45 +010018#include <asm/mmu_context.h>
Ralf Baechle41c594a2006-04-05 09:45:45 +010019#include <asm/mipsregs.h>
20#include <asm/cacheflush.h>
21#include <asm/time.h>
22#include <asm/addrspace.h>
23#include <asm/smtc.h>
24#include <asm/smtc_ipi.h>
25#include <asm/smtc_proc.h>
26
27/*
Ralf Baechle1146fe32007-09-21 17:13:55 +010028 * SMTC Kernel needs to manipulate low-level CPU interrupt masks
29 * in do_IRQ. These are passed in setup_irq_smtc() and stored
30 * in this table.
Ralf Baechle41c594a2006-04-05 09:45:45 +010031 */
Ralf Baechle1146fe32007-09-21 17:13:55 +010032unsigned long irq_hwmask[NR_IRQS];
Ralf Baechle41c594a2006-04-05 09:45:45 +010033
Ralf Baechle41c594a2006-04-05 09:45:45 +010034#define LOCK_MT_PRA() \
35 local_irq_save(flags); \
36 mtflags = dmt()
37
38#define UNLOCK_MT_PRA() \
39 emt(mtflags); \
40 local_irq_restore(flags)
41
42#define LOCK_CORE_PRA() \
43 local_irq_save(flags); \
44 mtflags = dvpe()
45
46#define UNLOCK_CORE_PRA() \
47 evpe(mtflags); \
48 local_irq_restore(flags)
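/*
 * Note: the four macros above expect "flags" and "mtflags" to be declared
 * as locals in the enclosing function.  LOCK_MT_PRA/UNLOCK_MT_PRA only
 * suspend the other TCs of the current VPE (dmt/emt), while
 * LOCK_CORE_PRA/UNLOCK_CORE_PRA suspend all other VPEs on the core
 * (dvpe/evpe); both pairs also keep local interrupts disabled for the
 * duration.
 */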
49
50/*
51 * Data structures purely associated with SMTC parallelism
52 */
53
54
55/*
56 * Table for tracking ASIDs whose lifetime is prolonged.
57 */
58
59asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
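/*
 * Each entry is a bitmask: bit k of smtc_live_asid[tlb][asid] is set
 * (in smtc_get_new_mmu_context() below) when CPU/TC k may still hold
 * live TLB entries tagged with that ASID, so the ASID allocator skips it.
 */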
60
61/*
62 * Clock interrupt "latch" buffers, per "CPU"
63 */
64
Ralf Baechleea580402007-10-11 23:46:09 +010065static atomic_t ipi_timer_latch[NR_CPUS];
Ralf Baechle41c594a2006-04-05 09:45:45 +010066
67/*
Joe Perches603e82e2008-02-03 16:54:53 +020068 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
Ralf Baechle41c594a2006-04-05 09:45:45 +010069 */
70
71#define IPIBUF_PER_CPU 4
72
Ralf Baechle58687562007-02-05 00:33:21 +000073static struct smtc_ipi_q IPIQ[NR_CPUS];
74static struct smtc_ipi_q freeIPIq;
Ralf Baechle41c594a2006-04-05 09:45:45 +010075
76
77/* Forward declarations */
78
Ralf Baechle937a8012006-10-07 19:44:33 +010079void ipi_decode(struct smtc_ipi *);
Ralf Baechle58687562007-02-05 00:33:21 +000080static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
Ralf Baechle20bb25d2007-03-27 15:19:58 +010081static void setup_cross_vpe_interrupts(unsigned int nvpe);
Ralf Baechle41c594a2006-04-05 09:45:45 +010082void init_smtc_stats(void);
83
84/* Global SMTC Status */
85
86unsigned int smtc_status = 0;
87
88/* Boot command line configuration overrides */
89
Kevin D. Kissellbe5f1f22007-03-21 13:28:37 +010090static int vpe0limit;
Ralf Baechle41c594a2006-04-05 09:45:45 +010091static int ipibuffers = 0;
92static int nostlb = 0;
93static int asidmask = 0;
94unsigned long smtc_asid_mask = 0xff;
95
Kevin D. Kissellbe5f1f22007-03-21 13:28:37 +010096static int __init vpe0tcs(char *str)
97{
98 get_option(&str, &vpe0limit);
99
100 return 1;
101}
102
Ralf Baechle41c594a2006-04-05 09:45:45 +0100103static int __init ipibufs(char *str)
104{
105 get_option(&str, &ipibuffers);
106 return 1;
107}
108
109static int __init stlb_disable(char *s)
110{
111 nostlb = 1;
112 return 1;
113}
114
115static int __init asidmask_set(char *str)
116{
117 get_option(&str, &asidmask);
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100118 switch (asidmask) {
Ralf Baechle41c594a2006-04-05 09:45:45 +0100119 case 0x1:
120 case 0x3:
121 case 0x7:
122 case 0xf:
123 case 0x1f:
124 case 0x3f:
125 case 0x7f:
126 case 0xff:
127 smtc_asid_mask = (unsigned long)asidmask;
128 break;
129 default:
130 printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
131 }
132 return 1;
133}
134
Kevin D. Kissellbe5f1f22007-03-21 13:28:37 +0100135__setup("vpe0tcs=", vpe0tcs);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100136__setup("ipibufs=", ipibufs);
137__setup("nostlb", stlb_disable);
138__setup("asidmask=", asidmask_set);
139
Ralf Baechlec68644d2007-02-26 20:46:34 +0000140#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
Ralf Baechle41c594a2006-04-05 09:45:45 +0100141
142static int hang_trig = 0;
143
144static int __init hangtrig_enable(char *s)
145{
146 hang_trig = 1;
147 return 1;
148}
149
150
151__setup("hangtrig", hangtrig_enable);
152
153#define DEFAULT_BLOCKED_IPI_LIMIT 32
154
155static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
156
157static int __init tintq(char *str)
158{
159 get_option(&str, &timerq_limit);
160 return 1;
161}
162
163__setup("tintq=", tintq);
164
Ralf Baechle97aef632007-07-27 18:36:32 +0100165static int imstuckcount[2][8];
Ralf Baechle41c594a2006-04-05 09:45:45 +0100166/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
Ralf Baechle97aef632007-07-27 18:36:32 +0100167static int vpemask[2][8] = {
Ralf Baechle20bb25d2007-03-27 15:19:58 +0100168 {0, 0, 1, 0, 0, 0, 0, 1},
169 {0, 0, 0, 0, 0, 0, 0, 1}
170};
Ralf Baechle41c594a2006-04-05 09:45:45 +0100171int tcnoprog[NR_CPUS];
172static atomic_t idle_hook_initialized = {0};
173static int clock_hang_reported[NR_CPUS];
174
Ralf Baechlec68644d2007-02-26 20:46:34 +0000175#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
Ralf Baechle41c594a2006-04-05 09:45:45 +0100176
Ralf Baechle41c594a2006-04-05 09:45:45 +0100177/*
178 * Configure shared TLB - VPC configuration bit must be set by caller
179 */
180
Ralf Baechle58687562007-02-05 00:33:21 +0000181static void smtc_configure_tlb(void)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100182{
Ralf Baechle21a151d2007-10-11 23:46:15 +0100183 int i, tlbsiz, vpes;
Ralf Baechle41c594a2006-04-05 09:45:45 +0100184 unsigned long mvpconf0;
185 unsigned long config1val;
186
187 /* Set up ASID preservation table */
188 for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
189 for(i = 0; i < MAX_SMTC_ASIDS; i++) {
190 smtc_live_asid[vpes][i] = 0;
191 }
192 }
193 mvpconf0 = read_c0_mvpconf0();
194
195 if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
196 >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
197 /* If we have multiple VPEs, try to share the TLB */
198 if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
199 /*
200 * If TLB sizing is programmable, shared TLB
201 * size is the total available complement.
202 * Otherwise, we have to take the sum of all
203 * static VPE TLB entries.
204 */
205 if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
206 >> MVPCONF0_PTLBE_SHIFT)) == 0) {
207 /*
208 * If there's more than one VPE, there had better
209 * be more than one TC, because we need one to bind
210 * to each VPE in turn to be able to read
211 * its configuration state!
212 */
213 settc(1);
214 /* Stop the TC from doing anything foolish */
215 write_tc_c0_tchalt(TCHALT_H);
216 mips_ihb();
217 /* No need to un-Halt - that happens later anyway */
218 for (i=0; i < vpes; i++) {
219 write_tc_c0_tcbind(i);
220 /*
221 * To be 100% sure we're really getting the right
222 * information, we exit the configuration state
223 * and do an IHB after each rebinding.
224 */
225 write_c0_mvpcontrol(
226 read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
227 mips_ihb();
228 /*
229 * Only count if the MMU Type indicated is TLB
230 */
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100231 if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
Ralf Baechle41c594a2006-04-05 09:45:45 +0100232 config1val = read_vpe_c0_config1();
233 tlbsiz += ((config1val >> 25) & 0x3f) + 1;
234 }
235
236 /* Put core back in configuration state */
237 write_c0_mvpcontrol(
238 read_c0_mvpcontrol() | MVPCONTROL_VPC );
239 mips_ihb();
240 }
241 }
242 write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
Ralf Baechlec80697b2007-01-17 18:58:44 +0000243 ehb();
Ralf Baechle41c594a2006-04-05 09:45:45 +0100244
245 /*
246 * Setup kernel data structures to use software total,
247 * rather than read the per-VPE Config1 value. The values
248 * for "CPU 0" get copied to all the other CPUs as part
249 * of their initialization in smtc_cpu_setup().
250 */
251
Ralf Baechlea0b62182007-01-19 14:35:14 +0000252 /* MIPS32 limits TLB indices to 64 */
253 if (tlbsiz > 64)
254 tlbsiz = 64;
255 cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
Ralf Baechle41c594a2006-04-05 09:45:45 +0100256 smtc_status |= SMTC_TLB_SHARED;
Ralf Baechlea0b62182007-01-19 14:35:14 +0000257 local_flush_tlb_all();
Ralf Baechle41c594a2006-04-05 09:45:45 +0100258
259 printk("TLB of %d entry pairs shared by %d VPEs\n",
260 tlbsiz, vpes);
261 } else {
262 printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
263 }
264 }
265}
266
267
268/*
269 * Incrementally build the CPU map out of constituent MIPS MT cores,
270 * using the specified available VPEs and TCs. Platform code needs
271 * to ensure that each MIPS MT core invokes this routine on reset,
272 * one at a time(!).
273 *
274 * This version of the build_cpu_map and prepare_cpus routines assumes
275 * that *all* TCs of a MIPS MT core will be used for Linux, and that
276 * they will be spread across *all* available VPEs (to minimise the
277 * loss of efficiency due to exception service serialization).
278 * An improved version would pick up configuration information and
279 * possibly leave some TCs/VPEs as "slave" processors.
280 *
281 * Use c0_MVPConf0 to find out how many TCs are available, setting up
282 * phys_cpu_present_map and the logical/physical mappings.
283 */
284
285int __init mipsmt_build_cpu_map(int start_cpu_slot)
286{
287 int i, ntcs;
288
289 /*
290 * The CPU map isn't actually used for anything at this point,
291 * so it's not clear what else we should do apart from set
292 * everything up so that "logical" = "physical".
293 */
294 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
295 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
296 cpu_set(i, phys_cpu_present_map);
297 __cpu_number_map[i] = i;
298 __cpu_logical_map[i] = i;
299 }
Ralf Baechleea580402007-10-11 23:46:09 +0100300#ifdef CONFIG_MIPS_MT_FPAFF
Ralf Baechle41c594a2006-04-05 09:45:45 +0100301 /* Initialize map of CPUs with FPUs */
302 cpus_clear(mt_fpu_cpumask);
Ralf Baechleea580402007-10-11 23:46:09 +0100303#endif
Ralf Baechle41c594a2006-04-05 09:45:45 +0100304
305 /* One of those TCs is the one booting, and not a secondary... */
306 printk("%i available secondary CPU TC(s)\n", i - 1);
307
308 return i;
309}
310
311/*
312 * Common setup before any secondaries are started
313 * Make sure all CPUs are in a sensible state before we boot any of the
314 * secondaries.
315 *
316 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
317 * as possible across the available VPEs.
318 */
319
320static void smtc_tc_setup(int vpe, int tc, int cpu)
321{
322 settc(tc);
323 write_tc_c0_tchalt(TCHALT_H);
324 mips_ihb();
325 write_tc_c0_tcstatus((read_tc_c0_tcstatus()
326 & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
327 | TCSTATUS_A);
328 write_tc_c0_tccontext(0);
329 /* Bind tc to vpe */
330 write_tc_c0_tcbind(vpe);
331 /* In general, all TCs should have the same cpu_data indications */
332 memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
333 /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
334 if (cpu_data[0].cputype == CPU_34K)
335 cpu_data[cpu].options &= ~MIPS_CPU_FPU;
336 cpu_data[cpu].vpe_id = vpe;
337 cpu_data[cpu].tc_id = tc;
338}
339
340
341void mipsmt_prepare_cpus(void)
342{
Kevin D. Kissellbe5f1f22007-03-21 13:28:37 +0100343 int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
Ralf Baechle41c594a2006-04-05 09:45:45 +0100344 unsigned long flags;
345 unsigned long val;
346 int nipi;
347 struct smtc_ipi *pipi;
348
349 /* disable interrupts so we can disable MT */
350 local_irq_save(flags);
351 /* disable MT so we can configure */
352 dvpe();
353 dmt();
354
Ingo Molnar34af9462006-06-27 02:53:55 -0700355 spin_lock_init(&freeIPIq.lock);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100356
357 /*
358 * We probably don't have as many VPEs as we do SMP "CPUs",
359 * but it's possible - and in any case we'll never use more!
360 */
361 for (i=0; i<NR_CPUS; i++) {
362 IPIQ[i].head = IPIQ[i].tail = NULL;
Ingo Molnar34af9462006-06-27 02:53:55 -0700363 spin_lock_init(&IPIQ[i].lock);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100364 IPIQ[i].depth = 0;
Ralf Baechleea580402007-10-11 23:46:09 +0100365 atomic_set(&ipi_timer_latch[i], 0);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100366 }
367
368 /* cpu_data index starts at zero */
369 cpu = 0;
370 cpu_data[cpu].vpe_id = 0;
371 cpu_data[cpu].tc_id = 0;
372 cpu++;
373
374 /* Report on boot-time options */
Ralf Baechle49a89ef2007-10-11 23:46:15 +0100375 mips_mt_set_cpuoptions();
Ralf Baechle41c594a2006-04-05 09:45:45 +0100376 if (vpelimit > 0)
377 printk("Limit of %d VPEs set\n", vpelimit);
378 if (tclimit > 0)
379 printk("Limit of %d TCs set\n", tclimit);
380 if (nostlb) {
381 printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
382 }
383 if (asidmask)
384 printk("ASID mask value override to 0x%x\n", asidmask);
385
386 /* Temporary */
Ralf Baechlec68644d2007-02-26 20:46:34 +0000387#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
Ralf Baechle41c594a2006-04-05 09:45:45 +0100388 if (hang_trig)
389 printk("Logic Analyser Trigger on suspected TC hang\n");
Ralf Baechlec68644d2007-02-26 20:46:34 +0000390#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
Ralf Baechle41c594a2006-04-05 09:45:45 +0100391
392 /* Put MVPE's into 'configuration state' */
393 write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
394
395 val = read_c0_mvpconf0();
396 nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
397 if (vpelimit > 0 && nvpe > vpelimit)
398 nvpe = vpelimit;
399 ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
400 if (ntc > NR_CPUS)
401 ntc = NR_CPUS;
402 if (tclimit > 0 && ntc > tclimit)
403 ntc = tclimit;
Kevin D. Kissellbe5f1f22007-03-21 13:28:37 +0100404 slop = ntc % nvpe;
405 for (i = 0; i < nvpe; i++) {
406 tcpervpe[i] = ntc / nvpe;
407 if (slop) {
408 if((slop - i) > 0) tcpervpe[i]++;
409 }
410 }
411 /* Handle command line override for VPE0 */
412 if (vpe0limit > ntc) vpe0limit = ntc;
413 if (vpe0limit > 0) {
414 int slopslop;
415 if (vpe0limit < tcpervpe[0]) {
416 /* Reducing TC count - distribute to others */
417 slop = tcpervpe[0] - vpe0limit;
418 slopslop = slop % (nvpe - 1);
419 tcpervpe[0] = vpe0limit;
420 for (i = 1; i < nvpe; i++) {
421 tcpervpe[i] += slop / (nvpe - 1);
422 if(slopslop && ((slopslop - (i - 1) > 0)))
423 tcpervpe[i]++;
424 }
425 } else if (vpe0limit > tcpervpe[0]) {
426 /* Increasing TC count - steal from others */
427 slop = vpe0limit - tcpervpe[0];
428 slopslop = slop % (nvpe - 1);
429 tcpervpe[0] = vpe0limit;
430 for (i = 1; i < nvpe; i++) {
431 tcpervpe[i] -= slop / (nvpe - 1);
432 if(slopslop && ((slopslop - (i - 1) > 0)))
433 tcpervpe[i]--;
434 }
435 }
436 }
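	/*
	 * Worked example of the distribution above (illustrative numbers):
	 * with ntc = 5 and nvpe = 2, tcpervpe becomes {3, 2}; adding
	 * "vpe0tcs=1" on the command line then hands VPE0's two surplus
	 * TCs to VPE1, giving {1, 4}.
	 */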
Ralf Baechle41c594a2006-04-05 09:45:45 +0100437
438 /* Set up shared TLB */
439 smtc_configure_tlb();
440
441 for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
442 /*
443 * Set the MVP bits.
444 */
445 settc(tc);
446 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
447 if (vpe != 0)
448 printk(", ");
449 printk("VPE %d: TC", vpe);
Kevin D. Kissellbe5f1f22007-03-21 13:28:37 +0100450 for (i = 0; i < tcpervpe[vpe]; i++) {
Ralf Baechle41c594a2006-04-05 09:45:45 +0100451 /*
452 * TC 0 is bound to VPE 0 at reset,
453 * and is presumably executing this
454 * code. Leave it alone!
455 */
456 if (tc != 0) {
Ralf Baechle21a151d2007-10-11 23:46:15 +0100457 smtc_tc_setup(vpe, tc, cpu);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100458 cpu++;
459 }
460 printk(" %d", tc);
461 tc++;
462 }
Ralf Baechle41c594a2006-04-05 09:45:45 +0100463 if (vpe != 0) {
464 /*
465 * Clear any stale software interrupts from VPE's Cause
466 */
467 write_vpe_c0_cause(0);
468
469 /*
470 * Clear ERL/EXL of VPEs other than 0
471 * and set restricted interrupt enable/mask.
472 */
473 write_vpe_c0_status((read_vpe_c0_status()
474 & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
475 | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
476 | ST0_IE));
477 /*
478 * set config to be the same as vpe0,
479 * particularly kseg0 coherency alg
480 */
481 write_vpe_c0_config(read_c0_config());
482 /* Clear any pending timer interrupt */
483 write_vpe_c0_compare(0);
484 /* Propagate Config7 */
485 write_vpe_c0_config7(read_c0_config7());
Ralf Baechle64c590b2006-11-01 00:22:00 +0000486 write_vpe_c0_count(read_c0_count());
Ralf Baechle41c594a2006-04-05 09:45:45 +0100487 }
488 /* enable multi-threading within VPE */
489 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
490 /* enable the VPE */
491 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
492 }
493
494 /*
495 * Pull any physically present but unused TCs out of circulation.
496 */
497 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
498 cpu_clear(tc, phys_cpu_present_map);
499 cpu_clear(tc, cpu_present_map);
500 tc++;
501 }
502
503 /* release config state */
504 write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
505
506 printk("\n");
507
508 /* Set up coprocessor affinity CPU mask(s) */
509
Ralf Baechleea580402007-10-11 23:46:09 +0100510#ifdef CONFIG_MIPS_MT_FPAFF
Ralf Baechle41c594a2006-04-05 09:45:45 +0100511 for (tc = 0; tc < ntc; tc++) {
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100512 if (cpu_data[tc].options & MIPS_CPU_FPU)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100513 cpu_set(tc, mt_fpu_cpumask);
514 }
Ralf Baechleea580402007-10-11 23:46:09 +0100515#endif
Ralf Baechle41c594a2006-04-05 09:45:45 +0100516
517 /* set up ipi interrupts... */
518
519 /* If we have multiple VPEs running, set up the cross-VPE interrupt */
520
Ralf Baechle20bb25d2007-03-27 15:19:58 +0100521 setup_cross_vpe_interrupts(nvpe);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100522
523 /* Set up queue of free IPI "messages". */
524 nipi = NR_CPUS * IPIBUF_PER_CPU;
525 if (ipibuffers > 0)
526 nipi = ipibuffers;
527
528 pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
529 if (pipi == NULL)
530 panic("kmalloc of IPI message buffers failed\n");
531 else
532 printk("IPI buffer pool of %d buffers\n", nipi);
533 for (i = 0; i < nipi; i++) {
534 smtc_ipi_nq(&freeIPIq, pipi);
535 pipi++;
536 }
537
538 /* Arm multithreading and enable other VPEs - but all TCs are Halted */
539 emt(EMT_ENABLE);
540 evpe(EVPE_ENABLE);
541 local_irq_restore(flags);
542 /* Initialize SMTC /proc statistics/diagnostics */
543 init_smtc_stats();
544}
545
546
547/*
548 * Setup the PC, SP, and GP of a secondary processor and start it
549 * running!
550 * smp_bootstrap is the place to resume from
551 * __KSTK_TOS(idle) is apparently the stack pointer
552 * (unsigned long)idle->thread_info the gp
553 *
554 */
Ralf Baechlee119d492007-07-28 00:54:32 +0100555void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100556{
557 extern u32 kernelsp[NR_CPUS];
558 long flags;
559 int mtflags;
560
561 LOCK_MT_PRA();
562 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
563 dvpe();
564 }
565 settc(cpu_data[cpu].tc_id);
566
567 /* pc */
568 write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
569
570 /* stack pointer */
571 kernelsp[cpu] = __KSTK_TOS(idle);
572 write_tc_gpr_sp(__KSTK_TOS(idle));
573
574 /* global pointer */
Roman Zippelc9f4f062007-05-09 02:35:16 -0700575 write_tc_gpr_gp((unsigned long)task_thread_info(idle));
Ralf Baechle41c594a2006-04-05 09:45:45 +0100576
577 smtc_status |= SMTC_MTC_ACTIVE;
578 write_tc_c0_tchalt(0);
579 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
580 evpe(EVPE_ENABLE);
581 }
582 UNLOCK_MT_PRA();
583}
584
585void smtc_init_secondary(void)
586{
587 /*
588 * Start timer on secondary VPEs if necessary.
Ralf Baechle54d0a212006-07-09 21:38:56 +0100589 * plat_timer_setup has already been invoked by init/main
Ralf Baechle41c594a2006-04-05 09:45:45 +0100590 * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
591 * SMTC init code assigns TCs consecutively and in ascending order
592 * across available VPEs.
593 */
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100594 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
595 ((read_c0_tcbind() & TCBIND_CURVPE)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100596 != cpu_data[smp_processor_id() - 1].vpe_id)){
Ralf Baechle49a89ef2007-10-11 23:46:15 +0100597 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100598 }
599
600 local_irq_enable();
601}
602
603void smtc_smp_finish(void)
604{
605 printk("TC %d going on-line as CPU %d\n",
606 cpu_data[smp_processor_id()].tc_id, smp_processor_id());
607}
608
609void smtc_cpus_done(void)
610{
611}
612
613/*
614 * Support for SMTC-optimized driver IRQ registration
615 */
616
617/*
618 * SMTC Kernel needs to manipulate low-level CPU interrupt masks
619 * in do_IRQ. These are passed in setup_irq_smtc() and stored
620 * in this table.
621 */
622
623int setup_irq_smtc(unsigned int irq, struct irqaction * new,
624 unsigned long hwmask)
625{
Ralf Baechleef36fc32007-05-31 13:36:57 +0100626#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
Ralf Baechle20bb25d2007-03-27 15:19:58 +0100627 unsigned int vpe = current_cpu_data.vpe_id;
628
Ralf Baechle3b1d4ed2007-06-20 22:27:10 +0100629 vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
Ralf Baechle20bb25d2007-03-27 15:19:58 +0100630#endif
Ralf Baechleef36fc32007-05-31 13:36:57 +0100631 irq_hwmask[irq] = hwmask;
Ralf Baechle41c594a2006-04-05 09:45:45 +0100632
633 return setup_irq(irq, new);
634}
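/*
 * Usage sketch: callers pass the Status.IM bit(s) that must remain
 * enabled for the IRQ to be serviced, as this file itself does for the
 * cross-VPE IPI below:
 *
 *	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
 *
 * A platform timer might similarly pass 0x100 << cp0_compare_irq
 * (hypothetical example - the actual hwmask value is platform-specific).
 */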
635
Kevin D. Kissellf571eff2007-08-03 19:38:03 +0200636#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
637/*
638 * Support for IRQ affinity to TCs
639 */
640
641void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
642{
643 /*
644 * If a "fast path" cache of quickly decodable affinity state
645 * is maintained, this is where it gets done, on a call up
646 * from the platform affinity code.
647 */
648}
649
650void smtc_forward_irq(unsigned int irq)
651{
652 int target;
653
654 /*
655 * OK wise guy, now figure out how to get the IRQ
656 * to be serviced on an authorized "CPU".
657 *
658 * Ideally, to handle the situation where an IRQ has multiple
659 * eligible CPUS, we would maintain state per IRQ that would
660 * allow a fair distribution of service requests. Since the
661 * expected use model is any-or-only-one, for simplicity
662 * and efficiency, we just pick the easiest one to find.
663 */
664
665 target = first_cpu(irq_desc[irq].affinity);
666
667 /*
668 * We depend on the platform code to have correctly processed
669 * IRQ affinity change requests to ensure that the IRQ affinity
670 * mask has been purged of bits corresponding to nonexistent and
671 * offline "CPUs", and to TCs bound to VPEs other than the VPE
672 * connected to the physical interrupt input for the interrupt
673 * in question. Otherwise we have a nasty problem with interrupt
674 * mask management. This is best handled in non-performance-critical
675 * platform IRQ affinity setting code, to minimize interrupt-time
676 * checks.
677 */
678
679 /* If no one is eligible, service locally */
680 if (target >= NR_CPUS) {
681 do_IRQ_no_affinity(irq);
682 return;
683 }
684
685 smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
686}
687
688#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
689
Ralf Baechle41c594a2006-04-05 09:45:45 +0100690/*
691 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
692 * Within a VPE one TC can interrupt another by different approaches.
693 * The easiest to get right would probably be to make all TCs except
694 * the target IXMT and set a software interrupt, but an IXMT-based
695 * scheme requires that a handler must run before a new IPI could
696 * be sent, which would break the "broadcast" loops in MIPS MT.
697 * A more gonzo approach within a VPE is to halt the TC, extract
698 * its Restart, Status, and a couple of GPRs, and program the Restart
699 * address to emulate an interrupt.
700 *
701 * Within a VPE, one can be confident that the target TC isn't in
702 * a critical EXL state when halted, since the write to the Halt
703 * register could not have issued on the writing thread if the
704 * halting thread had EXL set. So k0 and k1 of the target TC
705 * can be used by the injection code. Across VPEs, one can't
706 * be certain that the target TC isn't in a critical exception
707 * state. So we try a two-step process of sending a software
708 * interrupt to the target VPE, which either handles the event
709 * itself (if it was the target) or injects the event within
710 * the VPE.
711 */
712
Ralf Baechle58687562007-02-05 00:33:21 +0000713static void smtc_ipi_qdump(void)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100714{
715 int i;
716
717 for (i = 0; i < NR_CPUS ;i++) {
718 printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
719 i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
720 IPIQ[i].depth);
721 }
722}
723
724/*
725 * The standard atomic.h primitives don't quite do what we want
726 * here: We need an atomic add-and-return-previous-value (which
727 * could be done with atomic_add_return and a decrement) and an
728 * atomic set/zero-and-return-previous-value (which can't really
729 * be done with the atomic.h primitives). And since this is
730 * MIPS MT, we can assume that we have LL/SC.
731 */
Ralf Baechleea580402007-10-11 23:46:09 +0100732static inline int atomic_postincrement(atomic_t *v)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100733{
734 unsigned long result;
735
736 unsigned long temp;
737
738 __asm__ __volatile__(
739 "1: ll %0, %2 \n"
740 " addu %1, %0, 1 \n"
741 " sc %1, %2 \n"
742 " beqz %1, 1b \n"
Ralf Baechled87d0c92007-10-11 23:45:58 +0100743 __WEAK_LLSC_MB
Ralf Baechleea580402007-10-11 23:46:09 +0100744 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
745 : "m" (v->counter)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100746 : "memory");
747
748 return result;
749}
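/*
 * i.e. the value of *v before the increment is returned - equivalent to
 * atomic_add_return(1, v) - 1, but done in a single LL/SC sequence as
 * described in the comment above.
 */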
750
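/*
 * Send an IPI to "cpu".  Three delivery paths, per the discussion above:
 * if the target lives on another VPE, the message is queued and a
 * cross-VPE software interrupt (SW1) is raised; if it shares our VPE and
 * is not IXMT, it is halted and the IPI injected directly via
 * post_direct_ipi(); if it is IXMT, the message is queued for later
 * replay.
 */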
Ralf Baechle41c594a2006-04-05 09:45:45 +0100751void smtc_send_ipi(int cpu, int type, unsigned int action)
752{
753 int tcstatus;
754 struct smtc_ipi *pipi;
755 long flags;
756 int mtflags;
757
758 if (cpu == smp_processor_id()) {
759 printk("Cannot Send IPI to self!\n");
760 return;
761 }
762 /* Set up a descriptor, to be delivered either promptly or queued */
763 pipi = smtc_ipi_dq(&freeIPIq);
764 if (pipi == NULL) {
765 bust_spinlocks(1);
766 mips_mt_regdump(dvpe());
767 panic("IPI Msg. Buffers Depleted\n");
768 }
769 pipi->type = type;
770 pipi->arg = (void *)action;
771 pipi->dest = cpu;
772 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
Ralf Baechleea580402007-10-11 23:46:09 +0100773 if (type == SMTC_CLOCK_TICK)
774 atomic_inc(&ipi_timer_latch[cpu]);
Joe Perches603e82e2008-02-03 16:54:53 +0200775 /* If not on same VPE, enqueue and send cross-VPE interrupt */
Ralf Baechle41c594a2006-04-05 09:45:45 +0100776 smtc_ipi_nq(&IPIQ[cpu], pipi);
777 LOCK_CORE_PRA();
778 settc(cpu_data[cpu].tc_id);
779 write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
780 UNLOCK_CORE_PRA();
781 } else {
782 /*
783 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
784 * since ASID shootdown on the other VPE may
785 * collide with this operation.
786 */
787 LOCK_CORE_PRA();
788 settc(cpu_data[cpu].tc_id);
789 /* Halt the targeted TC */
790 write_tc_c0_tchalt(TCHALT_H);
791 mips_ihb();
792
793 /*
794 * Inspect TCStatus - if IXMT is set, we have to queue
795 * a message. Otherwise, we set up the "interrupt"
796 * of the other TC
797 */
798 tcstatus = read_tc_c0_tcstatus();
799
800 if ((tcstatus & TCSTATUS_IXMT) != 0) {
801 /*
802 * Spin-waiting here can deadlock,
803 * so we queue the message for the target TC.
804 */
805 write_tc_c0_tchalt(0);
806 UNLOCK_CORE_PRA();
807 /* Try to reduce redundant timer interrupt messages */
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100808 if (type == SMTC_CLOCK_TICK) {
809 if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
Ralf Baechle41c594a2006-04-05 09:45:45 +0100810 smtc_ipi_nq(&freeIPIq, pipi);
811 return;
812 }
813 }
814 smtc_ipi_nq(&IPIQ[cpu], pipi);
815 } else {
Ralf Baechleea580402007-10-11 23:46:09 +0100816 if (type == SMTC_CLOCK_TICK)
817 atomic_inc(&ipi_timer_latch[cpu]);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100818 post_direct_ipi(cpu, pipi);
819 write_tc_c0_tchalt(0);
820 UNLOCK_CORE_PRA();
821 }
822 }
823}
824
825/*
826 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
827 */
Ralf Baechle58687562007-02-05 00:33:21 +0000828static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100829{
830 struct pt_regs *kstack;
831 unsigned long tcstatus;
832 unsigned long tcrestart;
833 extern u32 kernelsp[NR_CPUS];
834 extern void __smtc_ipi_vector(void);
Ralf Baechleea580402007-10-11 23:46:09 +0100835//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100836
837 /* Extract Status, EPC from halted TC */
838 tcstatus = read_tc_c0_tcstatus();
839 tcrestart = read_tc_c0_tcrestart();
840 /* If TCRestart indicates a WAIT instruction, advance the PC */
841 if ((tcrestart & 0x80000000)
842 && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
843 tcrestart += 4;
844 }
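	/*
	 * The test above dereferences TCRestart only when bit 31 indicates
	 * a kernel-segment address, and the mask/value pair (0xfe00003f,
	 * 0x42000020) matches the encoding of the MIPS WAIT instruction.
	 */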
845 /*
846 * Save on TC's future kernel stack
847 *
848 * CU bit of Status is indicator that TC was
849 * already running on a kernel stack...
850 */
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100851 if (tcstatus & ST0_CU0) {
Ralf Baechle41c594a2006-04-05 09:45:45 +0100852 /* Note that this "- 1" is pointer arithmetic */
853 kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
854 } else {
855 kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
856 }
857
858 kstack->cp0_epc = (long)tcrestart;
859 /* Save TCStatus */
860 kstack->cp0_tcstatus = tcstatus;
861 /* Pass token of operation to be performed kernel stack pad area */
862 kstack->pad0[4] = (unsigned long)pipi;
863 /* Pass address of function to be called likewise */
864 kstack->pad0[5] = (unsigned long)&ipi_decode;
865 /* Set interrupt exempt and kernel mode */
866 tcstatus |= TCSTATUS_IXMT;
867 tcstatus &= ~TCSTATUS_TKSU;
868 write_tc_c0_tcstatus(tcstatus);
869 ehb();
870 /* Set TC Restart address to be SMTC IPI vector */
871 write_tc_c0_tcrestart(__smtc_ipi_vector);
872}
873
Ralf Baechle937a8012006-10-07 19:44:33 +0100874static void ipi_resched_interrupt(void)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100875{
876 /* Return from interrupt should be enough to cause scheduler check */
877}
878
879
Ralf Baechle937a8012006-10-07 19:44:33 +0100880static void ipi_call_interrupt(void)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100881{
882 /* Invoke generic function invocation code in smp.c */
883 smp_call_function_interrupt();
884}
885
Ralf Baechleea580402007-10-11 23:46:09 +0100886DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
887
Ralf Baechle937a8012006-10-07 19:44:33 +0100888void ipi_decode(struct smtc_ipi *pipi)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100889{
Ralf Baechleea580402007-10-11 23:46:09 +0100890 unsigned int cpu = smp_processor_id();
891 struct clock_event_device *cd;
Ralf Baechle41c594a2006-04-05 09:45:45 +0100892 void *arg_copy = pipi->arg;
893 int type_copy = pipi->type;
Ralf Baechleea580402007-10-11 23:46:09 +0100894 int ticks;
Ralf Baechle41c594a2006-04-05 09:45:45 +0100895
896 smtc_ipi_nq(&freeIPIq, pipi);
897 switch (type_copy) {
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100898 case SMTC_CLOCK_TICK:
Ralf Baechleae036b72007-03-27 15:11:54 +0100899 irq_enter();
Ralf Baechleea580402007-10-11 23:46:09 +0100900 kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
901 cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
902 ticks = atomic_read(&ipi_timer_latch[cpu]);
903 atomic_sub(ticks, &ipi_timer_latch[cpu]);
904 while (ticks) {
905 cd->event_handler(cd);
906 ticks--;
907 }
Ralf Baechleae036b72007-03-27 15:11:54 +0100908 irq_exit();
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100909 break;
Ralf Baechleea580402007-10-11 23:46:09 +0100910
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100911 case LINUX_SMP_IPI:
912 switch ((int)arg_copy) {
913 case SMP_RESCHEDULE_YOURSELF:
Ralf Baechle937a8012006-10-07 19:44:33 +0100914 ipi_resched_interrupt();
Ralf Baechle41c594a2006-04-05 09:45:45 +0100915 break;
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100916 case SMP_CALL_FUNCTION:
Ralf Baechle937a8012006-10-07 19:44:33 +0100917 ipi_call_interrupt();
Ralf Baechle41c594a2006-04-05 09:45:45 +0100918 break;
919 default:
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100920 printk("Impossible SMTC IPI Argument 0x%x\n",
921 (int)arg_copy);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100922 break;
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100923 }
924 break;
Kevin D. Kissellf571eff2007-08-03 19:38:03 +0200925#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
926 case IRQ_AFFINITY_IPI:
927 /*
928 * Accept a "forwarded" interrupt that was initially
929 * taken by a TC who doesn't have affinity for the IRQ.
930 */
931 do_IRQ_no_affinity((int)arg_copy);
932 break;
933#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100934 default:
935 printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
936 break;
Ralf Baechle41c594a2006-04-05 09:45:45 +0100937 }
938}
939
Ralf Baechle937a8012006-10-07 19:44:33 +0100940void deferred_smtc_ipi(void)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100941{
942 struct smtc_ipi *pipi;
943 unsigned long flags;
944/* DEBUG */
945 int q = smp_processor_id();
946
947 /*
948 * Test is not atomic, but much faster than a dequeue,
949 * and the vast majority of invocations will have a null queue.
950 */
Ralf Baechle4bf42d42006-07-08 11:32:58 +0100951 if (IPIQ[q].head != NULL) {
Ralf Baechle41c594a2006-04-05 09:45:45 +0100952 while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
953 /* ipi_decode() should be called with interrupts off */
954 local_irq_save(flags);
Ralf Baechle937a8012006-10-07 19:44:33 +0100955 ipi_decode(pipi);
Ralf Baechle41c594a2006-04-05 09:45:45 +0100956 local_irq_restore(flags);
957 }
958 }
959}
960
961/*
Ralf Baechle41c594a2006-04-05 09:45:45 +0100962 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
963 * set via cross-VPE MTTR manipulation of the Cause register. It would be
964 * in some regards preferable to have external logic for "doorbell" hardware
965 * interrupts.
966 */
967
Atsushi Nemoto97dcb822007-01-08 02:14:29 +0900968static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
Ralf Baechle41c594a2006-04-05 09:45:45 +0100969
Ralf Baechle937a8012006-10-07 19:44:33 +0100970static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100971{
972 int my_vpe = cpu_data[smp_processor_id()].vpe_id;
973 int my_tc = cpu_data[smp_processor_id()].tc_id;
974 int cpu;
975 struct smtc_ipi *pipi;
976 unsigned long tcstatus;
977 int sent;
978 long flags;
979 unsigned int mtflags;
980 unsigned int vpflags;
981
982 /*
983 * So long as cross-VPE interrupts are done via
984 * MFTR/MTTR read-modify-writes of Cause, we need
985 * to stop other VPEs whenever the local VPE does
986 * anything similar.
987 */
988 local_irq_save(flags);
989 vpflags = dvpe();
990 clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
991 set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
992 irq_enable_hazard();
993 evpe(vpflags);
994 local_irq_restore(flags);
995
996 /*
997 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
998 * queued for TCs on this VPE other than the current one.
999 * Return-from-interrupt should cause us to drain the queue
1000 * for the current TC, so we ought not to have to do it explicitly here.
1001 */
1002
1003 for_each_online_cpu(cpu) {
1004 if (cpu_data[cpu].vpe_id != my_vpe)
1005 continue;
1006
1007 pipi = smtc_ipi_dq(&IPIQ[cpu]);
1008 if (pipi != NULL) {
1009 if (cpu_data[cpu].tc_id != my_tc) {
1010 sent = 0;
1011 LOCK_MT_PRA();
1012 settc(cpu_data[cpu].tc_id);
1013 write_tc_c0_tchalt(TCHALT_H);
1014 mips_ihb();
1015 tcstatus = read_tc_c0_tcstatus();
1016 if ((tcstatus & TCSTATUS_IXMT) == 0) {
1017 post_direct_ipi(cpu, pipi);
1018 sent = 1;
1019 }
1020 write_tc_c0_tchalt(0);
1021 UNLOCK_MT_PRA();
1022 if (!sent) {
1023 smtc_ipi_req(&IPIQ[cpu], pipi);
1024 }
1025 } else {
1026 /*
1027 * ipi_decode() should be called
1028 * with interrupts off
1029 */
1030 local_irq_save(flags);
Ralf Baechle937a8012006-10-07 19:44:33 +01001031 ipi_decode(pipi);
Ralf Baechle41c594a2006-04-05 09:45:45 +01001032 local_irq_restore(flags);
1033 }
1034 }
1035 }
1036
1037 return IRQ_HANDLED;
1038}
1039
Ralf Baechle937a8012006-10-07 19:44:33 +01001040static void ipi_irq_dispatch(void)
Ralf Baechle41c594a2006-04-05 09:45:45 +01001041{
Ralf Baechle937a8012006-10-07 19:44:33 +01001042 do_IRQ(cpu_ipi_irq);
Ralf Baechle41c594a2006-04-05 09:45:45 +01001043}
1044
Ralf Baechle033890b2007-07-27 18:33:30 +01001045static struct irqaction irq_ipi = {
1046 .handler = ipi_interrupt,
1047 .flags = IRQF_DISABLED | IRQF_PERCPU,
1048 .name = "SMTC_IPI"
1050};
Ralf Baechle41c594a2006-04-05 09:45:45 +01001051
Ralf Baechle20bb25d2007-03-27 15:19:58 +01001052static void setup_cross_vpe_interrupts(unsigned int nvpe)
Ralf Baechle41c594a2006-04-05 09:45:45 +01001053{
Ralf Baechle20bb25d2007-03-27 15:19:58 +01001054 if (nvpe < 1)
1055 return;
1056
Ralf Baechle41c594a2006-04-05 09:45:45 +01001057 if (!cpu_has_vint)
Joe Perches603e82e2008-02-03 16:54:53 +02001058 panic("SMTC Kernel requires Vectored Interrupt support");
Ralf Baechle41c594a2006-04-05 09:45:45 +01001059
1060 set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
1061
Ralf Baechle41c594a2006-04-05 09:45:45 +01001062 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1063
Atsushi Nemoto14178362006-11-14 01:13:18 +09001064 set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
Ralf Baechle41c594a2006-04-05 09:45:45 +01001065}
1066
1067/*
1068 * SMTC-specific hacks invoked from elsewhere in the kernel.
Ralf Baechle8a1e97e2007-03-29 23:42:42 +01001069 *
1070 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
1071 * called with interrupts disabled. We do rely on interrupts being disabled
1072 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
1073 * result in a recursive call to raw_local_irq_restore().
Ralf Baechle41c594a2006-04-05 09:45:45 +01001074 */
1075
Ralf Baechle8a1e97e2007-03-29 23:42:42 +01001076static void __smtc_ipi_replay(void)
Ralf Baechleac8be952007-01-20 00:18:01 +00001077{
Ralf Baechle8a1e97e2007-03-29 23:42:42 +01001078 unsigned int cpu = smp_processor_id();
1079
Ralf Baechleac8be952007-01-20 00:18:01 +00001080 /*
1081 * To the extent that we've ever turned interrupts off,
1082 * we may have accumulated deferred IPIs. This is subtle.
1083 * If we use the smtc_ipi_qdepth() macro, we'll get an
1084 * exact number - but we'll also disable interrupts
1085 * and create a window of failure where a new IPI gets
1086 * queued after we test the depth but before we re-enable
1087 * interrupts. So long as IXMT never gets set, however,
1088 * we should be OK: If we pick up something and dispatch
1089 * it here, that's great. If we see nothing, but concurrent
1090 * with this operation, another TC sends us an IPI, IXMT
1091 * is clear, and we'll handle it as a real pseudo-interrupt
1092 * and not a pseudo-pseudo interrupt.
1093 */
Ralf Baechle8a1e97e2007-03-29 23:42:42 +01001094 if (IPIQ[cpu].depth > 0) {
1095 while (1) {
1096 struct smtc_ipi_q *q = &IPIQ[cpu];
1097 struct smtc_ipi *pipi;
1098 extern void self_ipi(struct smtc_ipi *);
Ralf Baechleac8be952007-01-20 00:18:01 +00001099
Ralf Baechle8a1e97e2007-03-29 23:42:42 +01001100 spin_lock(&q->lock);
1101 pipi = __smtc_ipi_dq(q);
1102 spin_unlock(&q->lock);
1103 if (!pipi)
1104 break;
1105
Ralf Baechleac8be952007-01-20 00:18:01 +00001106 self_ipi(pipi);
Ralf Baechle8a1e97e2007-03-29 23:42:42 +01001107 smtc_cpu_stats[cpu].selfipis++;
Ralf Baechleac8be952007-01-20 00:18:01 +00001108 }
1109 }
1110}
1111
Ralf Baechle8a1e97e2007-03-29 23:42:42 +01001112void smtc_ipi_replay(void)
1113{
1114 raw_local_irq_disable();
1115 __smtc_ipi_replay();
1116}
1117
Ralf Baechleec43c012007-01-24 19:23:21 +00001118EXPORT_SYMBOL(smtc_ipi_replay);
1119
Ralf Baechle41c594a2006-04-05 09:45:45 +01001120void smtc_idle_loop_hook(void)
1121{
Ralf Baechlec68644d2007-02-26 20:46:34 +00001122#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
Ralf Baechle41c594a2006-04-05 09:45:45 +01001123 int im;
1124 int flags;
1125 int mtflags;
1126 int bit;
1127 int vpe;
1128 int tc;
1129 int hook_ntcs;
1130 /*
1131 * printk within DMT-protected regions can deadlock,
1132 * so buffer diagnostic messages for later output.
1133 */
1134 char *pdb_msg;
1135 char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
1136
1137 if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
1138 if (atomic_add_return(1, &idle_hook_initialized) == 1) {
1139 int mvpconf0;
1140 /* Tedious stuff to just do once */
1141 mvpconf0 = read_c0_mvpconf0();
1142 hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
1143 if (hook_ntcs > NR_CPUS)
1144 hook_ntcs = NR_CPUS;
1145 for (tc = 0; tc < hook_ntcs; tc++) {
1146 tcnoprog[tc] = 0;
1147 clock_hang_reported[tc] = 0;
1148 }
1149 for (vpe = 0; vpe < 2; vpe++)
1150 for (im = 0; im < 8; im++)
1151 imstuckcount[vpe][im] = 0;
1152 printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
1153 atomic_set(&idle_hook_initialized, 1000);
1154 } else {
1155 /* Someone else is initializing in parallel - let 'em finish */
1156 while (atomic_read(&idle_hook_initialized) < 1000)
1157 ;
1158 }
1159 }
1160
1161 /* Have we stupidly left IXMT set somewhere? */
1162 if (read_c0_tcstatus() & 0x400) {
1163 write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
1164 ehb();
1165 printk("Dangling IXMT in cpu_idle()\n");
1166 }
1167
1168 /* Have we stupidly left an IM bit turned off? */
1169#define IM_LIMIT 2000
1170 local_irq_save(flags);
1171 mtflags = dmt();
1172 pdb_msg = &id_ho_db_msg[0];
1173 im = read_c0_status();
Ralf Baechle8f8771a2007-07-10 17:32:56 +01001174 vpe = current_cpu_data.vpe_id;
Ralf Baechle41c594a2006-04-05 09:45:45 +01001175 for (bit = 0; bit < 8; bit++) {
1176 /*
1177 * In current prototype, I/O interrupts
1178 * are masked for VPE > 0
1179 */
1180 if (vpemask[vpe][bit]) {
1181 if (!(im & (0x100 << bit)))
1182 imstuckcount[vpe][bit]++;
1183 else
1184 imstuckcount[vpe][bit] = 0;
1185 if (imstuckcount[vpe][bit] > IM_LIMIT) {
1186 set_c0_status(0x100 << bit);
1187 ehb();
1188 imstuckcount[vpe][bit] = 0;
1189 pdb_msg += sprintf(pdb_msg,
1190 "Dangling IM %d fixed for VPE %d\n", bit,
1191 vpe);
1192 }
1193 }
1194 }
1195
1196 /*
1197 * Now that we limit outstanding timer IPIs, check for hung TC
1198 */
1199 for (tc = 0; tc < NR_CPUS; tc++) {
1200 /* Don't check ourself - we'll dequeue IPIs just below */
1201 if ((tc != smp_processor_id()) &&
Ralf Baechleea580402007-10-11 23:46:09 +01001202 atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
Ralf Baechle41c594a2006-04-05 09:45:45 +01001203 if (clock_hang_reported[tc] == 0) {
1204 pdb_msg += sprintf(pdb_msg,
1205 "TC %d looks hung with timer latch at %d\n",
Ralf Baechleea580402007-10-11 23:46:09 +01001206 tc, atomic_read(&ipi_timer_latch[tc]));
Ralf Baechle41c594a2006-04-05 09:45:45 +01001207 clock_hang_reported[tc]++;
1208 }
1209 }
1210 }
1211 emt(mtflags);
1212 local_irq_restore(flags);
1213 if (pdb_msg != &id_ho_db_msg[0])
1214 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
Ralf Baechlec68644d2007-02-26 20:46:34 +00001215#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
Ralf Baechle41c594a2006-04-05 09:45:45 +01001216
Ralf Baechleac8be952007-01-20 00:18:01 +00001217 /*
1218 * Replay any accumulated deferred IPIs. If "Instant Replay"
1219 * is in use, there should never be any.
1220 */
1221#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
Ralf Baechle8a1e97e2007-03-29 23:42:42 +01001222 {
1223 unsigned long flags;
1224
1225 local_irq_save(flags);
1226 __smtc_ipi_replay();
1227 local_irq_restore(flags);
1228 }
Ralf Baechleac8be952007-01-20 00:18:01 +00001229#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
Ralf Baechle41c594a2006-04-05 09:45:45 +01001230}
1231
1232void smtc_soft_dump(void)
1233{
1234 int i;
1235
1236 printk("Counter Interrupts taken per CPU (TC)\n");
1237 for (i=0; i < NR_CPUS; i++) {
1238 printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
1239 }
1240 printk("Self-IPI invocations:\n");
1241 for (i=0; i < NR_CPUS; i++) {
1242 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1243 }
1244 smtc_ipi_qdump();
1245 printk("Timer IPI Backlogs:\n");
1246 for (i=0; i < NR_CPUS; i++) {
Ralf Baechleea580402007-10-11 23:46:09 +01001247 printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
Ralf Baechle41c594a2006-04-05 09:45:45 +01001248 }
1249 printk("%d Recoveries of \"stolen\" FPU\n",
1250 atomic_read(&smtc_fpu_recoveries));
1251}
1252
1253
1254/*
1255 * TLB management routines special to SMTC
1256 */
1257
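/*
 * Allocate a fresh ASID for "mm" on "cpu", skipping any ASID that
 * smtc_live_asid[] shows may still be live on another TC sharing the
 * same TLB, and propagate the new value to every CPU that shares it.
 */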
1258void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1259{
1260 unsigned long flags, mtflags, tcstat, prevhalt, asid;
1261 int tlb, i;
1262
1263 /*
1264 * It would be nice to be able to use a spinlock here,
1265 * but this is invoked from within TLB flush routines
1266 * that protect themselves with DVPE, so if a lock is
Ralf Baechlee0daad42007-02-05 00:10:11 +00001267 * held by another TC, it'll never be freed.
Ralf Baechle41c594a2006-04-05 09:45:45 +01001268 *
1269 * DVPE/DMT must not be done with interrupts enabled,
1270 * so even though most callers will already have disabled
1271 * them, let's be really careful...
1272 */
1273
1274 local_irq_save(flags);
1275 if (smtc_status & SMTC_TLB_SHARED) {
1276 mtflags = dvpe();
1277 tlb = 0;
1278 } else {
1279 mtflags = dmt();
1280 tlb = cpu_data[cpu].vpe_id;
1281 }
1282 asid = asid_cache(cpu);
1283
1284 do {
1285 if (!((asid += ASID_INC) & ASID_MASK) ) {
1286 if (cpu_has_vtag_icache)
1287 flush_icache_all();
1288 /* Traverse all online CPUs (hack requires contiguous range) */
Ralf Baechleb5eb5512007-10-03 19:16:57 +01001289 for_each_online_cpu(i) {
Ralf Baechle41c594a2006-04-05 09:45:45 +01001290 /*
1291 * We don't need to worry about our own CPU, nor those of
1292 * CPUs who don't share our TLB.
1293 */
1294 if ((i != smp_processor_id()) &&
1295 ((smtc_status & SMTC_TLB_SHARED) ||
1296 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
1297 settc(cpu_data[i].tc_id);
1298 prevhalt = read_tc_c0_tchalt() & TCHALT_H;
1299 if (!prevhalt) {
1300 write_tc_c0_tchalt(TCHALT_H);
1301 mips_ihb();
1302 }
1303 tcstat = read_tc_c0_tcstatus();
1304 smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1305 if (!prevhalt)
1306 write_tc_c0_tchalt(0);
1307 }
1308 }
1309 if (!asid) /* fix version if needed */
1310 asid = ASID_FIRST_VERSION;
1311 local_flush_tlb_all(); /* start new asid cycle */
1312 }
1313 } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1314
1315 /*
1316 * SMTC shares the TLB within VPEs and possibly across all VPEs.
1317 */
Ralf Baechleb5eb5512007-10-03 19:16:57 +01001318 for_each_online_cpu(i) {
Ralf Baechle41c594a2006-04-05 09:45:45 +01001319 if ((smtc_status & SMTC_TLB_SHARED) ||
1320 (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
1321 cpu_context(i, mm) = asid_cache(i) = asid;
1322 }
1323
1324 if (smtc_status & SMTC_TLB_SHARED)
1325 evpe(mtflags);
1326 else
1327 emt(mtflags);
1328 local_irq_restore(flags);
1329}
1330
1331/*
1332 * Invoked from macros defined in mmu_context.h
1333 * which must already have disabled interrupts
1334 * and done a DVPE or DMT as appropriate.
1335 */
1336
1337void smtc_flush_tlb_asid(unsigned long asid)
1338{
1339 int entry;
1340 unsigned long ehi;
1341
1342 entry = read_c0_wired();
1343
1344 /* Traverse all non-wired entries */
1345 while (entry < current_cpu_data.tlbsize) {
1346 write_c0_index(entry);
1347 ehb();
1348 tlb_read();
1349 ehb();
1350 ehi = read_c0_entryhi();
Ralf Baechle4bf42d42006-07-08 11:32:58 +01001351 if ((ehi & ASID_MASK) == asid) {
Ralf Baechle41c594a2006-04-05 09:45:45 +01001352 /*
1353 * Invalidate only entries with specified ASID,
1354 * making sure all entries differ.
1355 */
1356 write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
1357 write_c0_entrylo0(0);
1358 write_c0_entrylo1(0);
1359 mtc0_tlbw_hazard();
1360 tlb_write_indexed();
1361 }
1362 entry++;
1363 }
1364 write_c0_index(PARKED_INDEX);
1365 tlbw_use_hazard();
1366}
1367
1368/*
1369 * Support for single-threading cache flush operations.
1370 */
1371
Ralf Baechle58687562007-02-05 00:33:21 +00001372static int halt_state_save[NR_CPUS];
Ralf Baechle41c594a2006-04-05 09:45:45 +01001373
1374/*
1375 * To really, really be sure that nothing is being done
1376 * by other TCs, halt them all. This code assumes that
1377 * a DVPE has already been done, so while their Halted
1378 * state is theoretically architecturally unstable, in
1379 * practice, it's not going to change while we're looking
1380 * at it.
1381 */
1382
1383void smtc_cflush_lockdown(void)
1384{
1385 int cpu;
1386
1387 for_each_online_cpu(cpu) {
1388 if (cpu != smp_processor_id()) {
1389 settc(cpu_data[cpu].tc_id);
1390 halt_state_save[cpu] = read_tc_c0_tchalt();
1391 write_tc_c0_tchalt(TCHALT_H);
1392 }
1393 }
1394 mips_ihb();
1395}
1396
1397/* It would be cheating to change the cpu_online states during a flush! */
1398
1399void smtc_cflush_release(void)
1400{
1401 int cpu;
1402
1403 /*
1404 * Start with a hazard barrier to ensure
1405 * that all CACHE ops have played through.
1406 */
1407 mips_ihb();
1408
1409 for_each_online_cpu(cpu) {
1410 if (cpu != smp_processor_id()) {
1411 settc(cpu_data[cpu].tc_id);
1412 write_tc_c0_tchalt(halt_state_save[cpu]);
1413 }
1414 }
1415 mips_ihb();
1416}