/* -*- mode: c; c-basic-offset: 8 -*- */

/* Copyright (C) 1999,2001
 *
 * Author: J.E.J.Bottomley@HansenPartnership.com
 *
 * linux/arch/i386/kernel/voyager_smp.c
 *
 * This file provides all the same external entries as smp.c but uses
 * the voyager hal to provide the functionality
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <asm/desc.h>
#include <asm/voyager.h>
#include <asm/vic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };

/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
        {[0 ... NR_CPUS-1] = ~0UL };

/* per CPU data structure (for /proc/cpuinfo et al), visible externally
 * indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;

/* The memory line addresses for the Quad CPIs */
struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;

/* The masks for the Extended VIC processors, filled in by cat_init */
__u32 voyager_extended_vic_processors = 0;

/* Masks for the extended Quad processors which cannot be VIC booted */
__u32 voyager_allowed_boot_processors = 0;

/* The mask for the Quad Processors (both extended and non-extended) */
__u32 voyager_quad_processors = 0;

/* Total count of live CPUs, used in process.c to display
 * the CPU information and in irq.c for the per CPU irq
 * activity count.  Finally exported by i386_ksyms.c */
static int voyager_extended_cpus = 1;

/* Have we found an SMP box - used by time.c to do the profiling
   interrupt for timeslicing; do not set to 1 until the per CPU timer
   interrupt is active */
int smp_found_config = 0;

/* Used for the invalidate map that's also checked in the spinlock */
static volatile unsigned long smp_invalidate_needed;

/* Bitmask of currently online CPUs - used by setup.c for
   /proc/cpuinfo, visible externally but still physical */
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

/* Bitmask of CPUs present in the system - exported by i386_syms.c, used
 * by scheduler but indexed physically */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
/* The internal functions */
static void send_CPI(__u32 cpuset, __u8 cpi);
static void ack_CPI(__u8 cpi);
static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
static void mask_vic_irq(unsigned int irq);
static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
static void before_handle_vic_irq(unsigned int irq);
static void after_handle_vic_irq(unsigned int irq);
static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
static void ack_vic_irq(unsigned int irq);
static void vic_enable_cpi(void);
static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);

int hard_smp_processor_id(void);
int safe_smp_processor_id(void);
/* Inline functions */
static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
        voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
            (smp_processor_id() << 16) + cpi;
}
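/* The mailbox word written by send_one_QIC_CPI() encodes the sending
 * CPU in the high 16 bits and the CPI number in the low 16 bits, so
 * ((3 << 16) + cpi) records that CPU 3 raised the CPI; the receiver
 * reads the same word back in ack_QIC_CPI() below, which also pulls
 * the cache line back to the shared state. */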
static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (cpuset & (1 << cpu)) {
#ifdef VOYAGER_DEBUG
                        if (!cpu_isset(cpu, cpu_online_map))
                                VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
                                        "cpu_online_map\n",
                                        hard_smp_processor_id(), cpi, cpu));
#endif
                        send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
                }
        }
}

static inline void wrapper_smp_local_timer_interrupt(void)
{
        irq_enter();
        smp_local_timer_interrupt();
        irq_exit();
}

static inline void send_one_CPI(__u8 cpu, __u8 cpi)
{
        if (voyager_quad_processors & (1 << cpu))
                send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
        else
                send_CPI(1 << cpu, cpi);
}

static inline void send_CPI_allbutself(__u8 cpi)
{
        __u8 cpu = smp_processor_id();
        __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
        send_CPI(mask, cpi);
}

static inline int is_cpu_quad(void)
{
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
        return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
}

static inline int is_cpu_extended(void)
{
        __u8 cpu = hard_smp_processor_id();

        return (voyager_extended_vic_processors & (1 << cpu));
}

static inline int is_cpu_vic_boot(void)
{
        __u8 cpu = hard_smp_processor_id();

        return (voyager_extended_vic_processors
                & voyager_allowed_boot_processors & (1 << cpu));
}

static inline void ack_CPI(__u8 cpi)
{
        switch (cpi) {
        case VIC_CPU_BOOT_CPI:
                if (is_cpu_quad() && !is_cpu_vic_boot())
                        ack_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        case VIC_SYS_INT:
        case VIC_CMN_INT:
                /* These are slightly strange.  Even on the Quad card,
                 * they are vectored as VIC CPIs */
                if (is_cpu_quad())
                        ack_special_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        default:
                printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
                break;
        }
}
/* local variables */

/* The VIC IRQ descriptors -- these look almost identical to the
 * 8259 IRQs except that masks and things must be kept per processor
 */
static struct irq_chip vic_chip = {
        .name = "VIC",
        .startup = startup_vic_irq,
        .mask = mask_vic_irq,
        .unmask = unmask_vic_irq,
        .set_affinity = set_vic_irq_affinity,
};

/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;
/* steal a page from the bottom of memory for the trampoline and
 * squirrel its address away here.  This will be in kernel virtual
 * space */
static __u32 trampoline_base;

/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;

/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;

/* the synchronize flag used to hold all secondary CPUs spinning in
 * a tight loop until the boot sequence is ready for them */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* This is for the new dynamic CPU boot code */
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;

/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };

/* Lock for enable/disable of VIC interrupts */
static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);

/* The boot processor is correctly set up in PC mode when it
 * comes up, but the secondaries need their master/slave 8259
 * pairs initializing correctly */

/* Interrupt counters (per cpu) and total - used to try to
 * even up the interrupt handling routines */
static long vic_intr_total = 0;
static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };

/* Since we can only use CPI0, we fake all the other CPIs */
static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
/* debugging routine to read the isr of the cpu's pic */
static inline __u16 vic_read_isr(void)
{
        __u16 isr;

        outb(0x0b, 0xa0);
        isr = inb(0xa0) << 8;
        outb(0x0b, 0x20);
        isr |= inb(0x20);

        return isr;
}
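/* The 0x0b written to each command port in vic_read_isr() is the
 * 8259 OCW3 "read ISR" command: the next read from that port returns
 * the in-service register instead of the default interrupt request
 * register (0x0a selects the IRR again, as voyager_smp_dump() does
 * below). */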
static __init void qic_setup(void)
{
        if (!is_cpu_quad()) {
                /* not a quad, no setup */
                return;
        }
        outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
        outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);

        if (is_cpu_extended()) {
                /* the QIC duplicate of the VIC base register */
                outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
                outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);

                /* FIXME: should set up the QIC timer and memory parity
                 * error vectors here */
        }
}
static __init void vic_setup_pic(void)
{
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* clear the claim registers for dynamic routing */
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        outb(0, VIC_PRIORITY_REGISTER);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
        /* Now initialise the master PIC belonging to this CPU by
         * sending the four ICWs */

        /* ICW1: level triggered, ICW4 needed */
        outb(0x19, 0x20);

        /* ICW2: vector base */
        outb(FIRST_EXTERNAL_VECTOR, 0x21);

        /* ICW3: slave at line 2 */
        outb(0x04, 0x21);

        /* ICW4: 8086 mode */
        outb(0x01, 0x21);

        /* now the same for the slave PIC */

        /* ICW1: level trigger, ICW4 needed */
        outb(0x19, 0xA0);

        /* ICW2: slave vector base */
        outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);

        /* ICW3: slave ID */
        outb(0x02, 0xA1);

        /* ICW4: 8086 mode */
        outb(0x01, 0xA1);
}
static void do_quad_bootstrap(void)
{
        if (is_cpu_quad() && is_cpu_vic_boot()) {
                int i;
                unsigned long flags;
                __u8 cpuid = hard_smp_processor_id();

                local_irq_save(flags);

                for (i = 0; i < 4; i++) {
                        /* FIXME: this would be >>3 &0x7 on the 32 way */
                        if (((cpuid >> 2) & 0x03) == i)
                                /* don't lower our own mask! */
                                continue;

                        /* masquerade as local Quad CPU */
                        outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
                        /* enable the startup CPI */
                        outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
                        /* restore cpu id */
                        outb(0, QIC_PROCESSOR_ID);
                }
                local_irq_restore(flags);
        }
}
/* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu.  All others will be
 * brought on-line later. */
void __init find_smp_config(void)
{
        int i;

        boot_cpu_id = hard_smp_processor_id();

        printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);

        /* initialize the CPU structures (moved from smp_boot_cpus) */
        for (i = 0; i < NR_CPUS; i++) {
                cpu_irq_affinity[i] = ~0;
        }
        cpu_online_map = cpumask_of_cpu(boot_cpu_id);

        /* The boot CPU must be extended */
        voyager_extended_vic_processors = 1 << boot_cpu_id;
        /* initially, all of the first 8 CPUs can boot */
        voyager_allowed_boot_processors = 0xff;
        /* set up everything for just this CPU, we can alter
         * this as we start the other CPUs later */
        /* now get the CPU disposition from the extended CMOS */
        cpus_addr(phys_cpu_present_map)[0] =
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                       2) << 16;
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                       3) << 24;
        cpu_possible_map = phys_cpu_present_map;
        printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
               cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
        /* enable the CPIs by writing the base vector to their register */
        outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* set the claim registers for static routing --- Boot CPU gets
         * all interrupts until all other CPUs started */
        outb(0xff, VIC_CLAIM_REGISTER_0);
        outb(0xff, VIC_CLAIM_REGISTER_1);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

        /* Finally tell the firmware that we're driving */
        outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
             VOYAGER_SUS_IN_CONTROL_PORT);

        current_thread_info()->cpu = boot_cpu_id;
        x86_write_percpu(cpu_number, boot_cpu_id);
}
/*
 * The bootstrap kernel entry code has set these up.  Save them
 * for a given CPU, id is physical */
void __init smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;

        identify_secondary_cpu(c);
}
/* set up the trampoline and return the physical address of the code */
static __u32 __init setup_trampoline(void)
{
        /* these two are global symbols in trampoline.S */
        extern const __u8 trampoline_end[];
        extern const __u8 trampoline_data[];

        memcpy((__u8 *) trampoline_base, trampoline_data,
               trampoline_end - trampoline_data);
        return virt_to_phys((__u8 *) trampoline_base);
}
/* Routine initially called when a non-boot CPU is brought online */
static void __init start_secondary(void *unused)
{
        __u8 cpuid = hard_smp_processor_id();
        /* external functions not defined in the headers */
        extern void calibrate_delay(void);

        cpu_init();

        /* OK, we're in the routine */
        ack_CPI(VIC_CPU_BOOT_CPI);

        /* setup the 8259 master slave pair belonging to this CPU ---
         * we won't actually receive any until the boot CPU
         * relinquishes its static routing mask */
        vic_setup_pic();

        qic_setup();

        if (is_cpu_quad() && !is_cpu_vic_boot()) {
                /* clear the boot CPI */
                __u8 dummy;

                dummy =
                    voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
                printk("read dummy %d\n", dummy);
        }

        /* lower the mask to receive CPIs */
        vic_enable_cpi();

        VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));

        /* enable interrupts */
        local_irq_enable();

        /* get our bogomips */
        calibrate_delay();

        /* save our processor parameters */
        smp_store_cpu_info(cpuid);

        /* if we're a quad, we may need to bootstrap other CPUs */
        do_quad_bootstrap();

        /* FIXME: this is rather a poor hack to prevent the CPU
         * activating softirqs while it's supposed to be waiting for
         * permission to proceed.  Without this, the new per CPU stuff
         * in the softirqs will fail */
        local_irq_disable();
        cpu_set(cpuid, cpu_callin_map);

        /* signal that we're done */
        cpu_booted_map = 1;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                rep_nop();
        local_irq_enable();

        local_flush_tlb();

        cpu_set(cpuid, cpu_online_map);
        wmb();
        cpu_idle();
}
/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup).  When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */
static void __init do_boot_cpu(__u8 cpu)
{
        struct task_struct *idle;
        int timeout;
        unsigned long flags;
        int quad_boot = (1 << cpu) & voyager_quad_processors
            & ~(voyager_extended_vic_processors
                & voyager_allowed_boot_processors);

        /* This is an area in head.S which was used to set up the
         * initial kernel stack.  We need to alter this to give the
         * booting CPU a new stack (taken from its idle process) */
        extern struct {
                __u8 *esp;
                unsigned short ss;
        } stack_start;
        /* This is the format of the CPI IDT gate (in real mode) which
         * we're hijacking to boot the CPU */
        union IDTFormat {
                struct seg {
                        __u16 Offset;
                        __u16 Segment;
                } idt;
                __u32 val;
        } hijack_source;

        __u32 *hijack_vector;
        __u32 start_phys_address = setup_trampoline();

        /* There's a clever trick to this: The linux trampoline is
         * compiled to begin at absolute location zero, so make the
         * address zero but have the data segment selector compensate
         * for the actual address */
        hijack_source.idt.Offset = start_phys_address & 0x000F;
        hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
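        /* e.g. were start_phys_address 0x93000, this yields Offset 0x0
         * and Segment 0x9300; real mode recombines them as
         * 16 * 0x9300 + 0x0 = 0x93000, so the trampoline executes at
         * its true physical address while believing it starts at
         * offset zero */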

        cpucount++;
        alternatives_smp_switch(1);

        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("failed fork for CPU%d", cpu);
        idle->thread.eip = (unsigned long)start_secondary;
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.esp = (void *)idle->thread.esp;

        init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        irq_ctx_init(cpu);

        /* Note: Don't modify initial ss override */
        VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
                (unsigned long)hijack_source.val, hijack_source.idt.Segment,
                hijack_source.idt.Offset, stack_start.esp));

        /* init lowmem identity mapping */
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
                        min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
        flush_tlb_all();

        if (quad_boot) {
                printk("CPU %d: non extended Quad boot\n", cpu);
                hijack_vector = (__u32 *)
                    phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
                *hijack_vector = hijack_source.val;
        } else {
                printk("CPU%d: extended VIC boot\n", cpu);
                hijack_vector = (__u32 *)
                    phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
                *hijack_vector = hijack_source.val;
                /* VIC errata, may also receive interrupt at this address */
                hijack_vector = (__u32 *)
                    phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
                                  VIC_DEFAULT_CPI_BASE) * 4);
                *hijack_vector = hijack_source.val;
        }
        /* All non-boot CPUs start with interrupts fully masked.  Need
         * to lower the mask of the CPI we're about to send.  We do
         * this in the VIC by masquerading as the processor we're
         * about to boot and lowering its interrupt mask */
        local_irq_save(flags);
        if (quad_boot) {
                send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
        } else {
                outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
                /* here we're altering registers belonging to `cpu' */

                outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
                /* now go back to our original identity */
                outb(boot_cpu_id, VIC_PROCESSOR_ID);

                /* and boot the CPU */

                send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
        }
        cpu_booted_map = 0;
        local_irq_restore(flags);

        /* now wait for it to become ready (or timeout) */
        for (timeout = 0; timeout < 50000; timeout++) {
                if (cpu_booted_map)
                        break;
                udelay(100);
        }
        /* reset the page table */
        zap_low_mappings();

        if (cpu_booted_map) {
                VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
                        cpu, smp_processor_id()));

                printk("CPU%d: ", cpu);
                print_cpu_info(&cpu_data(cpu));
                wmb();
                cpu_set(cpu, cpu_callout_map);
                cpu_set(cpu, cpu_present_map);
        } else {
                printk("CPU%d FAILED TO BOOT: ", cpu);
                if (*
                    ((volatile unsigned char *)phys_to_virt(start_phys_address))
                    == 0xA5)
                        printk("Stuck.\n");
                else
                        printk("Not responding.\n");

                cpucount--;
        }
}
void __init smp_boot_cpus(void)
{
        int i;

        /* CAT BUS initialisation must be done after the memory */
        /* FIXME: The L4 has a catbus too, it just needs to be
         * accessed in a totally different way */
        if (voyager_level == 5) {
                voyager_cat_init();

                /* now that the cat has probed the Voyager System Bus, sanity
                 * check the cpu map */
                if (((voyager_quad_processors | voyager_extended_vic_processors)
                     & cpus_addr(phys_cpu_present_map)[0]) !=
                    cpus_addr(phys_cpu_present_map)[0]) {
                        /* should panic */
                        printk("\n\n***WARNING*** "
                               "Sanity check of CPU present map FAILED\n");
                }
        } else if (voyager_level == 4)
                voyager_extended_vic_processors =
                    cpus_addr(phys_cpu_present_map)[0];

        /* this sets up the idle task to run on the current cpu */
        voyager_extended_cpus = 1;
        /* Remove the global_irq_holder setting, it triggers a BUG() on
         * schedule at the moment */
        //global_irq_holder = boot_cpu_id;

        /* FIXME: Need to do something about this but currently only works
         * on CPUs with a tsc which none of mine have.
         smp_tune_scheduling();
         */
        smp_store_cpu_info(boot_cpu_id);
        printk("CPU%d: ", boot_cpu_id);
        print_cpu_info(&cpu_data(boot_cpu_id));

        if (is_cpu_quad()) {
                /* booting on a Quad CPU */
                printk("VOYAGER SMP: Boot CPU is Quad\n");
                qic_setup();
                do_quad_bootstrap();
        }

        /* enable our own CPIs */
        vic_enable_cpi();

        cpu_set(boot_cpu_id, cpu_online_map);
        cpu_set(boot_cpu_id, cpu_callout_map);

        /* loop over all the extended VIC CPUs and boot them.  The
         * Quad CPUs must be bootstrapped by their extended VIC cpu */
        for (i = 0; i < NR_CPUS; i++) {
                if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
                        continue;
                do_boot_cpu(i);
                /* This udelay seems to be needed for the Quad boots;
                 * don't remove unless you know what you're doing */
                udelay(1000);
        }
        /* we could compute the total bogomips here, but why bother?
         * Code added from smpboot.c */
        {
                unsigned long bogosum = 0;
                for (i = 0; i < NR_CPUS; i++)
                        if (cpu_isset(i, cpu_online_map))
                                bogosum += cpu_data(i).loops_per_jiffy;
                printk(KERN_INFO "Total of %d processors activated "
                       "(%lu.%02lu BogoMIPS).\n",
                       cpucount + 1, bogosum / (500000 / HZ),
                       (bogosum / (5000 / HZ)) % 100);
        }
        voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
        printk("VOYAGER: Extended (interrupt handling CPUs): "
               "%d, non-extended: %d\n", voyager_extended_cpus,
               num_booting_cpus() - voyager_extended_cpus);
        /* that's it, switch to symmetric mode */
        outb(0, VIC_PRIORITY_REGISTER);
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}
/* Reload the secondary CPUs task structure (this function does not
 * return) */
void __init initialize_secondary(void)
{
#if 0
        // AC kernels only
        set_current(hard_get_current());
#endif

        /*
         * We don't actually need to load the full TSS,
         * basically just the stack pointer and the eip.
         */

        asm volatile ("movl %0,%%esp\n\t"
                      "jmp *%1"::"r" (current->thread.esp),
                      "r"(current->thread.eip));
}
/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses.  To find out what you have to probe all the
 * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
fastcall void smp_vic_sys_interrupt(struct pt_regs *regs)
{
        ack_CPI(VIC_SYS_INT);
        printk("Voyager SYSTEM INTERRUPT\n");
}

/* Handle a voyager CMN_INT; these interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred.  FIXME: At the moment, ignore all this. */
fastcall void smp_vic_cmn_interrupt(struct pt_regs *regs)
{
        static __u8 in_cmn_int = 0;
        static DEFINE_SPINLOCK(cmn_int_lock);

        /* common ints are broadcast, so make sure we only do this once */
        _raw_spin_lock(&cmn_int_lock);
        if (in_cmn_int)
                goto unlock_end;

        in_cmn_int++;
        _raw_spin_unlock(&cmn_int_lock);

        VDEBUG(("Voyager COMMON INTERRUPT\n"));

        if (voyager_level == 5)
                voyager_cat_do_common_interrupt();

        _raw_spin_lock(&cmn_int_lock);
        in_cmn_int = 0;
      unlock_end:
        _raw_spin_unlock(&cmn_int_lock);
        ack_CPI(VIC_CMN_INT);
}
/*
 * Reschedule call back.  Nothing to do, all the work is done
 * automatically when we return from the interrupt.  */
static void smp_reschedule_interrupt(void)
{
        /* do nothing */
}
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void leave_mm(unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}

/*
 * Invalidate call-back
 */
static void smp_invalidate_interrupt(void)
{
        __u8 cpu = smp_processor_id();

        if (!test_bit(cpu, &smp_invalidate_needed))
                return;
        /* This will flood messages.  Don't uncomment unless you see
         * problems with cross cpu invalidation
         VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
         smp_processor_id()));
         */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        smp_mb__before_clear_bit();
        clear_bit(cpu, &smp_invalidate_needed);
        smp_mb__after_clear_bit();
}
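/* The barrier pair around the clear_bit() in smp_invalidate_interrupt()
 * makes sure the TLB work is globally visible before the initiating
 * CPU, which spins on smp_invalidate_needed in
 * voyager_flush_tlb_others(), sees our bit drop and proceeds. */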
/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
                         unsigned long va)
{
        int stuck = 50000;

        if (!cpumask)
                BUG();
        if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
                BUG();
        if (cpumask & (1 << smp_processor_id()))
                BUG();
        if (!mm)
                BUG();

        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
        atomic_set_mask(cpumask, &smp_invalidate_needed);
        /*
         * We have to send the CPI only to
         * CPUs affected.
         */
        send_CPI(cpumask, VIC_INVALIDATE_CPI);

        while (smp_invalidate_needed) {
                mb();
                if (--stuck == 0) {
                        printk("***WARNING*** Stuck doing invalidate CPI "
                               "(CPU%d)\n", smp_processor_id());
                        break;
                }
        }

        /* Uncomment only to debug invalidation problems
           VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
         */

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        local_flush_tlb();
        if (cpu_mask)
                voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (cpu_mask)
                voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpu_mask)
                voyager_flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}

EXPORT_SYMBOL(flush_tlb_page);
/* enable the requested IRQs */
static void smp_enable_irq_interrupt(void)
{
        __u8 irq;
        __u8 cpu = get_cpu();

        VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
                vic_irq_enable_mask[cpu]));

        spin_lock(&vic_irq_lock);
        for (irq = 0; irq < 16; irq++) {
                if (vic_irq_enable_mask[cpu] & (1 << irq))
                        enable_local_vic_irq(irq);
        }
        vic_irq_enable_mask[cpu] = 0;
        spin_unlock(&vic_irq_lock);

        put_cpu_no_resched();
}

/*
 *      CPU halt call-back
 */
static void smp_stop_cpu_function(void *dummy)
{
        VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        for (;;)
                halt();
}
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        volatile unsigned long started;
        volatile unsigned long finished;
        int wait;
};

static struct call_data_struct *call_data;

/* execute a thread on a new CPU.  The function to be called must be
 * previously set up.  This is used to schedule a function for
 * execution on all CPUs - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        /* must take copy of wait because call_data may be replaced
         * unless the function is waiting for us to finish */
        int wait = call_data->wait;
        __u8 cpu = smp_processor_id();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        if (!test_and_clear_bit(cpu, &call_data->started)) {
                /* If the bit wasn't set, this could be a replay */
                printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function"
                       " with no call pending\n", cpu);
                return;
        }
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func) (info);
        __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();
        if (wait) {
                mb();
                clear_bit(cpu, &call_data->finished);
        }
}
static int
voyager_smp_call_function_mask(cpumask_t cpumask,
                               void (*func) (void *info), void *info, int wait)
{
        struct call_data_struct data;
        u32 mask = cpus_addr(cpumask)[0];

        mask &= ~(1 << smp_processor_id());

        if (!mask)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        data.started = mask;
        data.wait = wait;
        if (wait)
                data.finished = mask;

        spin_lock(&call_lock);
        call_data = &data;
        wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        send_CPI(mask, VIC_CALL_FUNCTION_CPI);

        /* Wait for response */
        while (data.started)
                barrier();

        if (wait)
                while (data.finished)
                        barrier();

        spin_unlock(&call_lock);

        return 0;
}
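/* Callers normally reach voyager_smp_call_function_mask() through the
 * generic smp_call_function() wrapper rather than directly; an
 * illustrative sketch (the handler name here is hypothetical):
 *
 *      static void drain_local_state(void *info) { ... }
 *
 *      smp_call_function(drain_local_state, NULL, 1, 1);
 *
 * which arrives here with the mask covering every online CPU except
 * the caller, exactly as voyager_smp_send_stop() does below. */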
/* Sorry about the name.  In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt.  This is used
 * by linux to reschedule the processor.  Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here.  We don't actually use this for tick counting, so losing
 * ticks doesn't matter.
 *
 * FIXME: For those CPUs which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this
 *
 * This function is currently a placeholder and is unused in the code */
fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        wrapper_smp_local_timer_interrupt();
        set_irq_regs(old_regs);
}
/* All of the QUAD interrupt GATES */
fastcall void smp_qic_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        ack_QIC_CPI(QIC_TIMER_CPI);
        wrapper_smp_local_timer_interrupt();
        set_irq_regs(old_regs);
}

fastcall void smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_INVALIDATE_CPI);
        smp_invalidate_interrupt();
}

fastcall void smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_RESCHEDULE_CPI);
        smp_reschedule_interrupt();
}

fastcall void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
        smp_enable_irq_interrupt();
}

fastcall void smp_qic_call_function_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
        smp_call_function_interrupt();
}

fastcall void smp_vic_cpi_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        __u8 cpu = smp_processor_id();

        if (is_cpu_quad())
                ack_QIC_CPI(VIC_CPI_LEVEL0);
        else
                ack_VIC_CPI(VIC_CPI_LEVEL0);

        if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
                wrapper_smp_local_timer_interrupt();
        if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
                smp_invalidate_interrupt();
        if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
                smp_reschedule_interrupt();
        if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
                smp_enable_irq_interrupt();
        if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                smp_call_function_interrupt();
        set_irq_regs(old_regs);
}
static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}

/* used to set up the trampoline for other CPUs when the memory manager
 * is sorted out */
void __init smp_alloc_memory(void)
{
        trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE);
        if (__pa(trampoline_base) >= 0x93000)
                BUG();
}
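/* The trampoline has to live in memory a CPU still in real mode can
 * reach through the 16-bit segment:offset hijack in do_boot_cpu(),
 * hence the low-page allocation; the 0x93000 ceiling presumably
 * keeps it clear of the boot-time data sitting just above it. */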
/* send a reschedule CPI to one CPU by physical CPU number */
static void voyager_smp_send_reschedule(int cpu)
{
        send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}

int hard_smp_processor_id(void)
{
        __u8 i;
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
        if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
                return cpumask & 0x1F;

        for (i = 0; i < 8; i++) {
                if (cpumask & (1 << i))
                        return i;
        }
        printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
        return 0;
}

int safe_smp_processor_id(void)
{
        return hard_smp_processor_id();
}
/* broadcast a halt to all other CPUs */
static void voyager_smp_send_stop(void)
{
        smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}

/* this function is triggered in time.c when a clock tick fires;
 * we need to re-broadcast the tick to all CPUs */
void smp_vic_timer_interrupt(void)
{
        send_CPI_allbutself(VIC_TIMER_CPI);
        smp_local_timer_interrupt();
}

/* local (per CPU) timer interrupt.  It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.  The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void smp_local_timer_interrupt(void)
{
        int cpu = smp_processor_id();
        long weight;

        profile_tick(CPU_PROFILING);
        if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile.  In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
                if (per_cpu(prof_counter, cpu) !=
                    per_cpu(prof_old_multiplier, cpu)) {
                        /* FIXME: need to update the vic timer tick here */
                        per_cpu(prof_old_multiplier, cpu) =
                            per_cpu(prof_counter, cpu);
                }

                update_process_times(user_mode_vm(get_irq_regs()));
        }

        if (((1 << cpu) & voyager_extended_vic_processors) == 0)
                /* only extended VIC processors participate in
                 * interrupt distribution */
                return;

        /*
         * We take the 'long' return path, and there every subsystem
         * grabs the appropriate locks (kernel lock/ irq lock).
         *
         * we might want to decouple profiling from the 'long path',
         * and do the profiling totally in assembly.
         *
         * Currently this isn't too much of an issue (performance wise),
         * we can take more than 100K local irqs per second on a 100 MHz P5.
         */

        if ((++vic_tick[cpu] & 0x7) != 0)
                return;
        /* get here every 8 ticks */

        /* Change our priority to give someone else a chance at getting
         * the IRQ.  The algorithm goes like this:
         *
         * In the VIC, the dynamically routed interrupt is always
         * handled by the lowest priority eligible (i.e. receiving
         * interrupts) CPU.  If >1 eligible CPUs are equal lowest, the
         * lowest processor number gets it.
         *
         * The priority of a CPU is controlled by a special per-CPU
         * VIC priority register, which is 3 bits wide, 0 being lowest
         * and 7 highest priority.
         *
         * Therefore we subtract the average number of interrupts from
         * the number we've fielded.  If this number is negative, we
         * lower the activity count and if it is positive, we raise
         * it.
         *
         * I'm afraid this still leads to odd looking interrupt counts:
         * the totals are all roughly equal, but the individual ones
         * look rather skewed.
         *
         * FIXME: This algorithm is total crap when mixed with SMP
         * affinity code since we now try to even up the interrupt
         * counts when an affinity binding is keeping them on a
         * particular CPU */
        weight = (vic_intr_count[cpu] * voyager_extended_cpus
                  - vic_intr_total) >> 4;
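        /* e.g. with 4 extended CPUs, a CPU that fielded 300 of 1000
         * total interrupts gets (300 * 4 - 1000) >> 4 = 12: busier
         * than average, so after the offset and clamping below it is
         * written as the maximum priority value 7 and becomes the
         * least likely to win the next dynamically routed interrupt */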
        weight += 4;
        if (weight > 7)
                weight = 7;
        if (weight < 0)
                weight = 0;

        outb((__u8) weight, VIC_PRIORITY_REGISTER);

#ifdef VOYAGER_DEBUG
        if ((vic_tick[cpu] & 0xFFF) == 0) {
                /* print this message roughly every 25 secs */
                printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
                       cpu, vic_tick[cpu], weight);
        }
#endif
}
/* setup the profiling timer */
int setup_profiling_timer(unsigned int multiplier)
{
        int i;

        if ((!multiplier))
                return -EINVAL;

        /*
         * Set the new multiplier for each CPU.  CPUs don't start using the
         * new values until the next timer interrupt in which they do process
         * accounting.
         */
        for (i = 0; i < NR_CPUS; ++i)
                per_cpu(prof_multiplier, i) = multiplier;

        return 0;
}
/* This is a bit of a mess, but forced on us by the genirq changes:
 * there's no genirq handler that really does what voyager wants,
 * so hack it up with the simple IRQ handler */
static void fastcall handle_vic_irq(unsigned int irq, struct irq_desc *desc)
{
        before_handle_vic_irq(irq);
        handle_simple_irq(irq, desc);
        after_handle_vic_irq(irq);
}

/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * on in smp_intr_init */
#define VIC_SET_GATE(cpi, vector) \
        set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
        set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
void __init smp_intr_init(void)
{
        int i;

        /* initialize the per cpu irq mask to all disabled */
        for (i = 0; i < NR_CPUS; i++)
                vic_irq_mask[i] = 0xFFFF;

        VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

        VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
        VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

        QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
        QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
        QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
        QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
        QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

        /* now put the VIC descriptor into the first 48 IRQs
         *
         * This is for later: first 16 correspond to PC IRQs; next 16
         * are Primary MC IRQs and final 16 are Secondary MC IRQs */
        for (i = 0; i < 48; i++)
                set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}
/* send a CPI at level cpi to a set of cpus in cpuset (one bit set
 * per processor to receive the CPI) */
static void send_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;
        __u32 quad_cpuset = (cpuset & voyager_quad_processors);

        if (cpi < VIC_START_FAKE_CPI) {
                /* fake CPI are only used for booting, so send to the
                 * extended quads as well---Quads must be VIC booted */
                outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
                return;
        }
        if (quad_cpuset)
                send_QIC_CPI(quad_cpuset, cpi);
        cpuset &= ~quad_cpuset;
        cpuset &= 0xff;         /* only first 8 CPUs valid for VIC CPI */
        if (cpuset == 0)
                return;
        for_each_online_cpu(cpu) {
                if (cpuset & (1 << cpu))
                        set_bit(cpi, &vic_cpi_mailbox[cpu]);
        }
        if (cpuset)
                outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}
/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away
 * */
static int ack_QIC_CPI(__u8 cpi)
{
        __u8 cpu = hard_smp_processor_id();

        cpi &= 7;

        outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
        return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}

static void ack_special_QIC_CPI(__u8 cpi)
{
        switch (cpi) {
        case VIC_CMN_INT:
                outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
                break;
        case VIC_SYS_INT:
                outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
                break;
        }
        /* also clear at the VIC, just in case (nop for non-extended proc) */
        ack_VIC_CPI(cpi);
}
/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
static void ack_VIC_CPI(__u8 cpi)
{
#ifdef VOYAGER_DEBUG
        unsigned long flags;
        __u16 isr;
        __u8 cpu = smp_processor_id();

        local_irq_save(flags);
        isr = vic_read_isr();
        if ((isr & (1 << (cpi & 7))) == 0) {
                printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
        }
#endif
        /* send specific EOI; the two system interrupts have
         * bit 4 set for a separate vector but behave as the
         * corresponding 3 bit intr */
        outb_p(0x60 | (cpi & 7), 0x20);

#ifdef VOYAGER_DEBUG
        if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
                printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
        }
        local_irq_restore(flags);
#endif
}
/* cribbed with thanks from irq.c */
#define __byte(x,y)     (((unsigned char *)&(y))[x])
#define cached_21(cpu)  (__byte(0,vic_irq_mask[cpu]))
#define cached_A1(cpu)  (__byte(1,vic_irq_mask[cpu]))
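/* __byte(0,...) aliases the low byte of the 16-bit mask word and
 * __byte(1,...) the high byte, i.e. (on little-endian x86) the masks
 * destined for the master PIC data port 0x21 and the slave PIC data
 * port 0xA1 respectively. */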
static unsigned int startup_vic_irq(unsigned int irq)
{
        unmask_vic_irq(irq);

        return 0;
}
/* The enable and disable routines.  This is where we run into
 * conflicting architectural philosophy.  Fundamentally, the voyager
 * architecture does not expect to have to disable interrupts globally
 * (the IRQ controllers belong to each CPU).  The processor masquerade
 * which is used to start the system shouldn't be used in a running OS
 * since it will cause great confusion if two separate CPUs drive to
 * the same IRQ controller (I know, I've tried it).
 *
 * The solution is a variant on the NCR lazy SPL design:
 *
 * 1) To disable an interrupt, do nothing (other than set the
 *    IRQ_DISABLED flag).  This dares the interrupt actually to arrive.
 *
 * 2) If the interrupt dares to come in, raise the local mask against
 *    it (this will result in all the CPU masks being raised
 *    eventually).
 *
 * 3) To enable the interrupt, lower the mask on the local CPU and
 *    broadcast an Interrupt enable CPI which causes all other CPUs to
 *    adjust their masks accordingly.  */

static void unmask_vic_irq(unsigned int irq)
{
        /* linux doesn't do processor-irq affinity, so enable on
         * all CPUs we know about */
        int cpu = smp_processor_id(), real_cpu;
        __u16 mask = (1 << irq);
        __u32 processorList = 0;
        unsigned long flags;

        VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
                irq, cpu, cpu_irq_affinity[cpu]));
        spin_lock_irqsave(&vic_irq_lock, flags);
        for_each_online_cpu(real_cpu) {
                if (!(voyager_extended_vic_processors & (1 << real_cpu)))
                        continue;
                if (!(cpu_irq_affinity[real_cpu] & mask)) {
                        /* irq has no affinity for this CPU, ignore */
                        continue;
                }
                if (real_cpu == cpu) {
                        enable_local_vic_irq(irq);
                } else if (vic_irq_mask[real_cpu] & mask) {
                        vic_irq_enable_mask[real_cpu] |= mask;
                        processorList |= (1 << real_cpu);
                }
        }
        spin_unlock_irqrestore(&vic_irq_lock, flags);
        if (processorList)
                send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
}
static void mask_vic_irq(unsigned int irq)
{
        /* lazy disable, do nothing */
}

static void enable_local_vic_irq(unsigned int irq)
{
        __u8 cpu = smp_processor_id();
        __u16 mask = ~(1 << irq);
        __u16 old_mask = vic_irq_mask[cpu];

        vic_irq_mask[cpu] &= mask;
        if (vic_irq_mask[cpu] == old_mask)
                return;

        VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
                irq, cpu));

        if (irq & 8) {
                outb_p(cached_A1(cpu), 0xA1);
                (void)inb_p(0xA1);
        } else {
                outb_p(cached_21(cpu), 0x21);
                (void)inb_p(0x21);
        }
}

static void disable_local_vic_irq(unsigned int irq)
{
        __u8 cpu = smp_processor_id();
        __u16 mask = (1 << irq);
        __u16 old_mask = vic_irq_mask[cpu];

        if (irq == 7)
                return;

        vic_irq_mask[cpu] |= mask;
        if (old_mask == vic_irq_mask[cpu])
                return;

        VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
                irq, cpu));

        if (irq & 8) {
                outb_p(cached_A1(cpu), 0xA1);
                (void)inb_p(0xA1);
        } else {
                outb_p(cached_21(cpu), 0x21);
                (void)inb_p(0x21);
        }
}
/* The VIC is level triggered, so the ack can only be issued after the
 * interrupt completes.  However, we do Voyager lazy interrupt
 * handling here: It is an extremely expensive operation to mask an
 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
 * this interrupt actually comes in, then we mask and ack here to push
 * the interrupt off to another CPU */
static void before_handle_vic_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        __u8 cpu = smp_processor_id();

        _raw_spin_lock(&vic_irq_lock);
        vic_intr_total++;
        vic_intr_count[cpu]++;

        if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
                /* The irq is not in our affinity mask, push it off
                 * onto another CPU */
                VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
                        "on cpu %d\n", irq, cpu));
                disable_local_vic_irq(irq);
                /* set IRQ_INPROGRESS to prevent the handler in irq.c from
                 * actually calling the interrupt routine */
                desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
        } else if (desc->status & IRQ_DISABLED) {
                /* Damn, the interrupt actually arrived, do the lazy
                 * disable thing.  The interrupt routine in irq.c will
                 * not handle a IRQ_DISABLED interrupt, so nothing more
                 * need be done here */
                VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
                        irq, cpu));
                disable_local_vic_irq(irq);
                desc->status |= IRQ_REPLAY;
        } else {
                desc->status &= ~IRQ_REPLAY;
        }

        _raw_spin_unlock(&vic_irq_lock);
}
/* Finish the VIC interrupt: basically mask */
static void after_handle_vic_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;

        _raw_spin_lock(&vic_irq_lock);
        {
                unsigned int status = desc->status & ~IRQ_INPROGRESS;
#ifdef VOYAGER_DEBUG
                __u16 isr;
#endif

                desc->status = status;
                if ((status & IRQ_DISABLED))
                        disable_local_vic_irq(irq);
#ifdef VOYAGER_DEBUG
                /* DEBUG: before we ack, check what's in progress */
                isr = vic_read_isr();
                if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) {
                        __u8 cpu = smp_processor_id();
                        __u8 real_cpu;
                        int mask;       /* Um... initialize me??? --RR */

                        printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
                               cpu, irq);
                        for_each_possible_cpu(real_cpu, mask) {

                                outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
                                     VIC_PROCESSOR_ID);
                                isr = vic_read_isr();
                                if (isr & (1 << irq)) {
                                        printk
                                            ("VOYAGER SMP: CPU%d ack irq %d\n",
                                             real_cpu, irq);
                                        ack_vic_irq(irq);
                                }
                                outb(cpu, VIC_PROCESSOR_ID);
                        }
                }
#endif /* VOYAGER_DEBUG */
                /* as soon as we ack, the interrupt is eligible for
                 * receipt by another CPU so everything must be in
                 * flight */
                ack_vic_irq(irq);
                if (status & IRQ_REPLAY) {
                        /* replay is set if we disable the interrupt
                         * in the before_handle_vic_irq() routine, so
                         * clear the in progress bit here to allow the
                         * next CPU to handle this correctly */
                        desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
                }
#ifdef VOYAGER_DEBUG
                isr = vic_read_isr();
                if ((isr & (1 << irq)) != 0)
                        printk("VOYAGER SMP: after_handle_vic_irq() after "
                               "ack irq=%d, isr=0x%x\n", irq, isr);
#endif /* VOYAGER_DEBUG */
        }
        _raw_spin_unlock(&vic_irq_lock);

        /* All code after this point is out of the main path - the IRQ
         * may be intercepted by another CPU if reasserted */
}
/* Linux processor - interrupt affinity manipulations.
 *
 * For each processor, we maintain a 32 bit irq affinity mask.
 * Initially it is set to all 1's so every processor accepts every
 * interrupt.  In this call, we change the processor's affinity mask:
 *
 * Change from enable to disable:
 *
 * If the interrupt ever comes in to the processor, we will disable it
 * and ack it to push it off to another CPU, so just accept the mask here.
 *
 * Change from disable to enable:
 *
 * change the mask and then do an interrupt enable CPI to re-enable on
 * the selected processors */

void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
{
        /* Only extended processors handle interrupts */
        unsigned long real_mask;
        unsigned long irq_mask = 1 << irq;
        int cpu;

        real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;

        if (cpus_addr(mask)[0] == 0)
                /* can't have no CPUs to accept the interrupt -- extremely
                 * bad things will happen */
                return;

        if (irq == 0)
                /* can't change the affinity of the timer IRQ.  This
                 * is due to the constraint in the voyager
                 * architecture that the CPI also comes in on an IRQ
                 * line and we have chosen IRQ0 for this.  If you
                 * raise the mask on this interrupt, the processor
                 * will no-longer be able to accept VIC CPIs */
                return;

        if (irq >= 32)
                /* You can only have 32 interrupts in a voyager system
                 * (and 32 only if you have a secondary microchannel
                 * bus) */
                return;

        for_each_online_cpu(cpu) {
                unsigned long cpu_mask = 1 << cpu;

                if (cpu_mask & real_mask) {
                        /* enable the interrupt for this cpu */
                        cpu_irq_affinity[cpu] |= irq_mask;
                } else {
                        /* disable the interrupt for this cpu */
                        cpu_irq_affinity[cpu] &= ~irq_mask;
                }
        }
        /* this is magic, we now have the correct affinity maps, so
         * enable the interrupt.  This will send an enable CPI to
         * those CPUs who need to enable it in their local masks,
         * causing them to correct for the new affinity.  If the
         * interrupt is currently globally disabled, it will simply be
         * disabled again as it comes in (voyager lazy disable).  If
         * the affinity map is tightened to disable the interrupt on a
         * cpu, it will be pushed off when it comes in */
        unmask_vic_irq(irq);
}
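/* ack_vic_irq() below issues the 8259 OCW2 "specific EOI" command:
 * 0x60 OR-ed with the interrupt level (0-7) acknowledges exactly that
 * line.  IRQs 8-15 sit on the slave PIC, so they take an EOI at the
 * slave (0xA0) plus a specific EOI for the cascade line at the master
 * (IR2, hence 0x62). */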
static void ack_vic_irq(unsigned int irq)
{
        if (irq & 8) {
                outb(0x62, 0x20);       /* Specific EOI to cascade */
                outb(0x60 | (irq & 7), 0xA0);
        } else {
                outb(0x60 | (irq & 7), 0x20);
        }
}
/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
 * but are not vectored by it.  This means that the 8259 mask must be
 * lowered to receive them */
static __init void vic_enable_cpi(void)
{
        __u8 cpu = smp_processor_id();

        /* just take a copy of the current mask (nop for boot cpu) */
        vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];

        enable_local_vic_irq(VIC_CPI_LEVEL0);
        enable_local_vic_irq(VIC_CPI_LEVEL1);
        /* for sys int and cmn int */
        enable_local_vic_irq(7);

        if (is_cpu_quad()) {
                outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
                outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
                VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
                        cpu, QIC_CPI_ENABLE));
        }

        VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
                cpu, vic_irq_mask[cpu]));
}
void voyager_smp_dump()
{
        int old_cpu = smp_processor_id(), cpu;

        /* dump the interrupt masks of each processor */
        for_each_online_cpu(cpu) {
                __u16 imr, isr, irr;
                unsigned long flags;

                local_irq_save(flags);
                outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
                imr = (inb(0xa1) << 8) | inb(0x21);
                outb(0x0a, 0xa0);
                irr = inb(0xa0) << 8;
                outb(0x0a, 0x20);
                irr |= inb(0x20);
                outb(0x0b, 0xa0);
                isr = inb(0xa0) << 8;
                outb(0x0b, 0x20);
                isr |= inb(0x20);
                outb(old_cpu, VIC_PROCESSOR_ID);
                local_irq_restore(flags);
                printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
                       cpu, vic_irq_mask[cpu], imr, irr, isr);
#if 0
                /* These lines are put in to try to unstick an un ack'd irq */
                int irq;

                for (irq = 0; irq < 16; irq++) {
                        if (isr & (1 << irq)) {
                                printk("\tCPU%d: ack irq %d\n",
                                       cpu, irq);
                                local_irq_save(flags);
                                outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
                                     VIC_PROCESSOR_ID);
                                ack_vic_irq(irq);
                                outb(old_cpu, VIC_PROCESSOR_ID);
                                local_irq_restore(flags);
                        }
                }
#endif
        }
}
void smp_voyager_power_off(void *dummy)
{
        if (smp_processor_id() == boot_cpu_id)
                voyager_power_off();
        else
                smp_stop_cpu_function(NULL);
}

static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
{
        /* FIXME: ignore max_cpus for now */
        smp_boot_cpus();
}

static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
        init_gdt(smp_processor_id());
        switch_to_new_gdt();

        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_callout_map);
        cpu_set(smp_processor_id(), cpu_possible_map);
        cpu_set(smp_processor_id(), cpu_present_map);
}
static int __cpuinit voyager_cpu_up(unsigned int cpu)
{
        /* This only works at boot for x86.  See "rewrite" above. */
        if (cpu_isset(cpu, smp_commenced_mask))
                return -ENOSYS;

        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map))
                return -EIO;
        /* Unleash the CPU! */
        cpu_set(cpu, smp_commenced_mask);
        while (!cpu_isset(cpu, cpu_online_map))
                mb();
        return 0;
}

static void __init voyager_smp_cpus_done(unsigned int max_cpus)
{
        zap_low_mappings();
}

void __init smp_setup_processor_id(void)
{
        current_thread_info()->cpu = hard_smp_processor_id();
        x86_write_percpu(cpu_number, hard_smp_processor_id());
}
struct smp_ops smp_ops = {
        .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
        .smp_prepare_cpus = voyager_smp_prepare_cpus,
        .cpu_up = voyager_cpu_up,
        .smp_cpus_done = voyager_smp_cpus_done,

        .smp_send_stop = voyager_smp_send_stop,
        .smp_send_reschedule = voyager_smp_send_reschedule,
        .smp_call_function_mask = voyager_smp_call_function_mask,
};