/* smp.c: Sparc64 SMP support.
*
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
+#include <asm/hvtramp.h>
+#include <asm/io.h>
#include <asm/irq.h>
+#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/mdesc.h>
+#include <asm/ldc.h>
+#include <asm/hypervisor.h>
extern void calibrate_delay(void);
-/* Please don't make this stuff initdata!!! --DaveM */
-static unsigned char boot_cpu_id;
+int sparc64_multi_core __read_mostly;
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
+ { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+ { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
+EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_SYMBOL(cpu_core_map);
+
static cpumask_t smp_commenced_mask;
-static cpumask_t cpu_callout_map;
void smp_info(struct seq_file *m)
{
int i;
seq_printf(m, "State:\n");
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i))
- seq_printf(m,
- "CPU%d:\t\tonline\n", i);
- }
+ for_each_online_cpu(i)
+ seq_printf(m, "CPU%d:\t\tonline\n", i);
}
void smp_bogo(struct seq_file *m)
{
int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- seq_printf(m,
- "Cpu%dBogo\t: %lu.%02lu\n"
- "Cpu%dClkTck\t: %016lx\n",
- i, cpu_data(i).udelay_val / (500000/HZ),
- (cpu_data(i).udelay_val / (5000/HZ)) % 100,
- i, cpu_data(i).clock_tick);
+ for_each_online_cpu(i)
+ seq_printf(m,
+ "Cpu%dClkTck\t: %016lx\n",
+ i, cpu_data(i).clock_tick);
}
-void __init smp_store_cpu_info(int id)
-{
- int cpu_node;
-
- /* multiplier and counter set by
- smp_setup_percpu_timer() */
- cpu_data(id).udelay_val = loops_per_jiffy;
-
- cpu_find_by_mid(id, &cpu_node);
- cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
- "clock-frequency", 0);
-
- cpu_data(id).idle_volume = 1;
-
- cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
- 16 * 1024);
- cpu_data(id).dcache_line_size =
- prom_getintdefault(cpu_node, "dcache-line-size", 32);
- cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
- 16 * 1024);
- cpu_data(id).icache_line_size =
- prom_getintdefault(cpu_node, "icache-line-size", 32);
- cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
- 4 * 1024 * 1024);
- cpu_data(id).ecache_line_size =
- prom_getintdefault(cpu_node, "ecache-line-size", 64);
- printk("CPU[%d]: Caches "
- "D[sz(%d):line_sz(%d)] "
- "I[sz(%d):line_sz(%d)] "
- "E[sz(%d):line_sz(%d)]\n",
- id,
- cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
- cpu_data(id).icache_size, cpu_data(id).icache_line_size,
- cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
-}
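+/* Protects call_data and serializes updates to cpu_online_map so that
+ * cross calls cannot race with cpus coming online or going offline.
+ */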
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
-static void smp_setup_percpu_timer(void);
+extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0;
-void __init smp_callin(void)
+void __devinit smp_callin(void)
{
int cpuid = hard_smp_processor_id();
__local_per_cpu_offset = __per_cpu_offset(cpuid);
- if (tlb_type == hypervisor) {
- sun4v_register_fault_status();
+ if (tlb_type == hypervisor)
sun4v_ktsb_register();
- }
__flush_tlb_all();
- smp_setup_percpu_timer();
+ setup_sparc64_timer();
if (cheetah_pcache_forced_on)
cheetah_enable_pcache();
local_irq_enable();
- calibrate_delay();
- smp_store_cpu_info(cpuid);
callin_flag = 1;
__asm__ __volatile__("membar #Sync\n\t"
"flush %%g6" : : : "memory");
while (!cpu_isset(cpuid, smp_commenced_mask))
rmb();
+ spin_lock(&call_lock);
cpu_set(cpuid, cpu_online_map);
+ spin_unlock(&call_lock);
/* idle thread is expected to have preempt disabled */
preempt_disable();
panic("SMP bolixed\n");
}
-static unsigned long current_tick_offset __read_mostly;
-
/* This tick register synchronization scheme is taken entirely from
* the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
*
} else
adj = -delta;
- tick_ops->add_tick(adj, current_tick_offset);
+ tick_ops->add_tick(adj);
}
#if DEBUG_TICK_SYNC
t[i].rt = rt;
spin_unlock_irqrestore(&itc_sync_lock, flags);
}
+#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
+/* XXX Put this in some common place. XXX */
+static unsigned long kimage_addr_to_ra(void *p)
+{
+ unsigned long val = (unsigned long) p;
+
+ return kern_base + (val - KERNBASE);
+}
+
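+/* Boot a cpu inside a logical domain: fill in an hvtramp_descr with the
+ * kernel's locked TLB mappings and this cpu's fault status area, then ask
+ * the hypervisor to start the target cpu at the hvtramp trampoline.
+ */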
+static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+{
+ extern unsigned long sparc64_ttable_tl0;
+ extern unsigned long kern_locked_tte_data;
+ extern int bigkernel;
+ struct hvtramp_descr *hdesc;
+ unsigned long trampoline_ra;
+ struct trap_per_cpu *tb;
+ u64 tte_vaddr, tte_data;
+ unsigned long hv_err;
+
+ hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
+ if (!hdesc) {
+ printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
+ "hvtramp_descr.\n");
+ return;
+ }
+
+ hdesc->cpu = cpu;
+ hdesc->num_mappings = (bigkernel ? 2 : 1);
+
+ tb = &trap_block[cpu];
+ tb->hdesc = hdesc;
+
+ hdesc->fault_info_va = (unsigned long) &tb->fault_info;
+ hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
+
+ hdesc->thread_reg = thread_reg;
+
+ tte_vaddr = (unsigned long) KERNBASE;
+ tte_data = kern_locked_tte_data;
+
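+ /* The kernel image is held by one locked 4MB TTE, or two when the
+ * image spans more than 4MB (bigkernel).
+ */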
+ hdesc->maps[0].vaddr = tte_vaddr;
+ hdesc->maps[0].tte = tte_data;
+ if (bigkernel) {
+ tte_vaddr += 0x400000;
+ tte_data += 0x400000;
+ hdesc->maps[1].vaddr = tte_vaddr;
+ hdesc->maps[1].tte = tte_data;
+ }
+
+ trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
+
+ hv_err = sun4v_cpu_start(cpu, trampoline_ra,
+ kimage_addr_to_ra(&sparc64_ttable_tl0),
+ __pa(hdesc));
+ if (hv_err)
+ printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
+ "gives error %lu\n", hv_err);
+}
+#endif
+
extern unsigned long sparc64_cpu_startup;
/* The OBP cpu startup callback truncates the 3rd arg cookie to
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
+ struct trap_per_cpu *tb = &trap_block[cpu];
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
unsigned long cookie =
(unsigned long)(&cpu_new_thread);
struct task_struct *p;
- int timeout, ret, cpu_node;
+ int timeout, ret;
p = fork_idle(cpu);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
callin_flag = 0;
cpu_new_thread = task_thread_info(p);
- cpu_set(cpu, cpu_callout_map);
- cpu_find_by_mid(cpu, &cpu_node);
- prom_startcpu(cpu_node, entry, cookie);
+ if (tlb_type == hypervisor) {
+#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
+ if (ldom_domaining_enabled)
+ ldom_startcpu_cpuid(cpu,
+ (unsigned long) cpu_new_thread);
+ else
+#endif
+ prom_startcpu_cpuid(cpu, entry, cookie);
+ } else {
+ struct device_node *dp = of_find_node_by_cpuid(cpu);
+
+ prom_startcpu(dp->node, entry, cookie);
+ }
- for (timeout = 0; timeout < 5000000; timeout++) {
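+ /* Give the new cpu up to five seconds (50000 * 100us) to call in. */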
+ for (timeout = 0; timeout < 50000; timeout++) {
if (callin_flag)
break;
udelay(100);
}
+
if (callin_flag) {
ret = 0;
} else {
printk("Processor %d is stuck.\n", cpu);
- cpu_clear(cpu, cpu_callout_map);
ret = -ENODEV;
}
cpu_new_thread = NULL;
+ if (tb->hdesc) {
+ kfree(tb->hdesc);
+ tb->hdesc = NULL;
+ }
+
return ret;
}
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
u64 pstate, ver;
- int nack_busy_id, is_jbus;
+ int nack_busy_id, is_jbus, need_more;
if (cpus_empty(mask))
return;
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
retry:
+ need_more = 0;
__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
: : "r" (pstate), "i" (PSTATE_IE));
: /* no outputs */
: "r" (target), "i" (ASI_INTR_W));
nack_busy_id++;
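+ /* The busy/nack dispatch status register tracks two bits per
+ * target, so only 32 cpus can be cross-called in one pass.
+ */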
+ if (nack_busy_id == 32) {
+ need_more = 1;
+ break;
+ }
}
}
if (dispatch_stat == 0UL) {
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: : "r" (pstate));
+ if (unlikely(need_more)) {
+ int i, cnt = 0;
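+ /* The first 32 cpus in the mask were handled by this pass,
+ * strip them out and retry with the remainder.
+ */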
+ for_each_cpu_mask(i, mask) {
+ cpu_clear(i, mask);
+ cnt++;
+ if (cnt == 32)
+ break;
+ }
+ goto retry;
+ }
return;
}
if (!--stuck)
if ((dispatch_stat & check_mask) == 0)
cpu_clear(i, mask);
this_busy_nack += 2;
+ if (this_busy_nack == 64)
+ break;
}
goto retry;
}
}
-#if 0
/* Multi-cpu list version. */
-static int init_cpu_list(u16 *list, cpumask_t mask)
-{
- int i, cnt;
-
- cnt = 0;
- for_each_cpu_mask(i, mask)
- list[cnt++] = i;
-
- return cnt;
-}
-
-static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask)
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
- int i;
+ struct trap_per_cpu *tb;
+ u16 *cpu_list;
+ u64 *mondo;
+ cpumask_t error_mask;
+ unsigned long flags, status;
+ int cnt, retries, this_cpu, prev_sent, i;
- for (i = 0; i < orig_cnt; i++) {
- if (list[i] == 0xffff)
- cpu_clear(i, mask);
- }
+ if (cpus_empty(mask))
+ return;
- return init_cpu_list(list, mask);
-}
+ /* We have to do this whole thing with interrupts fully disabled.
+ * Otherwise if we send an xcall from interrupt context it will
+ * corrupt both our mondo block and cpu list state.
+ *
+ * One consequence of this is that we cannot use timeout mechanisms
+ * that depend upon interrupts being delivered locally. So, for
+ * example, we cannot sample jiffies and expect it to advance.
+ *
+ * Fortunately, udelay() uses %stick/%tick so we can use that.
+ */
+ local_irq_save(flags);
-static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
-{
- int this_cpu = get_cpu();
- struct trap_per_cpu *tb = &trap_block[this_cpu];
- u64 *mondo = __va(tb->cpu_mondo_block_pa);
- u16 *cpu_list = __va(tb->cpu_list_pa);
- int cnt, retries;
+ this_cpu = smp_processor_id();
+ tb = &trap_block[this_cpu];
+ mondo = __va(tb->cpu_mondo_block_pa);
mondo[0] = data0;
mondo[1] = data1;
mondo[2] = data2;
wmb();
+ cpu_list = __va(tb->cpu_list_pa);
+
+ /* Setup the initial cpu list. */
+ cnt = 0;
+ for_each_cpu_mask(i, mask)
+ cpu_list[cnt++] = i;
+
+ cpus_clear(error_mask);
retries = 0;
- cnt = init_cpu_list(cpu_list, mask);
+ prev_sent = 0;
do {
- register unsigned long func __asm__("%o5");
- register unsigned long arg0 __asm__("%o0");
- register unsigned long arg1 __asm__("%o1");
- register unsigned long arg2 __asm__("%o2");
-
- func = HV_FAST_CPU_MONDO_SEND;
- arg0 = cnt;
- arg1 = tb->cpu_list_pa;
- arg2 = tb->cpu_mondo_block_pa;
-
- __asm__ __volatile__("ta %8"
- : "=&r" (func), "=&r" (arg0),
- "=&r" (arg1), "=&r" (arg2)
- : "0" (func), "1" (arg0),
- "2" (arg1), "3" (arg2),
- "i" (HV_FAST_TRAP)
- : "memory");
- if (likely(arg0 == HV_EOK))
- break;
+ int forward_progress, n_sent;
+
+ status = sun4v_cpu_mondo_send(cnt,
+ tb->cpu_list_pa,
+ tb->cpu_mondo_block_pa);
- if (unlikely(++retries > 100)) {
- printk("CPU[%d]: sun4v mondo error %lu\n",
- this_cpu, func);
+ /* HV_EOK means all cpus received the xcall, we're done. */
+ if (likely(status == HV_EOK))
break;
+
+ /* First, see if we made any forward progress.
+ *
+ * The hypervisor indicates successful sends by setting
+ * cpu list entries to the value 0xffff.
+ */
+ n_sent = 0;
+ for (i = 0; i < cnt; i++) {
+ if (likely(cpu_list[i] == 0xffff))
+ n_sent++;
}
- cnt = update_cpu_list(cpu_list, cnt, mask);
+ forward_progress = 0;
+ if (n_sent > prev_sent)
+ forward_progress = 1;
- udelay(2 * cnt);
- } while (1);
+ prev_sent = n_sent;
- put_cpu();
-}
-#else
-/* Single-cpu list version. */
-static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
-{
- int this_cpu = get_cpu();
- struct trap_per_cpu *tb = &trap_block[this_cpu];
- u64 *mondo = __va(tb->cpu_mondo_block_pa);
- u16 *cpu_list = __va(tb->cpu_list_pa);
- int i;
+ /* If we get a HV_ECPUERROR, then one or more of the cpus
+ * in the list are in error state. Use the cpu_state()
+ * hypervisor call to find out which cpus are in error state.
+ */
+ if (unlikely(status == HV_ECPUERROR)) {
+ for (i = 0; i < cnt; i++) {
+ long err;
+ u16 cpu;
+
+ cpu = cpu_list[i];
+ if (cpu == 0xffff)
+ continue;
+
+ err = sun4v_cpu_state(cpu);
+ if (err >= 0 &&
+ err == HV_CPU_STATE_ERROR) {
+ cpu_list[i] = 0xffff;
+ cpu_set(cpu, error_mask);
+ }
+ }
+ } else if (unlikely(status != HV_EWOULDBLOCK))
+ goto fatal_mondo_error;
+
+ /* Don't bother rewriting the CPU list, just leave the
+ * 0xffff and non-0xffff entries in there and the
+ * hypervisor will do the right thing.
+ *
+ * Only advance timeout state if we didn't make any
+ * forward progress.
+ */
+ if (unlikely(!forward_progress)) {
+ if (unlikely(++retries > 10000))
+ goto fatal_mondo_timeout;
- mondo[0] = data0;
- mondo[1] = data1;
- mondo[2] = data2;
- wmb();
+ /* Delay a little bit to let other cpus catch up
+ * on their cpu mondo queue work.
+ */
+ udelay(2 * cnt);
+ }
+ } while (1);
- for_each_cpu_mask(i, mask) {
- int retries = 0;
+ local_irq_restore(flags);
- do {
- register unsigned long func __asm__("%o5");
- register unsigned long arg0 __asm__("%o0");
- register unsigned long arg1 __asm__("%o1");
- register unsigned long arg2 __asm__("%o2");
-
- cpu_list[0] = i;
- func = HV_FAST_CPU_MONDO_SEND;
- arg0 = 1;
- arg1 = tb->cpu_list_pa;
- arg2 = tb->cpu_mondo_block_pa;
-
- __asm__ __volatile__("ta %8"
- : "=&r" (func), "=&r" (arg0),
- "=&r" (arg1), "=&r" (arg2)
- : "0" (func), "1" (arg0),
- "2" (arg1), "3" (arg2),
- "i" (HV_FAST_TRAP)
- : "memory");
- if (likely(arg0 == HV_EOK))
- break;
+ if (unlikely(!cpus_empty(error_mask)))
+ goto fatal_mondo_cpu_error;
- if (unlikely(++retries > 100)) {
- printk("CPU[%d]: sun4v mondo error %lu\n",
- this_cpu, func);
- break;
- }
+ return;
- udelay(2 * i);
- } while (1);
- }
+fatal_mondo_cpu_error:
+ printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+ "were in error state\n",
+ this_cpu);
+ printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
+ for_each_cpu_mask(i, error_mask)
+ printk("%d ", i);
+ printk("]\n");
+ return;
- put_cpu();
+fatal_mondo_timeout:
+ local_irq_restore(flags);
+ printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+ " progress after %d retries.\n",
+ this_cpu, retries);
+ goto dump_cpu_list_and_out;
+
+fatal_mondo_error:
+ local_irq_restore(flags);
+ printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+ this_cpu, status);
+ printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+ "mondo_block_pa(%lx)\n",
+ this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+
+dump_cpu_list_and_out:
+ printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+ for (i = 0; i < cnt; i++)
+ printk("%u ", cpu_list[i]);
+ printk("]\n");
}
-#endif
/* Send cross call to all processors mentioned in MASK
* except self.
int wait;
};
-static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;
extern unsigned long xcall_call_function;
-/*
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
+ *
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
int nonatomic, int wait, cpumask_t mask)
{
struct call_data_struct data;
- int cpus = cpus_weight(mask) - 1;
- long timeout;
-
- if (!cpus)
- return 0;
+ int cpus;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
spin_lock(&call_lock);
+ cpu_clear(smp_processor_id(), mask);
+ cpus = cpus_weight(mask);
+ if (!cpus)
+ goto out_unlock;
+
call_data = &data;
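+ /* Make sure call_data is visible before the cross call is sent. */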
+ mb();
smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
- /*
- * Wait for other cpus to complete function or at
- * least snap the call data.
- */
- timeout = 1000000;
- while (atomic_read(&data.finished) != cpus) {
- if (--timeout <= 0)
- goto out_timeout;
- barrier();
- udelay(1);
- }
+ /* Wait for response */
+ while (atomic_read(&data.finished) != cpus)
+ cpu_relax();
+out_unlock:
spin_unlock(&call_lock);
return 0;
-
-out_timeout:
- spin_unlock(&call_lock);
- printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
- (long) num_online_cpus() - 1L,
- (long) atomic_read(&data.finished));
- return 0;
}
int smp_call_function(void (*func)(void *info), void *info,
static void tsb_sync(void *info)
{
+ struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
struct mm_struct *mm = info;
- if (current->active_mm == mm)
+ /* It is not valid to test "current->active_mm == mm" here.
+ *
+ * The value of "current" is not changed atomically with
+ * switch_mm(). But that's OK, we just need to check the
+ * current cpu's trap block PGD physical address.
+ */
+ if (tp->pgd_paddr == __pa(mm->pgd))
tsb_context_switch(mm);
}
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
+extern unsigned long xcall_new_mmu_context_version;
#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
put_cpu();
}
+static void __smp_receive_signal_mask(cpumask_t mask)
+{
+ smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
+}
+
void smp_receive_signal(int cpu)
{
cpumask_t mask = cpumask_of_cpu(cpu);
- if (cpu_online(cpu)) {
- u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
-
- if (tlb_type == spitfire)
- spitfire_xcall_deliver(data0, 0, 0, mask);
- else if (tlb_type == cheetah || tlb_type == cheetah_plus)
- cheetah_xcall_deliver(data0, 0, 0, mask);
- else if (tlb_type == hypervisor)
- hypervisor_xcall_deliver(data0, 0, 0, mask);
- }
+ if (cpu_online(cpu))
+ __smp_receive_signal_mask(mask);
}
void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
- /* Just return, rtrap takes care of the rest. */
clear_softint(1 << irq);
}
+void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+{
+ struct mm_struct *mm;
+ unsigned long flags;
+
+ clear_softint(1 << irq);
+
+ /* See if we need to allocate a new TLB context because
+ * the version of the one we are using is now out of date.
+ */
+ mm = current->active_mm;
+ if (unlikely(!mm || (mm == &init_mm)))
+ return;
+
+ spin_lock_irqsave(&mm->context.lock, flags);
+
+ if (unlikely(!CTX_VALID(mm->context)))
+ get_new_mmu_context(mm);
+
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+
+ load_secondary_context(mm);
+ __flush_tlb_mm(CTX_HWBITS(mm->context),
+ SECONDARY_CONTEXT);
+}
+
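+/* Ask every cpu to re-validate its active MMU context after a context
+ * version bump.
+ */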
+void smp_new_mmu_context_version(void)
+{
+ smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
+}
+
void smp_report_regs(void)
{
smp_cross_call(&xcall_report_regs, 0, 0, 0);
preempt_enable();
}
-#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
-#define prof_counter(__cpu) cpu_data(__cpu).counter
+/* /proc/profile writes can call this, don't __init it please. */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+}
-void smp_percpu_timer_interrupt(struct pt_regs *regs)
+void __devinit smp_prepare_boot_cpu(void)
{
- unsigned long compare, tick, pstate;
- int cpu = smp_processor_id();
- int user = user_mode(regs);
+}
- /*
- * Check for level 14 softint.
- */
- {
- unsigned long tick_mask = tick_ops->softint_mask;
+void __devinit smp_fill_in_sib_core_maps(void)
+{
+ unsigned int i;
- if (!(get_softint() & tick_mask)) {
- extern void handler_irq(int, struct pt_regs *);
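+ /* Group cpus that report the same core_id into cpu_core_map. */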
+ for_each_present_cpu(i) {
+ unsigned int j;
- handler_irq(14, regs);
- return;
+ cpus_clear(cpu_core_map[i]);
+ if (cpu_data(i).core_id == 0) {
+ cpu_set(i, cpu_core_map[i]);
+ continue;
+ }
+
+ for_each_present_cpu(j) {
+ if (cpu_data(i).core_id ==
+ cpu_data(j).core_id)
+ cpu_set(j, cpu_core_map[i]);
}
- clear_softint(tick_mask);
}
- do {
- profile_tick(CPU_PROFILING, regs);
- if (!--prof_counter(cpu)) {
- irq_enter();
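+ /* Group cpus that report the same proc_id into cpu_sibling_map. */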
+ for_each_present_cpu(i) {
+ unsigned int j;
- if (cpu == boot_cpu_id) {
- kstat_this_cpu.irqs[0]++;
- timer_tick_interrupt(regs);
- }
+ cpus_clear(cpu_sibling_map[i]);
+ if (cpu_data(i).proc_id == -1) {
+ cpu_set(i, cpu_sibling_map[i]);
+ continue;
+ }
- update_process_times(user);
+ for_each_present_cpu(j) {
+ if (cpu_data(i).proc_id ==
+ cpu_data(j).proc_id)
+ cpu_set(j, cpu_sibling_map[i]);
+ }
+ }
+}
- irq_exit();
+int __cpuinit __cpu_up(unsigned int cpu)
+{
+ int ret = smp_boot_one_cpu(cpu);
- prof_counter(cpu) = prof_multiplier(cpu);
+ if (!ret) {
+ cpu_set(cpu, smp_commenced_mask);
+ while (!cpu_isset(cpu, cpu_online_map))
+ mb();
+ if (!cpu_isset(cpu, cpu_online_map)) {
+ ret = -ENODEV;
+ } else {
+ /* On SUN4V, writes to %tick and %stick are
+ * not allowed.
+ */
+ if (tlb_type != hypervisor)
+ smp_synchronize_one_tick(cpu);
}
-
- /* Guarantee that the following sequences execute
- * uninterrupted.
- */
- __asm__ __volatile__("rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate"
- : "=r" (pstate)
- : "i" (PSTATE_IE));
-
- compare = tick_ops->add_compare(current_tick_offset);
- tick = tick_ops->get_tick();
-
- /* Restore PSTATE_IE. */
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
- : /* no outputs */
- : "r" (pstate));
- } while (time_after_eq(tick, compare));
+ }
+ return ret;
}
-static void __init smp_setup_percpu_timer(void)
+#ifdef CONFIG_HOTPLUG_CPU
+void cpu_play_dead(void)
{
int cpu = smp_processor_id();
unsigned long pstate;
- prof_counter(cpu) = prof_multiplier(cpu) = 1;
+ idle_task_exit();
- /* Guarantee that the following sequences execute
- * uninterrupted.
- */
- __asm__ __volatile__("rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate"
- : "=r" (pstate)
- : "i" (PSTATE_IE));
+ if (tlb_type == hypervisor) {
+ struct trap_per_cpu *tb = &trap_block[cpu];
+
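+ /* Unconfigure this cpu's mondo and error queues before it goes away. */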
+ sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
+ tb->cpu_mondo_pa, 0);
+ sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
+ tb->dev_mondo_pa, 0);
+ sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
+ tb->resum_mondo_pa, 0);
+ sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
+ tb->nonresum_mondo_pa, 0);
+ }
- tick_ops->init_tick(current_tick_offset);
+ cpu_clear(cpu, smp_commenced_mask);
+ membar_safe("#Sync");
- /* Restore PSTATE_IE. */
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
- : /* no outputs */
- : "r" (pstate));
-}
+ local_irq_disable();
-void __init smp_tick_init(void)
-{
- boot_cpu_id = hard_smp_processor_id();
- current_tick_offset = timer_tick_offset;
+ __asm__ __volatile__(
+ "rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
+ : "=r" (pstate)
+ : "i" (PSTATE_IE));
- cpu_set(boot_cpu_id, cpu_online_map);
- prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
+ while (1)
+ barrier();
}
-/* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
-
-int setup_profiling_timer(unsigned int multiplier)
+int __cpu_disable(void)
{
- unsigned long flags;
+ int cpu = smp_processor_id();
+ cpuinfo_sparc *c;
int i;
- if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
- return -EINVAL;
-
- spin_lock_irqsave(&prof_setup_lock, flags);
- for (i = 0; i < NR_CPUS; i++)
- prof_multiplier(i) = multiplier;
- current_tick_offset = (timer_tick_offset / multiplier);
- spin_unlock_irqrestore(&prof_setup_lock, flags);
+ for_each_cpu_mask(i, cpu_core_map[cpu])
+ cpu_clear(cpu, cpu_core_map[i]);
+ cpus_clear(cpu_core_map[cpu]);
- return 0;
-}
-
-/* Constrain the number of cpus to max_cpus. */
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
- if (num_possible_cpus() > max_cpus) {
- int instance, mid;
-
- instance = 0;
- while (!cpu_find_by_instance(instance, NULL, &mid)) {
- if (mid != boot_cpu_id) {
- cpu_clear(mid, phys_cpu_present_map);
- if (num_possible_cpus() <= max_cpus)
- break;
- }
- instance++;
- }
- }
+ for_each_cpu_mask(i, cpu_sibling_map[cpu])
+ cpu_clear(cpu, cpu_sibling_map[i]);
+ cpus_clear(cpu_sibling_map[cpu]);
- smp_store_cpu_info(boot_cpu_id);
-}
+ c = &cpu_data(cpu);
-/* Set this up early so that things like the scheduler can init
- * properly. We use the same cpu mask for both the present and
- * possible cpu map.
- */
-void __init smp_setup_cpu_possible_map(void)
-{
- int instance, mid;
+ c->core_id = 0;
+ c->proc_id = -1;
- instance = 0;
- while (!cpu_find_by_instance(instance, NULL, &mid)) {
- if (mid < NR_CPUS)
- cpu_set(mid, phys_cpu_present_map);
- instance++;
- }
-}
+ spin_lock(&call_lock);
+ cpu_clear(cpu, cpu_online_map);
+ spin_unlock(&call_lock);
-void __devinit smp_prepare_boot_cpu(void)
-{
- int cpu = hard_smp_processor_id();
+ smp_wmb();
- if (cpu >= NR_CPUS) {
- prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
- prom_halt();
- }
+ /* Make sure no interrupts point to this cpu. */
+ fixup_irqs();
- current_thread_info()->cpu = cpu;
- __local_per_cpu_offset = __per_cpu_offset(cpu);
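+ /* Let any interrupts already headed at this cpu be serviced
+ * before it goes offline.
+ */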
+ local_irq_enable();
+ mdelay(1);
+ local_irq_disable();
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), phys_cpu_present_map);
+ return 0;
}
-int __devinit __cpu_up(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
{
- int ret = smp_boot_one_cpu(cpu);
+ int i;
- if (!ret) {
- cpu_set(cpu, smp_commenced_mask);
- while (!cpu_isset(cpu, cpu_online_map))
- mb();
- if (!cpu_isset(cpu, cpu_online_map)) {
- ret = -ENODEV;
- } else {
- /* On SUN4V, writes to %tick and %stick are
- * not allowed.
- */
- if (tlb_type != hypervisor)
- smp_synchronize_one_tick(cpu);
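+ /* Give the dying cpu up to ten seconds to drop out of
+ * smp_commenced_mask.
+ */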
+ for (i = 0; i < 100; i++) {
+ smp_rmb();
+ if (!cpu_isset(cpu, smp_commenced_mask))
+ break;
+ msleep(100);
+ }
+ if (cpu_isset(cpu, smp_commenced_mask)) {
+ printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+ } else {
+#if defined(CONFIG_SUN_LDOMS)
+ unsigned long hv_err;
+ int limit = 100;
+
+ do {
+ hv_err = sun4v_cpu_stop(cpu);
+ if (hv_err == HV_EOK) {
+ cpu_clear(cpu, cpu_present_map);
+ break;
+ }
+ } while (--limit > 0);
+ if (limit <= 0) {
+ printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
+ hv_err);
}
+#endif
}
- return ret;
}
+#endif
void __init smp_cpus_done(unsigned int max_cpus)
{
- unsigned long bogosum = 0;
- int i;
-
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_online(i))
- bogosum += cpu_data(i).udelay_val;
- }
- printk("Total of %ld processors activated "
- "(%lu.%02lu BogoMIPS).\n",
- (long) num_online_cpus(),
- bogosum/(500000/HZ),
- (bogosum/(5000/HZ))%100);
}
void smp_send_reschedule(int cpu)
EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);
-void __init setup_per_cpu_areas(void)
+void __init real_setup_per_cpu_areas(void)
{
unsigned long goal, size, i;
char *ptr;
/* Copy section for each CPU (we discard the original) */
- goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
-#ifdef CONFIG_MODULES
- if (goal < PERCPU_ENOUGH_ROOM)
- goal = PERCPU_ENOUGH_ROOM;
-#endif
- __per_cpu_shift = 0;
- for (size = 1UL; size < goal; size <<= 1UL)
+ goal = PERCPU_ENOUGH_ROOM;
+
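+ /* Size the per-cpu area as a power-of-two number of pages so that
+ * __per_cpu_offset() is just a shift of the cpu number.
+ */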
+ __per_cpu_shift = PAGE_SHIFT;
+ for (size = PAGE_SIZE; size < goal; size <<= 1UL)
__per_cpu_shift++;
- ptr = alloc_bootmem(size * NR_CPUS);
+ ptr = alloc_bootmem_pages(size * NR_CPUS);
__per_cpu_base = ptr - __per_cpu_start;
for (i = 0; i < NR_CPUS; i++, ptr += size)
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+ /* Setup %g5 for the boot cpu. */
+ __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}