sched: zap the migration init / cache-hot balancing code
[linux-3.10.git] / arch/sparc64/kernel/smp.c
index 2dbe008d6b7a4822c373c7a0517fc050771cbdf0..40e40f968d61abd9ef8192aeb1a6f2a9b318c7e7 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/pagemap.h>
 #include <linux/threads.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/delay.h>
@@ -31,6 +30,7 @@
 #include <asm/cpudata.h>
 
 #include <asm/irq.h>
+#include <asm/irq_regs.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/oplib.h>
 #include <asm/starfire.h>
 #include <asm/tlb.h>
 #include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/mdesc.h>
 
 extern void calibrate_delay(void);
 
+int sparc64_multi_core __read_mostly;
+
 /* Please don't make this stuff initdata!!!  --DaveM */
-static unsigned char boot_cpu_id;
+unsigned char boot_cpu_id;
 
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
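+/* cpu_core_map[i] collects the cpus that share cpu i's core_id, and
+ * cpu_sibling_map[i] those that share its proc_id; both are filled
+ * in by smp_fill_in_sib_core_maps() below.
+ */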
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
+       { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+       { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
 
@@ -55,64 +63,24 @@ void smp_info(struct seq_file *m)
        int i;
        
        seq_printf(m, "State:\n");
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_online(i))
-                       seq_printf(m,
-                                  "CPU%d:\t\tonline\n", i);
-       }
+       for_each_online_cpu(i)
+               seq_printf(m, "CPU%d:\t\tonline\n", i);
 }
 
 void smp_bogo(struct seq_file *m)
 {
        int i;
        
-       for (i = 0; i < NR_CPUS; i++)
-               if (cpu_online(i))
-                       seq_printf(m,
-                                  "Cpu%dBogo\t: %lu.%02lu\n"
-                                  "Cpu%dClkTck\t: %016lx\n",
-                                  i, cpu_data(i).udelay_val / (500000/HZ),
-                                  (cpu_data(i).udelay_val / (5000/HZ)) % 100,
-                                  i, cpu_data(i).clock_tick);
-}
-
-void __init smp_store_cpu_info(int id)
-{
-       int cpu_node;
-
-       /* multiplier and counter set by
-          smp_setup_percpu_timer()  */
-       cpu_data(id).udelay_val                 = loops_per_jiffy;
-
-       cpu_find_by_mid(id, &cpu_node);
-       cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
-                                                    "clock-frequency", 0);
-
-       cpu_data(id).idle_volume                = 1;
-
-       cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
-                                                     16 * 1024);
-       cpu_data(id).dcache_line_size =
-               prom_getintdefault(cpu_node, "dcache-line-size", 32);
-       cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
-                                                     16 * 1024);
-       cpu_data(id).icache_line_size =
-               prom_getintdefault(cpu_node, "icache-line-size", 32);
-       cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
-                                                     4 * 1024 * 1024);
-       cpu_data(id).ecache_line_size =
-               prom_getintdefault(cpu_node, "ecache-line-size", 64);
-       printk("CPU[%d]: Caches "
-              "D[sz(%d):line_sz(%d)] "
-              "I[sz(%d):line_sz(%d)] "
-              "E[sz(%d):line_sz(%d)]\n",
-              id,
-              cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
-              cpu_data(id).icache_size, cpu_data(id).icache_line_size,
-              cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
+       for_each_online_cpu(i)
+               seq_printf(m,
+                          "Cpu%dBogo\t: %lu.%02lu\n"
+                          "Cpu%dClkTck\t: %016lx\n",
+                          i, cpu_data(i).udelay_val / (500000/HZ),
+                          (cpu_data(i).udelay_val / (5000/HZ)) % 100,
+                          i, cpu_data(i).clock_tick);
 }
 
-static void smp_setup_percpu_timer(void);
+extern void setup_sparc64_timer(void);
 
 static volatile unsigned long callin_flag = 0;
 
@@ -122,14 +90,12 @@ void __init smp_callin(void)
 
        __local_per_cpu_offset = __per_cpu_offset(cpuid);
 
-       if (tlb_type == hypervisor) {
-               sun4v_register_fault_status();
+       if (tlb_type == hypervisor)
                sun4v_ktsb_register();
-       }
 
        __flush_tlb_all();
 
-       smp_setup_percpu_timer();
+       setup_sparc64_timer();
 
        if (cheetah_pcache_forced_on)
                cheetah_enable_pcache();
@@ -137,7 +103,7 @@ void __init smp_callin(void)
        local_irq_enable();
 
        calibrate_delay();
-       smp_store_cpu_info(cpuid);
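+       /* Only the BogoMIPS value needs recording here now; the cache
+        * and clock properties are set up elsewhere during boot.
+        */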
+       cpu_data(cpuid).udelay_val = loops_per_jiffy;
        callin_flag = 1;
        __asm__ __volatile__("membar #Sync\n\t"
                             "flush  %%g6" : : : "memory");
@@ -166,8 +132,6 @@ void cpu_panic(void)
        panic("SMP bolixed\n");
 }
 
-static unsigned long current_tick_offset __read_mostly;
-
 /* This tick register synchronization scheme is taken entirely from
  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
  *
@@ -250,7 +214,7 @@ void smp_synchronize_tick_client(void)
                                } else
                                        adj = -delta;
 
-                               tick_ops->add_tick(adj, current_tick_offset);
+                               tick_ops->add_tick(adj);
                        }
 #if DEBUG_TICK_SYNC
                        t[i].rt = rt;
@@ -304,6 +268,8 @@ static void smp_synchronize_one_tick(int cpu)
        spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
+extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
+
 extern unsigned long sparc64_cpu_startup;
 
 /* The OBP cpu startup callback truncates the 3rd arg cookie to
@@ -319,21 +285,30 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        struct task_struct *p;
-       int timeout, ret, cpu_node;
+       int timeout, ret;
 
        p = fork_idle(cpu);
        callin_flag = 0;
        cpu_new_thread = task_thread_info(p);
        cpu_set(cpu, cpu_callout_map);
 
-       cpu_find_by_mid(cpu, &cpu_node);
-       prom_startcpu(cpu_node, entry, cookie);
+       if (tlb_type == hypervisor) {
+               /* Alloc the mondo queues, cpu will load them.  */
+               sun4v_init_mondo_queues(0, cpu, 1, 0);
+
+               prom_startcpu_cpuid(cpu, entry, cookie);
+       } else {
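+               /* Pre-sun4v: look up the cpu's firmware device node
+                * and have OBP start the cpu.
+                */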
+               struct device_node *dp = of_find_node_by_cpuid(cpu);
+
+               prom_startcpu(dp->node, entry, cookie);
+       }
 
        for (timeout = 0; timeout < 5000000; timeout++) {
                if (callin_flag)
                        break;
                udelay(100);
        }
+
        if (callin_flag) {
                ret = 0;
        } else {
@@ -429,7 +404,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
        u64 pstate, ver;
-       int nack_busy_id, is_jbus;
+       int nack_busy_id, is_jbus, need_more;
 
        if (cpus_empty(mask))
                return;
@@ -445,6 +420,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 
 retry:
+       need_more = 0;
        __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
                             : : "r" (pstate), "i" (PSTATE_IE));
 
@@ -473,6 +449,10 @@ retry:
                                : /* no outputs */
                                : "r" (target), "i" (ASI_INTR_W));
                        nack_busy_id++;
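+                       /* The dispatch status register tracks two bits
+                        * per target, so one send can cover at most 32
+                        * cpus; note that another pass is needed.
+                        */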
+                       if (nack_busy_id == 32) {
+                               need_more = 1;
+                               break;
+                       }
                }
        }
 
@@ -489,6 +469,16 @@ retry:
                        if (dispatch_stat == 0UL) {
                                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                                     : : "r" (pstate));
+                               if (unlikely(need_more)) {
+                                       int i, cnt = 0;
+                                       for_each_cpu_mask(i, mask) {
+                                               cpu_clear(i, mask);
+                                               cnt++;
+                                               if (cnt == 32)
+                                                       break;
+                                       }
+                                       goto retry;
+                               }
                                return;
                        }
                        if (!--stuck)
@@ -526,6 +516,8 @@ retry:
                                if ((dispatch_stat & check_mask) == 0)
                                        cpu_clear(i, mask);
                                this_busy_nack += 2;
+                               if (this_busy_nack == 64)
+                                       break;
                        }
 
                        goto retry;
@@ -533,133 +525,157 @@ retry:
        }
 }
 
-#if 0
 /* Multi-cpu list version.  */
-static int init_cpu_list(u16 *list, cpumask_t mask)
-{
-       int i, cnt;
-
-       cnt = 0;
-       for_each_cpu_mask(i, mask)
-               list[cnt++] = i;
-
-       return cnt;
-}
-
-static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask)
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
-       int i;
+       struct trap_per_cpu *tb;
+       u16 *cpu_list;
+       u64 *mondo;
+       cpumask_t error_mask;
+       unsigned long flags, status;
+       int cnt, retries, this_cpu, prev_sent, i;
 
-       for (i = 0; i < orig_cnt; i++) {
-               if (list[i] == 0xffff)
-                       cpu_clear(i, mask);
-       }
+       if (cpus_empty(mask))
+               return;
 
-       return init_cpu_list(list, mask);
-}
+       /* We have to do this whole thing with interrupts fully disabled.
+        * Otherwise if we send an xcall from interrupt context it will
+        * corrupt both our mondo block and cpu list state.
+        *
+        * One consequence of this is that we cannot use timeout mechanisms
+        * that depend upon interrupts being delivered locally.  So, for
+        * example, we cannot sample jiffies and expect it to advance.
+        *
+        * Fortunately, udelay() uses %stick/%tick so we can use that.
+        */
+       local_irq_save(flags);
 
-static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
-{
-       int this_cpu = get_cpu();
-       struct trap_per_cpu *tb = &trap_block[this_cpu];
-       u64 *mondo = __va(tb->cpu_mondo_block_pa);
-       u16 *cpu_list = __va(tb->cpu_list_pa);
-       int cnt, retries;
+       this_cpu = smp_processor_id();
+       tb = &trap_block[this_cpu];
 
+       mondo = __va(tb->cpu_mondo_block_pa);
        mondo[0] = data0;
        mondo[1] = data1;
        mondo[2] = data2;
        wmb();
 
+       cpu_list = __va(tb->cpu_list_pa);
+
+       /* Setup the initial cpu list.  */
+       cnt = 0;
+       for_each_cpu_mask(i, mask)
+               cpu_list[cnt++] = i;
+
+       cpus_clear(error_mask);
        retries = 0;
-       cnt = init_cpu_list(cpu_list, mask);
+       prev_sent = 0;
        do {
-               register unsigned long func __asm__("%o5");
-               register unsigned long arg0 __asm__("%o0");
-               register unsigned long arg1 __asm__("%o1");
-               register unsigned long arg2 __asm__("%o2");
-
-               func = HV_FAST_CPU_MONDO_SEND;
-               arg0 = cnt;
-               arg1 = tb->cpu_list_pa;
-               arg2 = tb->cpu_mondo_block_pa;
-
-               __asm__ __volatile__("ta        %8"
-                                    : "=&r" (func), "=&r" (arg0),
-                                      "=&r" (arg1), "=&r" (arg2)
-                                    : "0" (func), "1" (arg0),
-                                      "2" (arg1), "3" (arg2),
-                                      "i" (HV_FAST_TRAP)
-                                    : "memory");
-               if (likely(arg0 == HV_EOK))
-                       break;
+               int forward_progress, n_sent;
 
-               if (unlikely(++retries > 100)) {
-                       printk("CPU[%d]: sun4v mondo error %lu\n",
-                              this_cpu, func);
+               status = sun4v_cpu_mondo_send(cnt,
+                                             tb->cpu_list_pa,
+                                             tb->cpu_mondo_block_pa);
+
+               /* HV_EOK means all cpus received the xcall, we're done.  */
+               if (likely(status == HV_EOK))
                        break;
+
+               /* First, see if we made any forward progress.
+                *
+                * The hypervisor indicates successful sends by setting
+                * cpu list entries to the value 0xffff.
+                */
+               n_sent = 0;
+               for (i = 0; i < cnt; i++) {
+                       if (likely(cpu_list[i] == 0xffff))
+                               n_sent++;
                }
 
-               cnt = update_cpu_list(cpu_list, cnt, mask);
+               forward_progress = 0;
+               if (n_sent > prev_sent)
+                       forward_progress = 1;
 
-               udelay(2 * cnt);
-       } while (1);
+               prev_sent = n_sent;
 
-       put_cpu();
-}
-#else
-/* Single-cpu list version.  */
-static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
-{
-       int this_cpu = get_cpu();
-       struct trap_per_cpu *tb = &trap_block[this_cpu];
-       u64 *mondo = __va(tb->cpu_mondo_block_pa);
-       u16 *cpu_list = __va(tb->cpu_list_pa);
-       int i;
+               /* If we get a HV_ECPUERROR, then one or more of the cpus
+                * in the list are in error state.  Use the cpu_state()
+                * hypervisor call to find out which cpus are in error state.
+                */
+               if (unlikely(status == HV_ECPUERROR)) {
+                       for (i = 0; i < cnt; i++) {
+                               long err;
+                               u16 cpu;
+
+                               cpu = cpu_list[i];
+                               if (cpu == 0xffff)
+                                       continue;
+
+                               err = sun4v_cpu_state(cpu);
+                               if (err >= 0 &&
+                                   err == HV_CPU_STATE_ERROR) {
+                                       cpu_list[i] = 0xffff;
+                                       cpu_set(cpu, error_mask);
+                               }
+                       }
+               } else if (unlikely(status != HV_EWOULDBLOCK))
+                       goto fatal_mondo_error;
+
+               /* Don't bother rewriting the CPU list, just leave the
+                * 0xffff and non-0xffff entries in there and the
+                * hypervisor will do the right thing.
+                *
+                * Only advance timeout state if we didn't make any
+                * forward progress.
+                */
+               if (unlikely(!forward_progress)) {
+                       if (unlikely(++retries > 10000))
+                               goto fatal_mondo_timeout;
 
-       mondo[0] = data0;
-       mondo[1] = data1;
-       mondo[2] = data2;
-       wmb();
+                       /* Delay a little bit to let other cpus catch up
+                        * on their cpu mondo queue work.
+                        */
+                       udelay(2 * cnt);
+               }
+       } while (1);
 
-       for_each_cpu_mask(i, mask) {
-               int retries = 0;
+       local_irq_restore(flags);
 
-               do {
-                       register unsigned long func __asm__("%o5");
-                       register unsigned long arg0 __asm__("%o0");
-                       register unsigned long arg1 __asm__("%o1");
-                       register unsigned long arg2 __asm__("%o2");
-
-                       cpu_list[0] = i;
-                       func = HV_FAST_CPU_MONDO_SEND;
-                       arg0 = 1;
-                       arg1 = tb->cpu_list_pa;
-                       arg2 = tb->cpu_mondo_block_pa;
-
-                       __asm__ __volatile__("ta        %8"
-                                            : "=&r" (func), "=&r" (arg0),
-                                              "=&r" (arg1), "=&r" (arg2)
-                                            : "0" (func), "1" (arg0),
-                                              "2" (arg1), "3" (arg2),
-                                              "i" (HV_FAST_TRAP)
-                                            : "memory");
-                       if (likely(arg0 == HV_EOK))
-                               break;
+       if (unlikely(!cpus_empty(error_mask)))
+               goto fatal_mondo_cpu_error;
 
-                       if (unlikely(++retries > 100)) {
-                               printk("CPU[%d]: sun4v mondo error %lu\n",
-                                      this_cpu, func);
-                               break;
-                       }
+       return;
 
-                       udelay(2 * i);
-               } while (1);
-       }
+fatal_mondo_cpu_error:
+       printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+              "were in error state\n",
+              this_cpu);
+       printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
+       for_each_cpu_mask(i, error_mask)
+               printk("%d ", i);
+       printk("]\n");
+       return;
 
-       put_cpu();
+fatal_mondo_timeout:
+       local_irq_restore(flags);
+       printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+              " progress after %d retries.\n",
+              this_cpu, retries);
+       goto dump_cpu_list_and_out;
+
+fatal_mondo_error:
+       local_irq_restore(flags);
+       printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+              this_cpu, status);
+       printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+              "mondo_block_pa(%lx)\n",
+              this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+
+dump_cpu_list_and_out:
+       printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+       for (i = 0; i < cnt; i++)
+               printk("%u ", cpu_list[i]);
+       printk("]\n");
 }
-#endif
 
 /* Send cross call to all processors mentioned in MASK
  * except self.
@@ -704,12 +720,21 @@ struct call_data_struct {
        int wait;
 };
 
-static DEFINE_SPINLOCK(call_lock);
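+/* Cacheline-align the lock so xcall senders do not false-share
+ * with nearby data.
+ */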
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
 static struct call_data_struct *call_data;
 
 extern unsigned long xcall_call_function;
 
-/*
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
@@ -717,11 +742,7 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
                                  int nonatomic, int wait, cpumask_t mask)
 {
        struct call_data_struct data;
-       int cpus = cpus_weight(mask) - 1;
-       long timeout;
-
-       if (!cpus)
-               return 0;
+       int cpus;
 
        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());
@@ -733,32 +754,24 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 
        spin_lock(&call_lock);
 
+       cpu_clear(smp_processor_id(), mask);
+       cpus = cpus_weight(mask);
+       if (!cpus)
+               goto out_unlock;
+
        call_data = &data;
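+       /* Order the call_data store before the cross call so the
+        * target cpus never see a stale pointer.
+        */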
+       mb();
 
        smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
 
-       /* 
-        * Wait for other cpus to complete function or at
-        * least snap the call data.
-        */
-       timeout = 1000000;
-       while (atomic_read(&data.finished) != cpus) {
-               if (--timeout <= 0)
-                       goto out_timeout;
-               barrier();
-               udelay(1);
-       }
+       /* Wait for response */
+       while (atomic_read(&data.finished) != cpus)
+               cpu_relax();
 
+out_unlock:
        spin_unlock(&call_lock);
 
        return 0;
-
-out_timeout:
-       spin_unlock(&call_lock);
-       printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
-              (long) num_online_cpus() - 1L,
-              (long) atomic_read(&data.finished));
-       return 0;
 }
 
 int smp_call_function(void (*func)(void *info), void *info,
@@ -787,9 +800,16 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
 
 static void tsb_sync(void *info)
 {
+       struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
        struct mm_struct *mm = info;
 
-       if (current->active_mm == mm)
+       /* It is not valid to test "currrent->active_mm == mm" here.
+        *
+        * The value of "current" is not changed atomically with
+        * switch_mm().  But that's OK, we just need to check the
+        * current cpu's trap block PGD physical address.
+        */
+       if (tp->pgd_paddr == __pa(mm->pgd))
                tsb_context_switch(mm);
 }
 
@@ -803,6 +823,7 @@ extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
+extern unsigned long xcall_new_mmu_context_version;
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -917,28 +938,55 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        put_cpu();
 }
 
+static void __smp_receive_signal_mask(cpumask_t mask)
+{
+       smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
+}
+
 void smp_receive_signal(int cpu)
 {
        cpumask_t mask = cpumask_of_cpu(cpu);
 
-       if (cpu_online(cpu)) {
-               u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
-
-               if (tlb_type == spitfire)
-                       spitfire_xcall_deliver(data0, 0, 0, mask);
-               else if (tlb_type == cheetah || tlb_type == cheetah_plus)
-                       cheetah_xcall_deliver(data0, 0, 0, mask);
-               else if (tlb_type == hypervisor)
-                       hypervisor_xcall_deliver(data0, 0, 0, mask);
-       }
+       if (cpu_online(cpu))
+               __smp_receive_signal_mask(mask);
 }
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
-       /* Just return, rtrap takes care of the rest. */
        clear_softint(1 << irq);
 }
 
+void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+{
+       struct mm_struct *mm;
+       unsigned long flags;
+
+       clear_softint(1 << irq);
+
+       /* See if we need to allocate a new TLB context because
+        * the version of the one we are using is now out of date.
+        */
+       mm = current->active_mm;
+       if (unlikely(!mm || (mm == &init_mm)))
+               return;
+
+       spin_lock_irqsave(&mm->context.lock, flags);
+
+       if (unlikely(!CTX_VALID(mm->context)))
+               get_new_mmu_context(mm);
+
+       spin_unlock_irqrestore(&mm->context.lock, flags);
+
+       load_secondary_context(mm);
+       __flush_tlb_mm(CTX_HWBITS(mm->context),
+                      SECONDARY_CONTEXT);
+}
+
+void smp_new_mmu_context_version(void)
+{
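+       /* Ask the other cpus to re-validate their MMU context; each
+        * one runs smp_new_mmu_context_version_client() above.
+        */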
+       smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
+}
+
 void smp_report_regs(void)
 {
        smp_cross_call(&xcall_report_regs, 0, 0, 0);
@@ -1104,170 +1152,76 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
        preempt_enable();
 }
 
-#define prof_multiplier(__cpu)         cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)            cpu_data(__cpu).counter
-
-void smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
-       unsigned long compare, tick, pstate;
-       int cpu = smp_processor_id();
-       int user = user_mode(regs);
-
-       /*
-        * Check for level 14 softint.
-        */
-       {
-               unsigned long tick_mask = tick_ops->softint_mask;
-
-               if (!(get_softint() & tick_mask)) {
-                       extern void handler_irq(int, struct pt_regs *);
-
-                       handler_irq(14, regs);
-                       return;
-               }
-               clear_softint(tick_mask);
-       }
-
-       do {
-               profile_tick(CPU_PROFILING, regs);
-               if (!--prof_counter(cpu)) {
-                       irq_enter();
-
-                       if (cpu == boot_cpu_id) {
-                               kstat_this_cpu.irqs[0]++;
-                               timer_tick_interrupt(regs);
-                       }
-
-                       update_process_times(user);
-
-                       irq_exit();
-
-                       prof_counter(cpu) = prof_multiplier(cpu);
-               }
-
-               /* Guarantee that the following sequences execute
-                * uninterrupted.
-                */
-               __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
-                                    "wrpr      %0, %1, %%pstate"
-                                    : "=r" (pstate)
-                                    : "i" (PSTATE_IE));
-
-               compare = tick_ops->add_compare(current_tick_offset);
-               tick = tick_ops->get_tick();
-
-               /* Restore PSTATE_IE. */
-               __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
-                                    : /* no outputs */
-                                    : "r" (pstate));
-       } while (time_after_eq(tick, compare));
-}
-
-static void __init smp_setup_percpu_timer(void)
-{
-       int cpu = smp_processor_id();
-       unsigned long pstate;
-
-       prof_counter(cpu) = prof_multiplier(cpu) = 1;
-
-       /* Guarantee that the following sequences execute
-        * uninterrupted.
-        */
-       __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
-                            "wrpr      %0, %1, %%pstate"
-                            : "=r" (pstate)
-                            : "i" (PSTATE_IE));
-
-       tick_ops->init_tick(current_tick_offset);
-
-       /* Restore PSTATE_IE. */
-       __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
-                            : /* no outputs */
-                            : "r" (pstate));
-}
-
 void __init smp_tick_init(void)
 {
        boot_cpu_id = hard_smp_processor_id();
-       current_tick_offset = timer_tick_offset;
-
-       cpu_set(boot_cpu_id, cpu_online_map);
-       prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
-
 int setup_profiling_timer(unsigned int multiplier)
 {
-       unsigned long flags;
-       int i;
-
-       if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
-               return -EINVAL;
-
-       spin_lock_irqsave(&prof_setup_lock, flags);
-       for (i = 0; i < NR_CPUS; i++)
-               prof_multiplier(i) = multiplier;
-       current_tick_offset = (timer_tick_offset / multiplier);
-       spin_unlock_irqrestore(&prof_setup_lock, flags);
-
-       return 0;
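+       /* The per-cpu tick is driven by the generic timer code now
+        * (see setup_sparc64_timer), so profiling multipliers are
+        * no longer supported.
+        */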
+       return -EINVAL;
 }
 
 /* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       if (num_possible_cpus() > max_cpus) {
-               int instance, mid;
+       int i;
 
-               instance = 0;
-               while (!cpu_find_by_instance(instance, NULL, &mid)) {
-                       if (mid != boot_cpu_id) {
-                               cpu_clear(mid, phys_cpu_present_map);
+       if (num_possible_cpus() > max_cpus) {
+               for_each_possible_cpu(i) {
+                       if (i != boot_cpu_id) {
+                               cpu_clear(i, phys_cpu_present_map);
+                               cpu_clear(i, cpu_present_map);
                                if (num_possible_cpus() <= max_cpus)
                                        break;
                        }
-                       instance++;
                }
        }
 
-       smp_store_cpu_info(boot_cpu_id);
+       cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
 }
 
-/* Set this up early so that things like the scheduler can init
- * properly.  We use the same cpu mask for both the present and
- * possible cpu map.
- */
-void __init smp_setup_cpu_possible_map(void)
+void __devinit smp_prepare_boot_cpu(void)
 {
-       int instance, mid;
-
-       instance = 0;
-       while (!cpu_find_by_instance(instance, NULL, &mid)) {
-               if (mid < NR_CPUS)
-                       cpu_set(mid, phys_cpu_present_map);
-               instance++;
-       }
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void __devinit smp_fill_in_sib_core_maps(void)
 {
-       int cpu = hard_smp_processor_id();
+       unsigned int i;
+
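+       /* First pass: group cpus sharing a core_id into cpu_core_map.
+        * A core_id of zero means no topology information, so such a
+        * cpu is only a core-mate of itself.
+        */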
+       for_each_possible_cpu(i) {
+               unsigned int j;
+
+               if (cpu_data(i).core_id == 0) {
+                       cpu_set(i, cpu_core_map[i]);
+                       continue;
+               }
 
-       if (cpu >= NR_CPUS) {
-               prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
-               prom_halt();
+               for_each_possible_cpu(j) {
+                       if (cpu_data(i).core_id ==
+                           cpu_data(j).core_id)
+                               cpu_set(j, cpu_core_map[i]);
+               }
        }
 
-       current_thread_info()->cpu = cpu;
-       __local_per_cpu_offset = __per_cpu_offset(cpu);
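+       /* Second pass: group cpus sharing a proc_id into
+        * cpu_sibling_map; a proc_id of -1 means unknown.
+        */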
+       for_each_possible_cpu(i) {
+               unsigned int j;
 
-       cpu_set(smp_processor_id(), cpu_online_map);
-       cpu_set(smp_processor_id(), phys_cpu_present_map);
+               if (cpu_data(i).proc_id == -1) {
+                       cpu_set(i, cpu_sibling_map[i]);
+                       continue;
+               }
+
+               for_each_possible_cpu(j) {
+                       if (cpu_data(i).proc_id ==
+                           cpu_data(j).proc_id)
+                               cpu_set(j, cpu_sibling_map[i]);
+               }
+       }
 }
 
-int __devinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu)
 {
        int ret = smp_boot_one_cpu(cpu);
 
@@ -1293,10 +1247,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
        unsigned long bogosum = 0;
        int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_online(i))
-                       bogosum += cpu_data(i).udelay_val;
-       }
+       for_each_online_cpu(i)
+               bogosum += cpu_data(i).udelay_val;
        printk("Total of %ld processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               (long) num_online_cpus(),
@@ -1322,25 +1274,25 @@ unsigned long __per_cpu_shift __read_mostly;
 EXPORT_SYMBOL(__per_cpu_base);
 EXPORT_SYMBOL(__per_cpu_shift);
 
-void __init setup_per_cpu_areas(void)
+void __init real_setup_per_cpu_areas(void)
 {
        unsigned long goal, size, i;
        char *ptr;
 
        /* Copy section for each CPU (we discard the original) */
-       goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
-#ifdef CONFIG_MODULES
-       if (goal < PERCPU_ENOUGH_ROOM)
-               goal = PERCPU_ENOUGH_ROOM;
-#endif
-       __per_cpu_shift = 0;
-       for (size = 1UL; size < goal; size <<= 1UL)
+       goal = PERCPU_ENOUGH_ROOM;
+
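+       /* Use the smallest power-of-two multiple of PAGE_SIZE that
+        * covers the goal; __per_cpu_shift ends up as log2(size).
+        */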
+       __per_cpu_shift = PAGE_SHIFT;
+       for (size = PAGE_SIZE; size < goal; size <<= 1UL)
                __per_cpu_shift++;
 
-       ptr = alloc_bootmem(size * NR_CPUS);
+       ptr = alloc_bootmem_pages(size * NR_CPUS);
 
        __per_cpu_base = ptr - __per_cpu_start;
 
        for (i = 0; i < NR_CPUS; i++, ptr += size)
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+       /* Setup %g5 for the boot cpu.  */
+       __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
 }