Convert cpu_sibling_map to be a per cpu variable
arch/sparc64/kernel/smp.c  [linux-3.10.git]
1 /* smp.c: Sparc64 SMP support.
2  *
3  * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
4  */
5
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/pagemap.h>
11 #include <linux/threads.h>
12 #include <linux/smp.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/delay.h>
16 #include <linux/init.h>
17 #include <linux/spinlock.h>
18 #include <linux/fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/cache.h>
21 #include <linux/jiffies.h>
22 #include <linux/profile.h>
23 #include <linux/bootmem.h>
24
25 #include <asm/head.h>
26 #include <asm/ptrace.h>
27 #include <asm/atomic.h>
28 #include <asm/tlbflush.h>
29 #include <asm/mmu_context.h>
30 #include <asm/cpudata.h>
31 #include <asm/hvtramp.h>
32 #include <asm/io.h>
33
34 #include <asm/irq.h>
35 #include <asm/irq_regs.h>
36 #include <asm/page.h>
37 #include <asm/pgtable.h>
38 #include <asm/oplib.h>
39 #include <asm/uaccess.h>
40 #include <asm/timer.h>
41 #include <asm/starfire.h>
42 #include <asm/tlb.h>
43 #include <asm/sections.h>
44 #include <asm/prom.h>
45 #include <asm/mdesc.h>
46 #include <asm/ldc.h>
47 #include <asm/hypervisor.h>
48
49 extern void calibrate_delay(void);
50
51 int sparc64_multi_core __read_mostly;
52
53 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
54 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
55 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
56 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
57         { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
58
59 EXPORT_SYMBOL(cpu_possible_map);
60 EXPORT_SYMBOL(cpu_online_map);
61 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
62 EXPORT_SYMBOL(cpu_core_map);
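/* NOTE: cpu_sibling_map is now a per-cpu cpumask_t rather than a global
 * NR_CPUS-sized array like cpu_core_map above.  Illustrative sketch only
 * (not code from this file): readers go through the per-cpu accessors,
 * for instance
 *
 *	cpumask_t *sib  = &per_cpu(cpu_sibling_map, cpu);     <- any cpu
 *	cpumask_t *mine = &__get_cpu_var(cpu_sibling_map);    <- the local cpu
 */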
63
64 static cpumask_t smp_commenced_mask;
65
66 void smp_info(struct seq_file *m)
67 {
68         int i;
69         
70         seq_printf(m, "State:\n");
71         for_each_online_cpu(i)
72                 seq_printf(m, "CPU%d:\t\tonline\n", i);
73 }
74
75 void smp_bogo(struct seq_file *m)
76 {
77         int i;
78         
79         for_each_online_cpu(i)
80                 seq_printf(m,
81                            "Cpu%dClkTck\t: %016lx\n",
82                            i, cpu_data(i).clock_tick);
83 }
84
85 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
86
87 extern void setup_sparc64_timer(void);
88
89 static volatile unsigned long callin_flag = 0;
90
91 void __devinit smp_callin(void)
92 {
93         int cpuid = hard_smp_processor_id();
94
95         __local_per_cpu_offset = __per_cpu_offset(cpuid);
96
97         if (tlb_type == hypervisor)
98                 sun4v_ktsb_register();
99
100         __flush_tlb_all();
101
102         setup_sparc64_timer();
103
104         if (cheetah_pcache_forced_on)
105                 cheetah_enable_pcache();
106
107         local_irq_enable();
108
109         callin_flag = 1;
110         __asm__ __volatile__("membar #Sync\n\t"
111                              "flush  %%g6" : : : "memory");
112
113         /* Clear this or we will die instantly when we
114          * schedule back to this idler...
115          */
116         current_thread_info()->new_child = 0;
117
118         /* Attach to the address space of init_task. */
119         atomic_inc(&init_mm.mm_count);
120         current->active_mm = &init_mm;
121
122         while (!cpu_isset(cpuid, smp_commenced_mask))
123                 rmb();
124
125         spin_lock(&call_lock);
126         cpu_set(cpuid, cpu_online_map);
127         spin_unlock(&call_lock);
128
129         /* idle thread is expected to have preempt disabled */
130         preempt_disable();
131 }
132
133 void cpu_panic(void)
134 {
135         printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
136         panic("SMP bolixed\n");
137 }
138
139 /* This tick register synchronization scheme is taken entirely from
140  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
141  *
142  * The only change I've made is to rework it so that the master
143  * initiates the synchronization instead of the slave. -DaveM
144  */
145
146 #define MASTER  0
147 #define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))
148
149 #define NUM_ROUNDS      64      /* magic value */
150 #define NUM_ITERS       5       /* likewise */
151
152 static DEFINE_SPINLOCK(itc_sync_lock);
153 static unsigned long go[SLAVE + 1];
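/* go[MASTER] and go[SLAVE] are the two handshake mailboxes used by the
 * master and slave loops below; SLAVE is a whole cache line's worth of
 * longs past MASTER so the two words never share a cache line.
 */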
154
155 #define DEBUG_TICK_SYNC 0
156
157 static inline long get_delta (long *rt, long *master)
158 {
159         unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
160         unsigned long tcenter, t0, t1, tm;
161         unsigned long i;
162
163         for (i = 0; i < NUM_ITERS; i++) {
164                 t0 = tick_ops->get_tick();
165                 go[MASTER] = 1;
166                 membar_storeload();
167                 while (!(tm = go[SLAVE]))
168                         rmb();
169                 go[SLAVE] = 0;
170                 wmb();
171                 t1 = tick_ops->get_tick();
172
173                 if (t1 - t0 < best_t1 - best_t0)
174                         best_t0 = t0, best_t1 = t1, best_tm = tm;
175         }
176
177         *rt = best_t1 - best_t0;
178         *master = best_tm - best_t0;
179
180         /* average best_t0 and best_t1 without overflow: */
181         tcenter = (best_t0/2 + best_t1/2);
182         if (best_t0 % 2 + best_t1 % 2 == 2)
183                 tcenter++;
184         return tcenter - best_tm;
185 }
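/* Worked example with made-up numbers: if the best iteration saw
 * t0 = 100 and t1 = 110 on the slave and tm = 108 on the master, then
 * tcenter = 105 and get_delta() returns 105 - 108 = -3, i.e. the slave
 * appears 3 ticks behind, while *rt = 10 bounds the measurement error
 * by the round-trip time.
 */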
186
187 void smp_synchronize_tick_client(void)
188 {
189         long i, delta, adj, adjust_latency = 0, done = 0;
190         unsigned long flags, rt, master_time_stamp, bound;
191 #if DEBUG_TICK_SYNC
192         struct {
193                 long rt;        /* roundtrip time */
194                 long master;    /* master's timestamp */
195                 long diff;      /* difference between midpoint and master's timestamp */
196                 long lat;       /* estimate of itc adjustment latency */
197         } t[NUM_ROUNDS];
198 #endif
199
200         go[MASTER] = 1;
201
202         while (go[MASTER])
203                 rmb();
204
205         local_irq_save(flags);
206         {
207                 for (i = 0; i < NUM_ROUNDS; i++) {
208                         delta = get_delta(&rt, &master_time_stamp);
209                         if (delta == 0) {
210                                 done = 1;       /* let's lock on to this... */
211                                 bound = rt;
212                         }
213
214                         if (!done) {
215                                 if (i > 0) {
216                                         adjust_latency += -delta;
217                                         adj = -delta + adjust_latency/4;
218                                 } else
219                                         adj = -delta;
220
221                                 tick_ops->add_tick(adj);
222                         }
223 #if DEBUG_TICK_SYNC
224                         t[i].rt = rt;
225                         t[i].master = master_time_stamp;
226                         t[i].diff = delta;
227                         t[i].lat = adjust_latency/4;
228 #endif
229                 }
230         }
231         local_irq_restore(flags);
232
233 #if DEBUG_TICK_SYNC
234         for (i = 0; i < NUM_ROUNDS; i++)
235                 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
236                        t[i].rt, t[i].master, t[i].diff, t[i].lat);
237 #endif
238
239         printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles, "
240                "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
241 }
242
243 static void smp_start_sync_tick_client(int cpu);
244
245 static void smp_synchronize_one_tick(int cpu)
246 {
247         unsigned long flags, i;
248
249         go[MASTER] = 0;
250
251         smp_start_sync_tick_client(cpu);
252
253         /* wait for client to be ready */
254         while (!go[MASTER])
255                 rmb();
256
257         /* now let the client proceed into his loop */
258         go[MASTER] = 0;
259         membar_storeload();
260
261         spin_lock_irqsave(&itc_sync_lock, flags);
262         {
263                 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
264                         while (!go[MASTER])
265                                 rmb();
266                         go[MASTER] = 0;
267                         wmb();
268                         go[SLAVE] = tick_ops->get_tick();
269                         membar_storeload();
270                 }
271         }
272         spin_unlock_irqrestore(&itc_sync_lock, flags);
273 }
274
275 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
276 /* XXX Put this in some common place. XXX */
277 static unsigned long kimage_addr_to_ra(void *p)
278 {
279         unsigned long val = (unsigned long) p;
280
281         return kern_base + (val - KERNBASE);
282 }
283
284 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
285 {
286         extern unsigned long sparc64_ttable_tl0;
287         extern unsigned long kern_locked_tte_data;
288         extern int bigkernel;
289         struct hvtramp_descr *hdesc;
290         unsigned long trampoline_ra;
291         struct trap_per_cpu *tb;
292         u64 tte_vaddr, tte_data;
293         unsigned long hv_err;
294
295         hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
296         if (!hdesc) {
297                 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
298                        "hvtramp_descr.\n");
299                 return;
300         }
301
302         hdesc->cpu = cpu;
303         hdesc->num_mappings = (bigkernel ? 2 : 1);
304
305         tb = &trap_block[cpu];
306         tb->hdesc = hdesc;
307
308         hdesc->fault_info_va = (unsigned long) &tb->fault_info;
309         hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
310
311         hdesc->thread_reg = thread_reg;
312
313         tte_vaddr = (unsigned long) KERNBASE;
314         tte_data = kern_locked_tte_data;
315
316         hdesc->maps[0].vaddr = tte_vaddr;
317         hdesc->maps[0].tte   = tte_data;
318         if (bigkernel) {
319                 tte_vaddr += 0x400000;
320                 tte_data  += 0x400000;
321                 hdesc->maps[1].vaddr = tte_vaddr;
322                 hdesc->maps[1].tte   = tte_data;
323         }
324
325         trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
326
327         hv_err = sun4v_cpu_start(cpu, trampoline_ra,
328                                  kimage_addr_to_ra(&sparc64_ttable_tl0),
329                                  __pa(hdesc));
330         if (hv_err)
331                 printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
332                        "gives error %lu\n", hv_err);
333 }
334 #endif
335
336 extern unsigned long sparc64_cpu_startup;
337
338 /* The OBP cpu startup callback truncates the 3rd arg cookie to
339  * 32-bits (I think) so to be safe we have it read the pointer
340  * contained here so we work on >4GB machines. -DaveM
341  */
342 static struct thread_info *cpu_new_thread = NULL;
343
344 static int __devinit smp_boot_one_cpu(unsigned int cpu)
345 {
346         struct trap_per_cpu *tb = &trap_block[cpu];
347         unsigned long entry =
348                 (unsigned long)(&sparc64_cpu_startup);
349         unsigned long cookie =
350                 (unsigned long)(&cpu_new_thread);
351         struct task_struct *p;
352         int timeout, ret;
353
354         p = fork_idle(cpu);
355         if (IS_ERR(p))
356                 return PTR_ERR(p);
357         callin_flag = 0;
358         cpu_new_thread = task_thread_info(p);
359
360         if (tlb_type == hypervisor) {
361 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
362                 if (ldom_domaining_enabled)
363                         ldom_startcpu_cpuid(cpu,
364                                             (unsigned long) cpu_new_thread);
365                 else
366 #endif
367                         prom_startcpu_cpuid(cpu, entry, cookie);
368         } else {
369                 struct device_node *dp = of_find_node_by_cpuid(cpu);
370
371                 prom_startcpu(dp->node, entry, cookie);
372         }
373
374         for (timeout = 0; timeout < 50000; timeout++) {
375                 if (callin_flag)
376                         break;
377                 udelay(100);
378         }
379
380         if (callin_flag) {
381                 ret = 0;
382         } else {
383                 printk("Processor %d is stuck.\n", cpu);
384                 ret = -ENODEV;
385         }
386         cpu_new_thread = NULL;
387
388         if (tb->hdesc) {
389                 kfree(tb->hdesc);
390                 tb->hdesc = NULL;
391         }
392
393         return ret;
394 }
395
396 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
397 {
398         u64 result, target;
399         int stuck, tmp;
400
401         if (this_is_starfire) {
402                 /* map to real upaid */
403                 cpu = (((cpu & 0x3c) << 1) |
404                         ((cpu & 0x40) >> 4) |
405                         (cpu & 0x3));
406         }
407
408         target = (cpu << 14) | 0x70;
409 again:
410         /* Ok, this is the real Spitfire Errata #54.
411          * One must read back from a UDB internal register
412          * after writes to the UDB interrupt dispatch, but
413          * before the membar Sync for that write.
414          * So we use the high UDB control register (ASI 0x7f,
415          * ADDR 0x20) for the dummy read. -DaveM
416          */
417         tmp = 0x40;
418         __asm__ __volatile__(
419         "wrpr   %1, %2, %%pstate\n\t"
420         "stxa   %4, [%0] %3\n\t"
421         "stxa   %5, [%0+%8] %3\n\t"
422         "add    %0, %8, %0\n\t"
423         "stxa   %6, [%0+%8] %3\n\t"
424         "membar #Sync\n\t"
425         "stxa   %%g0, [%7] %3\n\t"
426         "membar #Sync\n\t"
427         "mov    0x20, %%g1\n\t"
428         "ldxa   [%%g1] 0x7f, %%g0\n\t"
429         "membar #Sync"
430         : "=r" (tmp)
431         : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
432           "r" (data0), "r" (data1), "r" (data2), "r" (target),
433           "r" (0x10), "0" (tmp)
434         : "g1");
435
436         /* NOTE: PSTATE_IE is still clear. */
437         stuck = 100000;
438         do {
439                 __asm__ __volatile__("ldxa [%%g0] %1, %0"
440                         : "=r" (result)
441                         : "i" (ASI_INTR_DISPATCH_STAT));
442                 if (result == 0) {
443                         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
444                                              : : "r" (pstate));
445                         return;
446                 }
447                 stuck -= 1;
448                 if (stuck == 0)
449                         break;
450         } while (result & 0x1);
451         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
452                              : : "r" (pstate));
453         if (stuck == 0) {
454                 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
455                        smp_processor_id(), result);
456         } else {
457                 udelay(2);
458                 goto again;
459         }
460 }
461
462 static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
463 {
464         u64 pstate;
465         int i;
466
467         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
468         for_each_cpu_mask(i, mask)
469                 spitfire_xcall_helper(data0, data1, data2, pstate, i);
470 }
471
472 /* Cheetah now allows sending the whole 64 bytes of data in the interrupt
473  * packet, but we have no use for that.  However we do take advantage of
474  * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
475  */
476 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
477 {
478         u64 pstate, ver;
479         int nack_busy_id, is_jbus, need_more;
480
481         if (cpus_empty(mask))
482                 return;
483
484         /* Unfortunately, someone at Sun had the brilliant idea to make the
485          * busy/nack fields hard-coded by ITID number for this Ultra-III
486          * derivative processor.
487          */
488         __asm__ ("rdpr %%ver, %0" : "=r" (ver));
489         is_jbus = ((ver >> 32) == __JALAPENO_ID ||
490                    (ver >> 32) == __SERRANO_ID);
491
492         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
493
494 retry:
495         need_more = 0;
496         __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
497                              : : "r" (pstate), "i" (PSTATE_IE));
498
499         /* Setup the dispatch data registers. */
500         __asm__ __volatile__("stxa      %0, [%3] %6\n\t"
501                              "stxa      %1, [%4] %6\n\t"
502                              "stxa      %2, [%5] %6\n\t"
503                              "membar    #Sync\n\t"
504                              : /* no outputs */
505                              : "r" (data0), "r" (data1), "r" (data2),
506                                "r" (0x40), "r" (0x50), "r" (0x60),
507                                "i" (ASI_INTR_W));
508
509         nack_busy_id = 0;
510         {
511                 int i;
512
513                 for_each_cpu_mask(i, mask) {
514                         u64 target = (i << 14) | 0x70;
515
516                         if (!is_jbus)
517                                 target |= (nack_busy_id << 24);
518                         __asm__ __volatile__(
519                                 "stxa   %%g0, [%0] %1\n\t"
520                                 "membar #Sync\n\t"
521                                 : /* no outputs */
522                                 : "r" (target), "i" (ASI_INTR_W));
523                         nack_busy_id++;
524                         if (nack_busy_id == 32) {
525                                 need_more = 1;
526                                 break;
527                         }
528                 }
529         }
530
531         /* Now, poll for completion. */
532         {
533                 u64 dispatch_stat;
534                 long stuck;
535
536                 stuck = 100000 * nack_busy_id;
537                 do {
538                         __asm__ __volatile__("ldxa      [%%g0] %1, %0"
539                                              : "=r" (dispatch_stat)
540                                              : "i" (ASI_INTR_DISPATCH_STAT));
541                         if (dispatch_stat == 0UL) {
542                                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
543                                                      : : "r" (pstate));
544                                 if (unlikely(need_more)) {
545                                         int i, cnt = 0;
546                                         for_each_cpu_mask(i, mask) {
547                                                 cpu_clear(i, mask);
548                                                 cnt++;
549                                                 if (cnt == 32)
550                                                         break;
551                                         }
552                                         goto retry;
553                                 }
554                                 return;
555                         }
556                         if (!--stuck)
557                                 break;
558                 } while (dispatch_stat & 0x5555555555555555UL);
559
560                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
561                                      : : "r" (pstate));
562
563                 if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
564                         /* Busy bits will not clear, continue instead
565                          * of freezing up on this cpu.
566                          */
567                         printk("CPU[%d]: mondo stuckage result[%016lx]\n",
568                                smp_processor_id(), dispatch_stat);
569                 } else {
570                         int i, this_busy_nack = 0;
571
572                         /* Delay some random time with interrupts enabled
573                          * to prevent deadlock.
574                          */
575                         udelay(2 * nack_busy_id);
576
577                         /* Clear out the mask bits for cpus which did not
578                          * NACK us.
579                          */
580                         for_each_cpu_mask(i, mask) {
581                                 u64 check_mask;
582
583                                 if (is_jbus)
584                                         check_mask = (0x2UL << (2*i));
585                                 else
586                                         check_mask = (0x2UL <<
587                                                       this_busy_nack);
588                                 if ((dispatch_stat & check_mask) == 0)
589                                         cpu_clear(i, mask);
590                                 this_busy_nack += 2;
591                                 if (this_busy_nack == 64)
592                                         break;
593                         }
594
595                         goto retry;
596                 }
597         }
598 }
599
600 /* Multi-cpu list version.  */
601 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
602 {
603         struct trap_per_cpu *tb;
604         u16 *cpu_list;
605         u64 *mondo;
606         cpumask_t error_mask;
607         unsigned long flags, status;
608         int cnt, retries, this_cpu, prev_sent, i;
609
610         if (cpus_empty(mask))
611                 return;
612
613         /* We have to do this whole thing with interrupts fully disabled.
614          * Otherwise if we send an xcall from interrupt context it will
615          * corrupt both our mondo block and cpu list state.
616          *
617          * One consequence of this is that we cannot use timeout mechanisms
618          * that depend upon interrupts being delivered locally.  So, for
619          * example, we cannot sample jiffies and expect it to advance.
620          *
621          * Fortunately, udelay() uses %stick/%tick so we can use that.
622          */
623         local_irq_save(flags);
624
625         this_cpu = smp_processor_id();
626         tb = &trap_block[this_cpu];
627
628         mondo = __va(tb->cpu_mondo_block_pa);
629         mondo[0] = data0;
630         mondo[1] = data1;
631         mondo[2] = data2;
632         wmb();
633
634         cpu_list = __va(tb->cpu_list_pa);
635
636         /* Setup the initial cpu list.  */
637         cnt = 0;
638         for_each_cpu_mask(i, mask)
639                 cpu_list[cnt++] = i;
640
641         cpus_clear(error_mask);
642         retries = 0;
643         prev_sent = 0;
644         do {
645                 int forward_progress, n_sent;
646
647                 status = sun4v_cpu_mondo_send(cnt,
648                                               tb->cpu_list_pa,
649                                               tb->cpu_mondo_block_pa);
650
651                 /* HV_EOK means all cpus received the xcall, we're done.  */
652                 if (likely(status == HV_EOK))
653                         break;
654
655                 /* First, see if we made any forward progress.
656                  *
657                  * The hypervisor indicates successful sends by setting
658                  * cpu list entries to the value 0xffff.
659                  */
660                 n_sent = 0;
661                 for (i = 0; i < cnt; i++) {
662                         if (likely(cpu_list[i] == 0xffff))
663                                 n_sent++;
664                 }
665
666                 forward_progress = 0;
667                 if (n_sent > prev_sent)
668                         forward_progress = 1;
669
670                 prev_sent = n_sent;
671
672                 /* If we get a HV_ECPUERROR, then one or more of the cpus
673                  * in the list are in error state.  Use the cpu_state()
674                  * hypervisor call to find out which cpus are in error state.
675                  */
676                 if (unlikely(status == HV_ECPUERROR)) {
677                         for (i = 0; i < cnt; i++) {
678                                 long err;
679                                 u16 cpu;
680
681                                 cpu = cpu_list[i];
682                                 if (cpu == 0xffff)
683                                         continue;
684
685                                 err = sun4v_cpu_state(cpu);
686                                 if (err >= 0 &&
687                                     err == HV_CPU_STATE_ERROR) {
688                                         cpu_list[i] = 0xffff;
689                                         cpu_set(cpu, error_mask);
690                                 }
691                         }
692                 } else if (unlikely(status != HV_EWOULDBLOCK))
693                         goto fatal_mondo_error;
694
695                 /* Don't bother rewriting the CPU list, just leave the
696                  * 0xffff and non-0xffff entries in there and the
697                  * hypervisor will do the right thing.
698                  *
699                  * Only advance timeout state if we didn't make any
700                  * forward progress.
701                  */
702                 if (unlikely(!forward_progress)) {
703                         if (unlikely(++retries > 10000))
704                                 goto fatal_mondo_timeout;
705
706                         /* Delay a little bit to let other cpus catch up
707                          * on their cpu mondo queue work.
708                          */
709                         udelay(2 * cnt);
710                 }
711         } while (1);
712
713         local_irq_restore(flags);
714
715         if (unlikely(!cpus_empty(error_mask)))
716                 goto fatal_mondo_cpu_error;
717
718         return;
719
720 fatal_mondo_cpu_error:
721         printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
722                "were in error state\n",
723                this_cpu);
724         printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
725         for_each_cpu_mask(i, error_mask)
726                 printk("%d ", i);
727         printk("]\n");
728         return;
729
730 fatal_mondo_timeout:
731         local_irq_restore(flags);
732         printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
733                "progress after %d retries.\n",
734                this_cpu, retries);
735         goto dump_cpu_list_and_out;
736
737 fatal_mondo_error:
738         local_irq_restore(flags);
739         printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
740                this_cpu, status);
741         printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
742                "mondo_block_pa(%lx)\n",
743                this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
744
745 dump_cpu_list_and_out:
746         printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
747         for (i = 0; i < cnt; i++)
748                 printk("%u ", cpu_list[i]);
749         printk("]\n");
750 }
751
752 /* Send cross call to all processors mentioned in MASK
753  * except self.
754  */
755 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
756 {
757         u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
758         int this_cpu = get_cpu();
759
760         cpus_and(mask, mask, cpu_online_map);
761         cpu_clear(this_cpu, mask);
762
763         if (tlb_type == spitfire)
764                 spitfire_xcall_deliver(data0, data1, data2, mask);
765         else if (tlb_type == cheetah || tlb_type == cheetah_plus)
766                 cheetah_xcall_deliver(data0, data1, data2, mask);
767         else
768                 hypervisor_xcall_deliver(data0, data1, data2, mask);
769         /* NOTE: Caller runs local copy on master. */
770
771         put_cpu();
772 }
773
774 extern unsigned long xcall_sync_tick;
775
776 static void smp_start_sync_tick_client(int cpu)
777 {
778         cpumask_t mask = cpumask_of_cpu(cpu);
779
780         smp_cross_call_masked(&xcall_sync_tick,
781                               0, 0, 0, mask);
782 }
783
784 /* Send cross call to all processors except self. */
785 #define smp_cross_call(func, ctx, data1, data2) \
786         smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
787
788 struct call_data_struct {
789         void (*func) (void *info);
790         void *info;
791         atomic_t finished;
792         int wait;
793 };
794
795 static struct call_data_struct *call_data;
796
797 extern unsigned long xcall_call_function;
798
799 /**
800  * smp_call_function(): Run a function on all other CPUs.
801  * @func: The function to run. This must be fast and non-blocking.
802  * @info: An arbitrary pointer to pass to the function.
803  * @nonatomic: currently unused.
804  * @wait: If true, wait (atomically) until function has completed on other CPUs.
805  *
806  * Returns 0 on success, else a negative status code. Does not return until
807  * remote CPUs are nearly ready to execute <<func>> or have already executed it.
808  *
809  * You must not call this function with disabled interrupts or from a
810  * hardware interrupt handler or from a bottom half handler.
811  */
812 static int smp_call_function_mask(void (*func)(void *info), void *info,
813                                   int nonatomic, int wait, cpumask_t mask)
814 {
815         struct call_data_struct data;
816         int cpus;
817
818         /* Can deadlock when called with interrupts disabled */
819         WARN_ON(irqs_disabled());
820
821         data.func = func;
822         data.info = info;
823         atomic_set(&data.finished, 0);
824         data.wait = wait;
825
826         spin_lock(&call_lock);
827
828         cpu_clear(smp_processor_id(), mask);
829         cpus = cpus_weight(mask);
830         if (!cpus)
831                 goto out_unlock;
832
833         call_data = &data;
834         mb();
835
836         smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
837
838         /* Wait for response */
839         while (atomic_read(&data.finished) != cpus)
840                 cpu_relax();
841
842 out_unlock:
843         spin_unlock(&call_lock);
844
845         return 0;
846 }
847
848 int smp_call_function(void (*func)(void *info), void *info,
849                       int nonatomic, int wait)
850 {
851         return smp_call_function_mask(func, info, nonatomic, wait,
852                                       cpu_online_map);
853 }
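/* Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *	...
 *	smp_call_function(bump_counter, &hits, 0, 1);
 *	atomic_inc(&hits);
 *
 * With wait == 1 the call returns only after every other online cpu has run
 * bump_counter(); the calling cpu is skipped, so it does its own increment
 * by hand afterwards.
 */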
854
855 void smp_call_function_client(int irq, struct pt_regs *regs)
856 {
857         void (*func) (void *info) = call_data->func;
858         void *info = call_data->info;
859
860         clear_softint(1 << irq);
861         if (call_data->wait) {
862                 /* let initiator proceed only after completion */
863                 func(info);
864                 atomic_inc(&call_data->finished);
865         } else {
866                 /* let initiator proceed after getting data */
867                 atomic_inc(&call_data->finished);
868                 func(info);
869         }
870 }
871
872 static void tsb_sync(void *info)
873 {
874         struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
875         struct mm_struct *mm = info;
876
877         /* It is not valid to test "current->active_mm == mm" here.
878          *
879          * The value of "current" is not changed atomically with
880          * switch_mm().  But that's OK, we just need to check the
881          * current cpu's trap block PGD physical address.
882          */
883         if (tp->pgd_paddr == __pa(mm->pgd))
884                 tsb_context_switch(mm);
885 }
886
887 void smp_tsb_sync(struct mm_struct *mm)
888 {
889         smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
890 }
891
892 extern unsigned long xcall_flush_tlb_mm;
893 extern unsigned long xcall_flush_tlb_pending;
894 extern unsigned long xcall_flush_tlb_kernel_range;
895 extern unsigned long xcall_report_regs;
896 extern unsigned long xcall_receive_signal;
897 extern unsigned long xcall_new_mmu_context_version;
898
899 #ifdef DCACHE_ALIASING_POSSIBLE
900 extern unsigned long xcall_flush_dcache_page_cheetah;
901 #endif
902 extern unsigned long xcall_flush_dcache_page_spitfire;
903
904 #ifdef CONFIG_DEBUG_DCFLUSH
905 extern atomic_t dcpage_flushes;
906 extern atomic_t dcpage_flushes_xcall;
907 #endif
908
909 static __inline__ void __local_flush_dcache_page(struct page *page)
910 {
911 #ifdef DCACHE_ALIASING_POSSIBLE
912         __flush_dcache_page(page_address(page),
913                             ((tlb_type == spitfire) &&
914                              page_mapping(page) != NULL));
915 #else
916         if (page_mapping(page) != NULL &&
917             tlb_type == spitfire)
918                 __flush_icache_page(__pa(page_address(page)));
919 #endif
920 }
921
922 void smp_flush_dcache_page_impl(struct page *page, int cpu)
923 {
924         cpumask_t mask = cpumask_of_cpu(cpu);
925         int this_cpu;
926
927         if (tlb_type == hypervisor)
928                 return;
929
930 #ifdef CONFIG_DEBUG_DCFLUSH
931         atomic_inc(&dcpage_flushes);
932 #endif
933
934         this_cpu = get_cpu();
935
936         if (cpu == this_cpu) {
937                 __local_flush_dcache_page(page);
938         } else if (cpu_online(cpu)) {
939                 void *pg_addr = page_address(page);
940                 u64 data0;
941
942                 if (tlb_type == spitfire) {
943                         data0 =
944                                 ((u64)&xcall_flush_dcache_page_spitfire);
945                         if (page_mapping(page) != NULL)
946                                 data0 |= ((u64)1 << 32);
947                         spitfire_xcall_deliver(data0,
948                                                __pa(pg_addr),
949                                                (u64) pg_addr,
950                                                mask);
951                 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
952 #ifdef DCACHE_ALIASING_POSSIBLE
953                         data0 =
954                                 ((u64)&xcall_flush_dcache_page_cheetah);
955                         cheetah_xcall_deliver(data0,
956                                               __pa(pg_addr),
957                                               0, mask);
958 #endif
959                 }
960 #ifdef CONFIG_DEBUG_DCFLUSH
961                 atomic_inc(&dcpage_flushes_xcall);
962 #endif
963         }
964
965         put_cpu();
966 }
967
968 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
969 {
970         void *pg_addr = page_address(page);
971         cpumask_t mask = cpu_online_map;
972         u64 data0;
973         int this_cpu;
974
975         if (tlb_type == hypervisor)
976                 return;
977
978         this_cpu = get_cpu();
979
980         cpu_clear(this_cpu, mask);
981
982 #ifdef CONFIG_DEBUG_DCFLUSH
983         atomic_inc(&dcpage_flushes);
984 #endif
985         if (cpus_empty(mask))
986                 goto flush_self;
987         if (tlb_type == spitfire) {
988                 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
989                 if (page_mapping(page) != NULL)
990                         data0 |= ((u64)1 << 32);
991                 spitfire_xcall_deliver(data0,
992                                        __pa(pg_addr),
993                                        (u64) pg_addr,
994                                        mask);
995         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
996 #ifdef DCACHE_ALIASING_POSSIBLE
997                 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
998                 cheetah_xcall_deliver(data0,
999                                       __pa(pg_addr),
1000                                       0, mask);
1001 #endif
1002         }
1003 #ifdef CONFIG_DEBUG_DCFLUSH
1004         atomic_inc(&dcpage_flushes_xcall);
1005 #endif
1006  flush_self:
1007         __local_flush_dcache_page(page);
1008
1009         put_cpu();
1010 }
1011
1012 static void __smp_receive_signal_mask(cpumask_t mask)
1013 {
1014         smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
1015 }
1016
1017 void smp_receive_signal(int cpu)
1018 {
1019         cpumask_t mask = cpumask_of_cpu(cpu);
1020
1021         if (cpu_online(cpu))
1022                 __smp_receive_signal_mask(mask);
1023 }
1024
1025 void smp_receive_signal_client(int irq, struct pt_regs *regs)
1026 {
1027         clear_softint(1 << irq);
1028 }
1029
1030 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
1031 {
1032         struct mm_struct *mm;
1033         unsigned long flags;
1034
1035         clear_softint(1 << irq);
1036
1037         /* See if we need to allocate a new TLB context because
1038          * the version of the one we are using is now out of date.
1039          */
1040         mm = current->active_mm;
1041         if (unlikely(!mm || (mm == &init_mm)))
1042                 return;
1043
1044         spin_lock_irqsave(&mm->context.lock, flags);
1045
1046         if (unlikely(!CTX_VALID(mm->context)))
1047                 get_new_mmu_context(mm);
1048
1049         spin_unlock_irqrestore(&mm->context.lock, flags);
1050
1051         load_secondary_context(mm);
1052         __flush_tlb_mm(CTX_HWBITS(mm->context),
1053                        SECONDARY_CONTEXT);
1054 }
1055
1056 void smp_new_mmu_context_version(void)
1057 {
1058         smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
1059 }
1060
1061 void smp_report_regs(void)
1062 {
1063         smp_cross_call(&xcall_report_regs, 0, 0, 0);
1064 }
1065
1066 /* We know that the window frames of the user have been flushed
1067  * to the stack before we get here because all callers of us
1068  * are flush_tlb_*() routines, and these run after flush_cache_*()
1069  * which performs the flushw.
1070  *
1071  * The SMP TLB coherency scheme we use works as follows:
1072  *
1073  * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1074  *    space has (potentially) executed on, this is the heuristic
1075  *    we use to avoid doing cross calls.
1076  *
1077  *    Also, for flushing from kswapd and also for clones, we
1078  *    use cpu_vm_mask as the list of cpus to make run the TLB.
1079  *
1080  * 2) TLB context numbers are shared globally across all processors
1081  *    in the system, this allows us to play several games to avoid
1082  *    cross calls.
1083  *
1084  *    One invariant is that when a cpu switches to a process, and
1085  *    that process's tsk->active_mm->cpu_vm_mask does not have the
1086  *    current cpu's bit set, that tlb context is flushed locally.
1087  *
1088  *    If the address space is non-shared (ie. mm_users == 1) we avoid
1089  *    cross calls when we want to flush the currently running process's
1090  *    tlb state.  This is done by clearing all cpu bits except the current
1091  *    processor's in current->active_mm->cpu_vm_mask and performing the
1092  *    flush locally only.  This will force any subsequent cpus which run
1093  *    this task to flush the context from the local tlb if the process
1094  *    migrates to another cpu (again).
1095  *
1096  * 3) For shared address spaces (threads) and swapping we bite the
1097  *    bullet for most cases and perform the cross call (but only to
1098  *    the cpus listed in cpu_vm_mask).
1099  *
1100  *    The performance gain from "optimizing" away the cross call for threads is
1101  *    questionable (in theory the big win for threads is the massive sharing of
1102  *    address space state across processors).
1103  */
1104
1105 /* This currently is only used by the hugetlb arch pre-fault
1106  * hook on UltraSPARC-III+ and later when changing the pagesize
1107  * bits of the context register for an address space.
1108  */
1109 void smp_flush_tlb_mm(struct mm_struct *mm)
1110 {
1111         u32 ctx = CTX_HWBITS(mm->context);
1112         int cpu = get_cpu();
1113
1114         if (atomic_read(&mm->mm_users) == 1) {
1115                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1116                 goto local_flush_and_out;
1117         }
1118
1119         smp_cross_call_masked(&xcall_flush_tlb_mm,
1120                               ctx, 0, 0,
1121                               mm->cpu_vm_mask);
1122
1123 local_flush_and_out:
1124         __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1125
1126         put_cpu();
1127 }
1128
1129 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1130 {
1131         u32 ctx = CTX_HWBITS(mm->context);
1132         int cpu = get_cpu();
1133
1134         if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
1135                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1136         else
1137                 smp_cross_call_masked(&xcall_flush_tlb_pending,
1138                                       ctx, nr, (unsigned long) vaddrs,
1139                                       mm->cpu_vm_mask);
1140
1141         __flush_tlb_pending(ctx, nr, vaddrs);
1142
1143         put_cpu();
1144 }
1145
1146 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1147 {
1148         start &= PAGE_MASK;
1149         end    = PAGE_ALIGN(end);
1150         if (start != end) {
1151                 smp_cross_call(&xcall_flush_tlb_kernel_range,
1152                                0, start, end);
1153
1154                 __flush_tlb_kernel_range(start, end);
1155         }
1156 }
1157
1158 /* CPU capture. */
1159 /* #define CAPTURE_DEBUG */
1160 extern unsigned long xcall_capture;
1161
1162 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1163 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1164 static unsigned long penguins_are_doing_time;
1165
1166 void smp_capture(void)
1167 {
1168         int result = atomic_add_ret(1, &smp_capture_depth);
1169
1170         if (result == 1) {
1171                 int ncpus = num_online_cpus();
1172
1173 #ifdef CAPTURE_DEBUG
1174                 printk("CPU[%d]: Sending penguins to jail...",
1175                        smp_processor_id());
1176 #endif
1177                 penguins_are_doing_time = 1;
1178                 membar_storestore_loadstore();
1179                 atomic_inc(&smp_capture_registry);
1180                 smp_cross_call(&xcall_capture, 0, 0, 0);
1181                 while (atomic_read(&smp_capture_registry) != ncpus)
1182                         rmb();
1183 #ifdef CAPTURE_DEBUG
1184                 printk("done\n");
1185 #endif
1186         }
1187 }
1188
1189 void smp_release(void)
1190 {
1191         if (atomic_dec_and_test(&smp_capture_depth)) {
1192 #ifdef CAPTURE_DEBUG
1193                 printk("CPU[%d]: Giving pardon to "
1194                        "imprisoned penguins\n",
1195                        smp_processor_id());
1196 #endif
1197                 penguins_are_doing_time = 0;
1198                 membar_storeload_storestore();
1199                 atomic_dec(&smp_capture_registry);
1200         }
1201 }
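/* Typical usage pattern (illustrative): code about to hand control to the
 * firmware brackets the critical region as
 *
 *	smp_capture();
 *	... work that needs every other cpu parked in the PROM ...
 *	smp_release();
 *
 * The smp_capture_depth counter above lets such sections nest safely.
 */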
1202
1203 /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
1204  * can service tlb flush xcalls...
1205  */
1206 extern void prom_world(int);
1207
1208 void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1209 {
1210         clear_softint(1 << irq);
1211
1212         preempt_disable();
1213
1214         __asm__ __volatile__("flushw");
1215         prom_world(1);
1216         atomic_inc(&smp_capture_registry);
1217         membar_storeload_storestore();
1218         while (penguins_are_doing_time)
1219                 rmb();
1220         atomic_dec(&smp_capture_registry);
1221         prom_world(0);
1222
1223         preempt_enable();
1224 }
1225
1226 /* /proc/profile writes can call this, don't __init it please. */
1227 int setup_profiling_timer(unsigned int multiplier)
1228 {
1229         return -EINVAL;
1230 }
1231
1232 void __init smp_prepare_cpus(unsigned int max_cpus)
1233 {
1234 }
1235
1236 void __devinit smp_prepare_boot_cpu(void)
1237 {
1238 }
1239
1240 void __devinit smp_fill_in_sib_core_maps(void)
1241 {
1242         unsigned int i;
1243
1244         for_each_present_cpu(i) {
1245                 unsigned int j;
1246
1247                 cpus_clear(cpu_core_map[i]);
1248                 if (cpu_data(i).core_id == 0) {
1249                         cpu_set(i, cpu_core_map[i]);
1250                         continue;
1251                 }
1252
1253                 for_each_present_cpu(j) {
1254                         if (cpu_data(i).core_id ==
1255                             cpu_data(j).core_id)
1256                                 cpu_set(j, cpu_core_map[i]);
1257                 }
1258         }
1259
1260         for_each_present_cpu(i) {
1261                 unsigned int j;
1262
1263                 cpus_clear(per_cpu(cpu_sibling_map, i));
1264                 if (cpu_data(i).proc_id == -1) {
1265                         cpu_set(i, per_cpu(cpu_sibling_map, i));
1266                         continue;
1267                 }
1268
1269                 for_each_present_cpu(j) {
1270                         if (cpu_data(i).proc_id ==
1271                             cpu_data(j).proc_id)
1272                                 cpu_set(j, per_cpu(cpu_sibling_map, i));
1273                 }
1274         }
1275 }
1276
1277 int __cpuinit __cpu_up(unsigned int cpu)
1278 {
1279         int ret = smp_boot_one_cpu(cpu);
1280
1281         if (!ret) {
1282                 cpu_set(cpu, smp_commenced_mask);
1283                 while (!cpu_isset(cpu, cpu_online_map))
1284                         mb();
1285                 if (!cpu_isset(cpu, cpu_online_map)) {
1286                         ret = -ENODEV;
1287                 } else {
1288                         /* On SUN4V, writes to %tick and %stick are
1289                          * not allowed.
1290                          */
1291                         if (tlb_type != hypervisor)
1292                                 smp_synchronize_one_tick(cpu);
1293                 }
1294         }
1295         return ret;
1296 }
1297
1298 #ifdef CONFIG_HOTPLUG_CPU
1299 void cpu_play_dead(void)
1300 {
1301         int cpu = smp_processor_id();
1302         unsigned long pstate;
1303
1304         idle_task_exit();
1305
1306         if (tlb_type == hypervisor) {
1307                 struct trap_per_cpu *tb = &trap_block[cpu];
1308
1309                 sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1310                                 tb->cpu_mondo_pa, 0);
1311                 sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1312                                 tb->dev_mondo_pa, 0);
1313                 sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1314                                 tb->resum_mondo_pa, 0);
1315                 sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1316                                 tb->nonresum_mondo_pa, 0);
1317         }
1318
1319         cpu_clear(cpu, smp_commenced_mask);
1320         membar_safe("#Sync");
1321
1322         local_irq_disable();
1323
1324         __asm__ __volatile__(
1325                 "rdpr   %%pstate, %0\n\t"
1326                 "wrpr   %0, %1, %%pstate"
1327                 : "=r" (pstate)
1328                 : "i" (PSTATE_IE));
1329
1330         while (1)
1331                 barrier();
1332 }
1333
1334 int __cpu_disable(void)
1335 {
1336         int cpu = smp_processor_id();
1337         cpuinfo_sparc *c;
1338         int i;
1339
1340         for_each_cpu_mask(i, cpu_core_map[cpu])
1341                 cpu_clear(cpu, cpu_core_map[i]);
1342         cpus_clear(cpu_core_map[cpu]);
1343
1344         for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
1345                 cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
1346         cpus_clear(per_cpu(cpu_sibling_map, cpu));
1347
1348         c = &cpu_data(cpu);
1349
1350         c->core_id = 0;
1351         c->proc_id = -1;
1352
1353         spin_lock(&call_lock);
1354         cpu_clear(cpu, cpu_online_map);
1355         spin_unlock(&call_lock);
1356
1357         smp_wmb();
1358
1359         /* Make sure no interrupts point to this cpu.  */
1360         fixup_irqs();
1361
1362         local_irq_enable();
1363         mdelay(1);
1364         local_irq_disable();
1365
1366         return 0;
1367 }
1368
1369 void __cpu_die(unsigned int cpu)
1370 {
1371         int i;
1372
1373         for (i = 0; i < 100; i++) {
1374                 smp_rmb();
1375                 if (!cpu_isset(cpu, smp_commenced_mask))
1376                         break;
1377                 msleep(100);
1378         }
1379         if (cpu_isset(cpu, smp_commenced_mask)) {
1380                 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1381         } else {
1382 #if defined(CONFIG_SUN_LDOMS)
1383                 unsigned long hv_err;
1384                 int limit = 100;
1385
1386                 do {
1387                         hv_err = sun4v_cpu_stop(cpu);
1388                         if (hv_err == HV_EOK) {
1389                                 cpu_clear(cpu, cpu_present_map);
1390                                 break;
1391                         }
1392                 } while (--limit > 0);
1393                 if (limit <= 0) {
1394                         printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1395                                hv_err);
1396                 }
1397 #endif
1398         }
1399 }
1400 #endif
1401
1402 void __init smp_cpus_done(unsigned int max_cpus)
1403 {
1404 }
1405
1406 void smp_send_reschedule(int cpu)
1407 {
1408         smp_receive_signal(cpu);
1409 }
1410
1411 /* This is a nop because we capture all other cpus
1412  * anyways when making the PROM active.
1413  */
1414 void smp_send_stop(void)
1415 {
1416 }
1417
1418 unsigned long __per_cpu_base __read_mostly;
1419 unsigned long __per_cpu_shift __read_mostly;
1420
1421 EXPORT_SYMBOL(__per_cpu_base);
1422 EXPORT_SYMBOL(__per_cpu_shift);
1423
1424 void __init real_setup_per_cpu_areas(void)
1425 {
1426         unsigned long goal, size, i;
1427         char *ptr;
1428
1429         /* Copy section for each CPU (we discard the original) */
1430         goal = PERCPU_ENOUGH_ROOM;
1431
1432         __per_cpu_shift = PAGE_SHIFT;
1433         for (size = PAGE_SIZE; size < goal; size <<= 1UL)
1434                 __per_cpu_shift++;
1435
1436         ptr = alloc_bootmem_pages(size * NR_CPUS);
1437
1438         __per_cpu_base = ptr - __per_cpu_start;
1439
1440         for (i = 0; i < NR_CPUS; i++, ptr += size)
1441                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1442
1443         /* Setup %g5 for the boot cpu.  */
1444         __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1445 }
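/* Sizing example (hypothetical numbers): with 8K pages (PAGE_SHIFT = 13)
 * and a goal of, say, 128K of per-cpu data, the loop above doubles size up
 * to 128K and leaves __per_cpu_shift at 17, so cpu N's copy of the per-cpu
 * section starts (N << 17) bytes after cpu 0's.
 */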