/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

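/* Thread info for the secondary CPU currently being brought up; set by
 * __cpu_up() just before the new CPU is kicked. */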
struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero. After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
        smp_mb();
}
#endif

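/* Demultiplex an incoming IPI message and invoke the matching handler. */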
void smp_message_recv(int msg)
{
        switch (msg) {
        case PPC_MSG_CALL_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* we notice need_resched on exit */
                break;
        case PPC_MSG_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
        case PPC_MSG_DEBUGGER_BREAK:
                if (crash_ipi_function_ptr) {
                        crash_ipi_function_ptr(get_irq_regs());
                        break;
                }
#ifdef CONFIG_DEBUGGER
                debugger_ipi(get_irq_regs());
                break;
#endif /* CONFIG_DEBUGGER */
                /* FALLTHROUGH */
        default:
                printk(KERN_WARNING "SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}

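/* Per-message IRQ handlers, wired up by smp_request_message_ipi() on
 * interrupt controllers that provide a separate interrupt per message. */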
static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        /* we just need the return path side effect of checking need_resched */
        return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
        generic_smp_call_function_single_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
        smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
        return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] = call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
        [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
        [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] = "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
        [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
        [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* Optional: request the IRQ for one IPI message, for controllers with >= 4 IPIs */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
                return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
        if (msg == PPC_MSG_DEBUGGER_BREAK)
                return 1;
#endif
        err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
                          smp_ipi_name[msg], NULL);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
                virq, smp_ipi_name[msg], err);

        return err;
}

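/* Ask @cpu to reschedule; the target notices need_resched on return from
 * the interrupt, so the handler itself does nothing. */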
void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback && smp_ops) {
                mb();
                smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
        }
}
#endif

static void stop_this_cpu(void *dummy)
{
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);

        local_irq_disable();
        while (1)
                ;
}

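/* Take all other CPUs offline: each one marks itself !online and spins
 * with interrupts disabled. */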
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
        paca[cpu].__current = p;
        paca[cpu].kstack = (unsigned long) task_thread_info(p)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
        current_set[cpu] = task_thread_info(p);
        task_thread_info(p)->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        for_each_possible_cpu(cpu) {
                zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
        }

        cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
        cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

        if (smp_ops) {
                if (smp_ops->probe)
                        max_cpus = smp_ops->probe();
                else
                        max_cpus = NR_CPUS;
        } else {
                max_cpus = 1;
        }

        for_each_possible_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

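/* Generic cpu_disable: mark the CPU offline and, on ppc64, migrate its
 * interrupts away; the boot CPU cannot be taken down. */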
int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
        fixup_irqs(cpu_online_mask);
#endif
        return 0;
}

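/* Poll for up to ten seconds (100 x 100ms) for @cpu to report CPU_DEAD. */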
void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

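/* Runs on the dying CPU itself: mark ourselves CPU_DEAD, then spin until
 * cpu_state is set back to CPU_UP_PREPARE, i.e. we are being brought
 * online again. */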
void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        idle_task_exit();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
}
#endif

static int __devinit cpu_enable(unsigned int cpu)
{
        if (smp_ops && smp_ops->cpu_enable)
                return smp_ops->cpu_enable(cpu);

        return -ENOSYS;
}

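/* Bring up @cpu: hand it its idle thread, kick it, then wait for it to
 * set its cpu_callin_map entry and put itself in the online map. */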
int __cpuinit __cpu_up(unsigned int cpu)
{
        int c;

        secondary_ti = current_set[cpu];
        if (!cpu_enable(cpu))
                return 0;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        /* Make sure callin-map entry is 0 (it can be left over from
         * a previous CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        smp_ops->kick_cpu(cpu);

        /*
         * Wait to see if the cpu made a callin (is actually up).
         * Use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        msleep(1);
#endif

        if (!cpu_callin_map[cpu]) {
                printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        DBG("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
        struct device_node *np;
        const int *reg;
        int id = -1;

        np = of_get_cpu_node(cpu, NULL);
        if (!np)
                goto out;

        reg = of_get_property(np, "reg", NULL);
        if (!reg)
                goto out;

        id = *reg;
out:
        of_node_put(np);
        return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
        return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
        return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_map,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
        struct device_node *np;
        struct device_node *cache;

        if (!cpu_present(cpu))
                return NULL;

        np = of_get_cpu_node(cpu, NULL);
        if (np == NULL)
                return NULL;

        cache = of_find_next_cache_node(np);

        of_node_put(np);

        return cache;
}

/* Activate a secondary processor. */
void __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();
        struct device_node *l2_cache;
        int i, base;

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        secondary_cpu_time_init();

        ipi_call_lock();
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i))
                        continue;
                cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
                cpumask_set_cpu(cpu, cpu_core_mask(base + i));
                cpumask_set_cpu(base + i, cpu_core_mask(cpu));
        }
        l2_cache = cpu_to_l2cache(cpu);
        for_each_online_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        cpumask_set_cpu(cpu, cpu_core_mask(i));
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);
        ipi_call_unlock();

        local_irq_enable();

        cpu_idle();

        BUG();
}

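/* Changing the profiling timer multiplier is not supported; accept the
 * request and do nothing. */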
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_var_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        alloc_cpumask_var(&old_mask, GFP_NOWAIT);
        cpumask_copy(old_mask, &current->cpus_allowed);
        set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

        if (smp_ops && smp_ops->setup_cpu)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed_ptr(current, old_mask);

        free_cpumask_var(old_mask);

        dump_numa_cpu_topology();
}

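/* Let the scheduler pack tasks onto the lowest-numbered SMT threads on
 * CPUs where those threads run faster (CPU_FTR_ASYM_SMT, e.g. POWER7). */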
int arch_sd_sibling_asym_packing(void)
{
        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
                return SD_ASYM_PACKING;
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        struct device_node *l2_cache;
        int cpu = smp_processor_id();
        int base, i;
        int err;

        if (!smp_ops->cpu_disable)
                return -ENOSYS;

        err = smp_ops->cpu_disable();
        if (err)
                return err;

        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
        }

        l2_cache = cpu_to_l2cache(cpu);
        for_each_present_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        cpumask_clear_cpu(cpu, cpu_core_mask(i));
                        cpumask_clear_cpu(i, cpu_core_mask(cpu));
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);

        return 0;
}

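/* Called on the CPU performing the offline: give the platform a chance
 * to finish tearing down @cpu. */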
void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}

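/* Serializes arch-level CPU hotplug driver operations. */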
static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
        mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
        mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

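/* Final resting place for an offlined CPU; if the platform's cpu_die
 * hook returns, we resume in start_secondary(). */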
void cpu_die(void)
{
        if (ppc_md.cpu_die)
                ppc_md.cpu_die();

        /* If we return, we re-enter start_secondary */
        start_secondary_resume();
}

#endif