/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_CPU_BACKTRACE,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}
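
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * machine descriptor usually supplies these callbacks via a struct
 * smp_operations that setup_arch() hands to smp_set_ops(), e.g.:
 *
 *	static struct smp_operations myplat_smp_ops __initdata = {
 *		.smp_init_cpus		= myplat_smp_init_cpus,
 *		.smp_prepare_cpus	= myplat_smp_prepare_cpus,
 *		.smp_secondary_init	= myplat_secondary_init,
 *		.smp_boot_secondary	= myplat_boot_secondary,
 *	#ifdef CONFIG_HOTPLUG_CPU
 *		.cpu_die		= myplat_cpu_die,
 *	#endif
 *	};
 *
 *	DT_MACHINE_START(MYPLAT_DT, "Hypothetical board")
 *		.smp	= smp_ops(myplat_smp_ops),
 *		...
 *	MACHINE_END
 */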

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
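	/*
	 * The secondary starts with the MMU and caches disabled, so clean
	 * secondary_data out of the L1 and any outer cache to guarantee
	 * it observes the values written above.
	 */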
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	printk("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
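
	/*
	 * loops_per_jiffy counts delay-loop iterations per jiffy, each
	 * nominally two "bogo-instructions", so BogoMIPS is
	 * lpj * HZ / 500000: e.g. a CPU with lpj = 4997120 at HZ = 100
	 * contributes 999.42 BogoMIPS to the total printed below.
	 */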
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platforms smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
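
/*
 * As an example, GIC-based platforms typically install their softirq
 * trigger from platform init code so the IPI senders below work:
 *
 *	set_smp_cross_call(gic_raise_softirq);
 */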

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}
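
/*
 * show_ipi_list() feeds the IPI section of /proc/interrupts; on a
 * two-CPU system the output looks roughly like this (counts are
 * illustrative):
 *
 *	IPI0:          0          1  CPU wakeup interrupts
 *	IPI1:          0          0  Timer broadcast interrupts
 *	IPI2:       1291       1023  Rescheduling interrupts
 *	IPI3:         52         49  Function call interrupts
 */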

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif
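
/*
 * A per-cpu timer driver registers its ops once at probe time, e.g.
 * (hypothetical driver names, for illustration only):
 *
 *	static struct local_timer_ops mytimer_ops __cpuinitdata = {
 *		.setup	= mytimer_setup,
 *		.stop	= mytimer_stop,
 *	};
 *
 *	err = local_timer_register(&mytimer_ops);
 *
 * percpu_timer_setup() below then prefers these ops and falls back to
 * the dummy broadcast device only if .setup() fails.
 */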

static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * ourselves here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

static cpumask_t backtrace_mask;
static DEFINE_RAW_SPINLOCK(backtrace_lock);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

void smp_send_all_cpu_backtrace(void)
{
	unsigned int this_cpu = smp_processor_id();
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		return;

	cpumask_copy(&backtrace_mask, cpu_online_mask);
	cpu_clear(this_cpu, backtrace_mask);

	pr_info("Backtrace for cpu %d (current):\n", this_cpu);
	dump_stack();

	pr_info("\nsending IPI to all other CPUs:\n");
	smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);

	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(&backtrace_mask))
			break;
		mdelay(1);
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_clear_bit();
}
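
/*
 * Typical use (an assumption about callers, for illustration): a debug
 * facility such as a watchdog or sysrq handler calls this when the
 * system appears wedged, e.g. with a hypothetical helper:
 *
 *	if (watchdog_saw_stall())
 *		smp_send_all_cpu_backtrace();
 */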

/*
 * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
 */
static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
{
	if (cpu_isset(cpu, backtrace_mask)) {
		raw_spin_lock(&backtrace_lock);
		pr_warning("IPI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		raw_spin_unlock(&backtrace_lock);
		cpu_clear(cpu, backtrace_mask);
	}
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		ipi_cpu_backtrace(cpu, regs);
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}
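
/*
 * cpufreq_scale(ref, ref_freq, new_freq) returns ref * new_freq /
 * ref_freq. Worked example: a reference lpj of 4997120 captured at
 * 1000000 kHz rescales on a transition to 500000 kHz as
 * 4997120 * 500000 / 1000000 = 2498560, so udelay() spins half as
 * many loops on the slower clock.
 */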

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
			CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif