* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/config.h>
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
+#include <linux/ftrace.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
+#include <linux/completion.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
+#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
+#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
-
-/*
- * bitmask of present and online CPUs.
- * The present bitmask indicates that the CPU is physically present.
- * The online bitmask indicates that the CPU is up and running.
- */
-cpumask_t cpu_possible_map;
-cpumask_t cpu_online_map;
+#include <asm/localtimer.h>
/*
* as from 2.5, kernels no longer have an init_tasks structure
*/
struct secondary_data secondary_data;
-/*
- * structures for inter-processor calls
- * - A collection of single bit ipi messages.
- */
-struct ipi_data {
- spinlock_t lock;
- unsigned long ipi_count;
- unsigned long bits;
-};
-
-static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
- .lock = SPIN_LOCK_UNLOCKED,
-};
-
enum ipi_msg_type {
- IPI_TIMER,
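+	/*
+	 * Message numbering starts at 2, presumably leaving the lowest
+	 * IPI numbers free for platform-specific use.
+	 */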
+ IPI_TIMER = 2,
IPI_RESCHEDULE,
IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
};
-struct smp_call_struct {
- void (*func)(void *info);
- void *info;
- int wait;
- cpumask_t pending;
- cpumask_t unfinished;
-};
-
-static struct smp_call_struct * volatile smp_call_function_data;
-static DEFINE_SPINLOCK(smp_call_function_lock);
-
int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle;
pgd_t *pgd;
- pmd_t *pmd;
int ret;
/*
return PTR_ERR(idle);
}
ci->idle = idle;
+ } else {
+ /*
+ * Since this idle thread is being re-used, call
+ * init_idle() to reinitialize the thread structure.
+ */
+ init_idle(idle, cpu);
}
/*
* a 1:1 mapping for the physical address of the kernel.
*/
pgd = pgd_alloc(&init_mm);
- pmd = pmd_offset(pgd, PHYS_OFFSET);
- *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
- PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
+ if (!pgd)
+ return -ENOMEM;
+
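+	/*
+	 * The secondary CPU turns on its MMU while still executing from
+	 * physical addresses, so it needs temporary identity mappings of
+	 * the kernel text and data.  When the kernel runs where virtual
+	 * and physical addresses coincide, none are needed.
+	 */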
+ if (PHYS_OFFSET != PAGE_OFFSET) {
+#ifndef CONFIG_HOTPLUG_CPU
+ identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
+#endif
+ identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
+ identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
+ }
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
*/
- secondary_data.stack = (void *)idle->thread_info + THREAD_START_SP;
+ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
secondary_data.pgdir = virt_to_phys(pgd);
- wmb();
+ secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
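+	/*
+	 * The secondary CPU reads secondary_data before its MMU and
+	 * caches are enabled, so clean it from the L1 data cache and the
+	 * outer cache to make it visible to uncached accesses.
+	 */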
+ __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
+ outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
/*
* Now bring the CPU into our world.
barrier();
}
- if (!cpu_online(cpu))
+ if (!cpu_online(cpu)) {
+ pr_crit("CPU%u: failed to come online\n", cpu);
ret = -EIO;
+ }
+ } else {
+ pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
}
- secondary_data.stack = 0;
+ secondary_data.stack = NULL;
secondary_data.pgdir = 0;
- *pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
- pgd_free(pgd);
-
- if (ret) {
- printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
-
- /*
- * FIXME: We need to clean up the new idle thread. --rmk
- */
+ if (PHYS_OFFSET != PAGE_OFFSET) {
+#ifndef CONFIG_HOTPLUG_CPU
+ identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
+#endif
+ identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
+ identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
}
+ pgd_free(&init_mm, pgd);
+
return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
+static void percpu_timer_stop(void);
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
struct task_struct *p;
int ret;
- ret = mach_cpu_disable(cpu);
+ ret = platform_cpu_disable(cpu);
if (ret)
return ret;
* Take this CPU offline. Once we clear this, we can't return,
* and we must not schedule until we're ready to give up the cpu.
*/
- cpu_clear(cpu, cpu_online_map);
+ set_cpu_online(cpu, false);
/*
* OK - migrate IRQs away from this CPU
*/
migrate_irqs();
+ /*
+ * Stop the local timer for this CPU.
+ */
+ percpu_timer_stop();
+
/*
* Flush user cache and TLB mappings, and then remove this CPU
* from the vm mask set of all processes.
read_lock(&tasklist_lock);
for_each_process(p) {
if (p->mm)
- cpu_clear(cpu, p->mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
}
read_unlock(&tasklist_lock);
return 0;
}
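+/*
+ * Signalled by the dying CPU in cpu_die() and waited on by __cpu_die()
+ * on the CPU that requested the shutdown.
+ */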
+static DECLARE_COMPLETION(cpu_died);
+
/*
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
-void __cpuexit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
{
+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ pr_err("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
+ printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+
if (!platform_cpu_kill(cpu))
printk("CPU%u: unable to kill\n", cpu);
}
* of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this.
*/
-void __cpuexit cpu_die(void)
+void __ref cpu_die(void)
{
unsigned int cpu = smp_processor_id();
- local_irq_disable();
idle_task_exit();
+ local_irq_disable();
+ mb();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ complete(&cpu_died);
+
/*
* actual CPU shutdown procedure is at least platform (if not
- * CPU) specific
+ * CPU) specific.
*/
platform_cpu_die(cpu);
* to be repeated to undo the effects of taking the CPU offline.
*/
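+	/* Clear fp so backtraces terminate cleanly on the fresh stack. */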
__asm__("mov sp, %0\n"
+ " mov fp, #0\n"
" b secondary_start_kernel"
:
- : "r" ((void *)current->thread_info + THREAD_SIZE - 8));
+ : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
+/*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+ struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
+
+ cpu_info->loops_per_jiffy = loops_per_jiffy;
+}
+
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
- atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- cpu_set(cpu, mm->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu_switch_mm(mm->pgd, mm);
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
cpu_init();
+ preempt_disable();
+ trace_hardirqs_off();
/*
* Give the platform a chance to do its own initialisation.
/*
* Enable local interrupts.
*/
+ notify_cpu_starting(cpu);
local_irq_enable();
local_fiq_enable();
+ /*
+ * Setup the percpu timer for this CPU.
+ */
+ percpu_timer_setup();
+
calibrate_delay();
smp_store_cpu_info(cpu);
/*
* OK, now it's safe to let the boot CPU continue
*/
- cpu_set(cpu, cpu_online_map);
+ set_cpu_online(cpu, true);
/*
* OK, it's off to the idle thread for us
cpu_idle();
}
-/*
- * Called by both boot and secondaries to move global data into
- * per-processor storage.
- */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
-{
- struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
-
- cpu_info->loops_per_jiffy = loops_per_jiffy;
-}
-
void __init smp_cpus_done(unsigned int max_cpus)
{
int cpu;
unsigned int cpu = smp_processor_id();
per_cpu(cpu_data, cpu).idle = current;
-
- cpu_set(cpu, cpu_possible_map);
- cpu_set(cpu, cpu_present_map);
- cpu_set(cpu, cpu_online_map);
}
-static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
+void __init smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned long flags;
- unsigned int cpu;
-
- local_irq_save(flags);
-
- for_each_cpu_mask(cpu, callmap) {
- struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned int ncores = num_possible_cpus();
- spin_lock(&ipi->lock);
- ipi->bits |= 1 << msg;
- spin_unlock(&ipi->lock);
- }
+ smp_store_cpu_info(smp_processor_id());
/*
- * Call the platform specific cross-CPU call function.
+ * are we trying to boot more cores than exist?
*/
- smp_cross_call(callmap);
-
- local_irq_restore(flags);
-}
-
-/*
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler, nor from a bottom half handler.
- */
-int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry,
- int wait, cpumask_t callmap)
-{
- struct smp_call_struct data;
- unsigned long timeout;
- int ret = 0;
-
- data.func = func;
- data.info = info;
- data.wait = wait;
-
- cpu_clear(smp_processor_id(), callmap);
- if (cpus_empty(callmap))
- goto out;
-
- data.pending = callmap;
- if (wait)
- data.unfinished = callmap;
-
- /*
- * try to get the mutex on smp_call_function_data
- */
- spin_lock(&smp_call_function_lock);
- smp_call_function_data = &data;
-
- send_ipi_message(callmap, IPI_CALL_FUNC);
+ if (max_cpus > ncores)
+ max_cpus = ncores;
- timeout = jiffies + HZ;
- while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
- barrier();
-
- /*
- * did we time out?
- */
- if (!cpus_empty(data.pending)) {
+ if (max_cpus > 1) {
/*
- * this may be causing our panic - report it
+ * Enable the local timer or broadcast device for the
+ * boot CPU, but only if we have more than one CPU.
*/
- printk(KERN_CRIT
- "CPU%u: smp_call_function timeout for %p(%p)\n"
- " callmap %lx pending %lx, %swait\n",
- smp_processor_id(), func, info, *cpus_addr(callmap),
- *cpus_addr(data.pending), wait ? "" : "no ");
+ percpu_timer_setup();
/*
- * TRACE
+	 * Initialise the SCU if there is more than one CPU
+	 * and let the secondaries know where to start.
*/
- timeout = jiffies + (5 * HZ);
- while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
- barrier();
-
- if (cpus_empty(data.pending))
- printk(KERN_CRIT " RESOLVED\n");
- else
- printk(KERN_CRIT " STILL STUCK\n");
+ platform_smp_prepare_cpus(max_cpus);
}
-
- /*
- * whatever happened, we're done with the data, so release it
- */
- smp_call_function_data = NULL;
- spin_unlock(&smp_call_function_lock);
-
- if (!cpus_empty(data.pending)) {
- ret = -ETIMEDOUT;
- goto out;
- }
-
- if (wait)
- while (!cpus_empty(data.unfinished))
- barrier();
- out:
-
- return 0;
}
-int smp_call_function(void (*func)(void *info), void *info, int retry,
- int wait)
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+
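+/*
+ * Platform code registers the function used to raise an IPI here,
+ * e.g. the GIC's software-generated interrupt mechanism.
+ */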
+void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
- return smp_call_function_on_cpu(func, info, retry, wait,
- cpu_online_map);
+ smp_cross_call = fn;
}
-void show_ipi_list(struct seq_file *p)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- unsigned int cpu;
-
- seq_puts(p, "IPI:");
-
- for_each_present_cpu(cpu)
- seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
-
- seq_putc(p, '\n');
+ smp_cross_call(mask, IPI_CALL_FUNC);
}
-static void ipi_timer(struct pt_regs *regs)
+void arch_send_call_function_single_ipi(int cpu)
{
- int user = user_mode(regs);
-
- irq_enter();
- profile_tick(CPU_PROFILING, regs);
- update_process_times(user);
- irq_exit();
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
-/*
- * ipi_call_function - handle IPI from smp_call_function()
- *
- * Note that we copy data out of the cross-call structure and then
- * let the caller know that we're here and have done with their data
- */
-static void ipi_call_function(unsigned int cpu)
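+/*
+ * Printable names for the IPI statistics; S() rebases the enum values
+ * (which start at IPI_TIMER == 2) so the array is indexed from zero.
+ */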
+static const char *ipi_types[NR_IPI] = {
+#define S(x,s) [x - IPI_TIMER] = s
+ S(IPI_TIMER, "Timer broadcast interrupts"),
+ S(IPI_RESCHEDULE, "Rescheduling interrupts"),
+ S(IPI_CALL_FUNC, "Function call interrupts"),
+ S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
+ S(IPI_CPU_STOP, "CPU stop interrupts"),
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
{
- struct smp_call_struct *data = smp_call_function_data;
- void (*func)(void *info) = data->func;
- void *info = data->info;
- int wait = data->wait;
+ unsigned int cpu, i;
- cpu_clear(cpu, data->pending);
+ for (i = 0; i < NR_IPI; i++) {
+ seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
- func(info);
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ",
+ __get_irq_stat(cpu, ipi_irqs[i]));
- if (wait)
- cpu_clear(cpu, data->unfinished);
+ seq_printf(p, " %s\n", ipi_types[i]);
+ }
}
-static DEFINE_SPINLOCK(stop_lock);
-
-/*
- * ipi_cpu_stop - handle IPI from smp_send_stop()
- */
-static void ipi_cpu_stop(unsigned int cpu)
+u64 smp_irq_stat_cpu(unsigned int cpu)
{
- spin_lock(&stop_lock);
- printk(KERN_CRIT "CPU%u: stopping\n", cpu);
- dump_stack();
- spin_unlock(&stop_lock);
+ u64 sum = 0;
+ int i;
- cpu_clear(cpu, cpu_online_map);
+ for (i = 0; i < NR_IPI; i++)
+ sum += __get_irq_stat(cpu, ipi_irqs[i]);
- local_fiq_disable();
- local_irq_disable();
+#ifdef CONFIG_LOCAL_TIMERS
+ sum += __get_irq_stat(cpu, local_timer_irqs);
+#endif
- while (1)
- cpu_relax();
+ return sum;
}
/*
- * Main handler for inter-processor interrupts
- *
- * For ARM, the ipimask now only identifies a single
- * category of IPI (Bit 1 IPIs have been replaced by a
- * different mechanism):
- *
- * Bit 0 - Inter-processor function call
+ * Timer (local or broadcast) support
*/
-void do_IPI(struct pt_regs *regs)
-{
- unsigned int cpu = smp_processor_id();
- struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
- ipi->ipi_count++;
-
- for (;;) {
- unsigned long msgs;
-
- spin_lock(&ipi->lock);
- msgs = ipi->bits;
- ipi->bits = 0;
- spin_unlock(&ipi->lock);
-
- if (!msgs)
- break;
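+/*
+ * Handle IPI_TIMER: feed the broadcast tick to this CPU's clock event
+ * device inside an irq_enter/irq_exit pair so that it is accounted as
+ * interrupt context.
+ */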
+static void ipi_timer(void)
+{
+ struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
+ irq_enter();
+ evt->event_handler(evt);
+ irq_exit();
+}
- do {
- unsigned nextmsg;
+#ifdef CONFIG_LOCAL_TIMERS
+asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ int cpu = smp_processor_id();
- nextmsg = msgs & -msgs;
- msgs &= ~nextmsg;
- nextmsg = ffz(~nextmsg);
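+	/* Only account and handle the tick if our local timer raised it */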
+ if (local_timer_ack()) {
+ __inc_irq_stat(cpu, local_timer_irqs);
+ ipi_timer();
+ }
- switch (nextmsg) {
- case IPI_TIMER:
- ipi_timer(regs);
- break;
+ set_irq_regs(old_regs);
+}
- case IPI_RESCHEDULE:
- /*
- * nothing more to do - eveything is
- * done on the interrupt return path
- */
- break;
+void show_local_irqs(struct seq_file *p, int prec)
+{
+ unsigned int cpu;
- case IPI_CALL_FUNC:
- ipi_call_function(cpu);
- break;
+ seq_printf(p, "%*s: ", prec, "LOC");
- case IPI_CPU_STOP:
- ipi_cpu_stop(cpu);
- break;
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
- default:
- printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
- cpu, nextmsg);
- break;
- }
- } while (msgs);
- }
+ seq_printf(p, " Local timer interrupts\n");
}
+#endif
-void smp_send_reschedule(int cpu)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+static void smp_timer_broadcast(const struct cpumask *mask)
{
- send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+ smp_cross_call(mask, IPI_TIMER);
}
+#else
+#define smp_timer_broadcast NULL
+#endif
-void smp_send_timer(void)
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
{
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
- send_ipi_message(mask, IPI_TIMER);
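+	/*
+	 * Nothing to do: the dummy device has no hardware to program;
+	 * actual events are delivered via IPI_TIMER broadcasts.
+	 */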
}
-void smp_send_stop(void)
+static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
- send_ipi_message(mask, IPI_CPU_STOP);
-}
+ evt->name = "dummy_timer";
+ evt->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DUMMY;
+ evt->rating = 400;
+ evt->mult = 1;
+ evt->set_mode = broadcast_timer_set_mode;
-/*
- * not supported here
- */
-int __init setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
+ clockevents_register_device(evt);
}
-static int
-on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
- cpumask_t mask)
+void __cpuinit percpu_timer_setup(void)
{
- int ret = 0;
-
- preempt_disable();
-
- ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
- if (cpu_isset(smp_processor_id(), mask))
- func(info);
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
- preempt_enable();
+ evt->cpumask = cpumask_of(cpu);
+ evt->broadcast = smp_timer_broadcast;
- return ret;
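+	/*
+	 * Try to bring up a real local timer for this CPU; if there is
+	 * none, fall back to the broadcast-driven dummy device.
+	 */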
+ if (local_timer_setup(evt))
+ broadcast_timer_setup(evt);
}
-/**********************************************************************/
-
+#ifdef CONFIG_HOTPLUG_CPU
/*
- * TLB operations
+ * The generic clock events code purposely does not stop the local timer
+ * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
+ * manually here.
*/
-struct tlb_args {
- struct vm_area_struct *ta_vma;
- unsigned long ta_start;
- unsigned long ta_end;
-};
-
-static inline void ipi_flush_tlb_all(void *ignored)
+static void percpu_timer_stop(void)
{
- local_flush_tlb_all();
-}
-
-static inline void ipi_flush_tlb_mm(void *arg)
-{
- struct mm_struct *mm = (struct mm_struct *)arg;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
- local_flush_tlb_mm(mm);
+ evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}
+#endif
-static inline void ipi_flush_tlb_page(void *arg)
-{
- struct tlb_args *ta = (struct tlb_args *)arg;
-
- local_flush_tlb_page(ta->ta_vma, ta->ta_start);
-}
+static DEFINE_SPINLOCK(stop_lock);
-static inline void ipi_flush_tlb_kernel_page(void *arg)
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(unsigned int cpu)
{
- struct tlb_args *ta = (struct tlb_args *)arg;
+ if (system_state == SYSTEM_BOOTING ||
+ system_state == SYSTEM_RUNNING) {
+ spin_lock(&stop_lock);
+ printk(KERN_CRIT "CPU%u: stopping\n", cpu);
+ dump_stack();
+ spin_unlock(&stop_lock);
+ }
- local_flush_tlb_kernel_page(ta->ta_start);
-}
+ set_cpu_online(cpu, false);
-static inline void ipi_flush_tlb_range(void *arg)
-{
- struct tlb_args *ta = (struct tlb_args *)arg;
+ local_fiq_disable();
+ local_irq_disable();
- local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+ while (1)
+ cpu_relax();
}
-static inline void ipi_flush_tlb_kernel_range(void *arg)
+/*
+ * Main handler for inter-processor interrupts
+ */
+asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
- struct tlb_args *ta = (struct tlb_args *)arg;
+ unsigned int cpu = smp_processor_id();
+ struct pt_regs *old_regs = set_irq_regs(regs);
- local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
-}
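+	/* Account the IPI, rebasing ipinr so the stats array is zero-based */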
+ if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
+ __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
-void flush_tlb_all(void)
-{
- on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
-}
+ switch (ipinr) {
+ case IPI_TIMER:
+ ipi_timer();
+ break;
-void flush_tlb_mm(struct mm_struct *mm)
-{
- cpumask_t mask = mm->cpu_vm_mask;
+ case IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
- on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
-}
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
-{
- cpumask_t mask = vma->vm_mm->cpu_vm_mask;
- struct tlb_args ta;
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
- ta.ta_vma = vma;
- ta.ta_start = uaddr;
+ case IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
- on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+ default:
+ printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
+ cpu, ipinr);
+ break;
+ }
+ set_irq_regs(old_regs);
}
-void flush_tlb_kernel_page(unsigned long kaddr)
+void smp_send_reschedule(int cpu)
{
- struct tlb_args ta;
-
- ta.ta_start = kaddr;
-
- on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+ smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
-void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+void smp_send_stop(void)
{
- cpumask_t mask = vma->vm_mm->cpu_vm_mask;
- struct tlb_args ta;
+ unsigned long timeout;
- ta.ta_vma = vma;
- ta.ta_start = start;
- ta.ta_end = end;
+ if (num_online_cpus() > 1) {
+ cpumask_t mask = cpu_online_map;
+ cpu_clear(smp_processor_id(), mask);
- on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
-}
+ smp_cross_call(&mask, IPI_CPU_STOP);
+ }
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- struct tlb_args ta;
+ /* Wait up to one second for other CPUs to stop */
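+	/* Busy-wait: this can run from panic paths where sleeping is unsafe */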
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && timeout--)
+ udelay(1);
- ta.ta_start = start;
- ta.ta_end = end;
+ if (num_online_cpus() > 1)
+ pr_warning("SMP: failed to stop secondary CPUs\n");
+}
- on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+/*
+ * Dynamically changing the profiling timer rate is not supported here.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
}